blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
710399c311e96fa14333f4bc99f4fb7e2ead09cb | 298b00c9c8da3811d99602045f7ddceaa9a40fe4 | /List-09/L9-ex008.py | 1552850aa04abfd825585da3a4952bca7447b9ca | [] | no_license | MariaGabrielaReis/Python-for-Zombies | 2154266d0858ee7bb0f1c143df4e82b47097f2a0 | 9fe0492fa9c42c360841029af8e56b4de46d7e61 | refs/heads/main | 2023-06-02T21:16:29.272813 | 2021-06-24T22:15:55 | 2021-06-24T22:15:55 | 338,445,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | # H. squirrel_play
# os esquilos na FATEC brincam quando a temperatura está entre 60 e 90
# graus Fahreneit (são estrangeiros e o termômetro é diferente rs)
# caso seja verão, então a temperatura superior é 100 no lugar de 90
# retorne True caso os esquilos brinquem
# squirrel_play(70, False) -> True
# squirrel_play(95, False) -> False
# squirrel_play(95, True) -> True
def squirrel_play(temp, is_summer):
    """Return True when the squirrels play.

    They play when the temperature is between 60 and 90 (inclusive);
    in summer the upper bound is raised to 100.
    """
    upper_limit = 100 if is_summer else 90
    return 60 <= temp <= upper_limit
def test(obtido, esperado):
if obtido == esperado:
prefixo = ' Parabéns!'
else:
prefixo = ' Ainda não'
print ('%s obtido: %s esperado: %s'
% (prefixo, repr(obtido), repr(esperado)))
def main():
    """Run the squirrel_play test battery and print each result."""
    print('squirrel_play')
    # (args, expected) pairs, exercised in the original order.
    cases = [
        ((70, False), True),
        ((95, False), False),
        ((95, True), True),
        ((90, False), True),
        ((90, True), True),
        ((50, False), False),
        ((50, True), False),
        ((100, False), False),
        ((100, True), True),
        ((105, True), False),
        ((59, False), False),
        ((59, True), False),
        ((60, False), True),
    ]
    for call_args, expected in cases:
        test(squirrel_play(*call_args), expected)


if __name__ == '__main__':
    main()
| [
"mariagabrielagreis@gmail.com"
] | mariagabrielagreis@gmail.com |
0971d3ea8bd8e43d4e6c7de3f98eec817c1012af | 1475e0769c7f9c0c4ede19f7686ed8ef219e763d | /01-oo/01-pessoa/python/modelo.py | 7b25a328b73807b198591895e81a2ab7a1308902 | [] | no_license | hvescovi/programar2020 | ea73efd6438239e77b70633935d0b4a32e5dcdf6 | eab13efd4329505d4354c86de55a305f42461832 | refs/heads/master | 2023-05-15T01:17:20.773602 | 2023-05-04T00:14:04 | 2023-05-04T00:14:04 | 239,891,090 | 3 | 10 | null | 2023-05-04T00:14:49 | 2020-02-12T00:07:26 | Java | UTF-8 | Python | false | false | 889 | py | class Pessoa:
# construtor com parâmetros opcionais
def __init__(self, nome="", email="", tel=""):
self.nome = nome
self.email = email
self.telefone = tel
# método que expressa o objeto em forma de string
def __str__(self):
return "\n" + self.nome + ", email: " + \
self.email + ", " + self.telefone
# Is this module being executed directly?
# When it is merely imported, execution does not enter this if-block.
if __name__ == "__main__":
    # Create an object and fill in some of its values afterwards.
    joao = Pessoa()
    joao.nome = "João da Silva"
    joao.email = "josilva@gmail.com"
    # Create an object passing every value up front.
    ma = Pessoa("Maria Oliveira", "mao@gmail.com", "9449-2332")
    # Create an object providing only some of the values.
    ti = Pessoa("Tiago Nune", "tinu@gmail.com")
    # Print the sample data (each Pessoa renders through __str__).
    print(joao, ma, ti)
"hvescovi@gmail.com"
] | hvescovi@gmail.com |
ec7727370bda908c61bd6dffa2582b2e85827122 | 2a34596d5e3dc10998bc6288f0e59101e3ee1a91 | /bb_user/api/views/user.py | 53ccdc45a83ff4f9ed2a62ab498438a42d1e272e | [] | no_license | shaggy72023/test_backend | 09e8baa2ef29f90917bfd9cd21f6a861349c143c | 7e86215abb63a21b197f0ab7575d437192494f55 | refs/heads/master | 2021-07-02T07:52:19.706434 | 2017-09-22T05:45:12 | 2017-09-22T05:45:12 | 104,438,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from django.views.generic import View
from bb_user.api.forms.user import LoginForm
from utils.api.exceptions import RequestValidationFailedAPIError
from utils.api.mixins import APIMixin
from bb_user.api.serializers.user import serialize, serialize_access_token
import bb_user.services.user
import json
class User(APIMixin, View):
    """API endpoints for creating, fetching and activating user accounts."""

    def post(self, request, parameters):
        # Register a new user; responds with a freshly serialized access token.
        payload = json.loads(parameters)
        new_user = bb_user.services.user.create(
            payload['email_activation_url'], payload)
        return serialize_access_token(new_user)

    def get(self, request, user_id, parameters):
        # Fetch and serialize a single user by its identifier.
        return serialize(bb_user.services.user.get(request, user_id))

    def put(self, request, parameters):
        # Activate a previously registered account.
        return bb_user.services.user.activate(**json.loads(parameters))
class LoginUser(User):
    """Login endpoint: validates credentials and returns an access token."""

    def post(self, request, parameters):
        form = LoginForm(data=json.loads(parameters))
        if not form.is_valid():
            # Surface field-level validation problems to the API client.
            raise RequestValidationFailedAPIError(form.errors)
        logged_in = bb_user.services.user.login(**form.cleaned_data)
        return serialize_access_token(logged_in)
| [
"shaggy7202@gmail.com"
] | shaggy7202@gmail.com |
0d0d2ffc39978ee892064a0a6dd67844f78dbe82 | 7448b23adc31d1fcdd2225959aafd3436ba4624d | /My_Contacts/manage.py | ea71b81ea2eb2edd3f03518f437c5f27b5c898be | [] | no_license | nameis-vivek/Phone-contacts | a9326cdf22bc8811b11d5094a19551cad2272e6f | 6714d9ac5a2f6ad4fe68281c3a220ecdf1cd855f | refs/heads/master | 2020-12-26T08:40:59.340293 | 2020-02-23T11:02:28 | 2020-02-23T11:02:28 | 237,451,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point for administrative tasks."""
    # Fall back to the project settings module unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'My_Contacts.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the subcommand named on the command line (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"nameis.vivek434@gmail.com"
] | nameis.vivek434@gmail.com |
14088b4fdd11f11038d1da3cbd5acfe1ee24cf8f | 44b0e7f892ccd57430f1ad25e340d136bfa01e66 | /Week12-utilities.py | 61f05375fb93af0cbe47e65cacccb1e4185df990 | [] | no_license | qwhite723/102-week-12-v2 | 6e43cc906fb95c5bfa80a5aa269cad9d04ba7c55 | e427d0bf1c1a53edbb7e802e6e54e919bd10fc78 | refs/heads/master | 2020-09-12T08:14:05.534525 | 2019-11-22T02:56:03 | 2019-11-22T02:56:03 | 222,365,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | # Incremental Build Model
# Quintin White
# CSCI 102-Section B
# Week 12 Part B
def PrintOutput(word):
    """Echo *word* to stdout, prefixed with the OUTPUT tag."""
    print("OUTPUT", word)
def LoadFile(filename):
    """Return every line of *filename* as a list (trailing newlines kept)."""
    with open(filename, 'r') as handle:
        return handle.readlines()
def UpdateString(string1, string2, index):
    """Return *string1* with the character at *index* replaced by *string2*
    (which may be longer than a single character)."""
    chars = list(string1)
    chars[index] = string2
    return ''.join(chars)
def FindWordCount(my_list, string):
    """Count how many elements of *my_list* compare equal to *string*.

    Uses the built-in list.count, which performs the same equality scan
    as the original manual counter loop.
    """
    return my_list.count(string)
def ScoreFinder(names, scores, name):
    """Print the score for *name*, matched case-insensitively against
    *names* (scores[i] corresponds to names[i]).

    NOTE(review): when *name* matches in more than one of the exact /
    upper-cased / lower-cased passes, the LAST pass that matches wins,
    because each loop overwrites *index*.
    """
    uNames = []
    lNames = []
    holder = ''
    index = 0
    # Build parallel upper- and lower-cased copies of the roster.
    for x in names:
        holder = x
        uNames.append(holder.upper())
        lNames.append(holder.lower())
    if name in names or name in uNames or name in lNames:
        # Three linear scans; each records the position of its last match.
        count = 0
        for player in names:
            if player == name:
                index = count
            count += 1
        count = 0
        for player in uNames:
            if player == name:
                index = count
            count += 1
        count = 0
        for player in lNames:
            if player == name:
                index = count
            count += 1
        print("OUTPUT", name, "got a score of", scores[index])
    else:
        print("OUTPUT player not found")
def Union(list1, list2):
    """Return the order-preserving union of the two lists.

    Duplicates are dropped; the first occurrence wins. Membership is
    equality-based, so unhashable elements are supported.
    """
    merged = []
    for value in list1 + list2:
        if value not in merged:
            merged.append(value)
    return merged
def Intersection(uno, dos):
    """Return the elements of *uno* that also appear in *dos*
    (order and duplicates follow *uno*)."""
    return [name for name in uno if name in dos]
def NotIn(uno, dos):
    """Return the elements of *uno* that are absent from *dos*."""
    return [name for name in uno if name not in dos]
| [
"qwhite@mymail.mines.edu"
] | qwhite@mymail.mines.edu |
11e23a39ee50259b80807054237d052b14197563 | 62ca5ef5ef0308ac6153e247c36b5a88ed41a148 | /tp4/Ej1.py | f15e17ee602ffb99bcfd37217540b24eb6281c2f | [] | no_license | MCarlomagno/frro-soporte-g9 | 4efc0a757d556b8da219a2ff143e7ffddcd0c285 | 6fd2267f26d005a93f6002e2f40d0ea2b05ff491 | refs/heads/master | 2020-03-08T11:25:38.071354 | 2018-08-22T21:36:34 | 2018-08-22T21:36:34 | 128,097,237 | 1 | 0 | null | 2018-06-22T23:01:03 | 2018-04-04T17:32:29 | Python | UTF-8 | Python | false | false | 1,281 | py | from tkinter import *
def sumar():
    """Print the sum of the two entry fields, or an error for bad input."""
    try:
        print(int(var1.get()) + int(var2.get()))
    except ValueError:
        print("Valores inválidos")
def restar():
    """Print the difference of the two entry fields, or an error for bad input."""
    try:
        print(int(var1.get()) - int(var2.get()))
    except ValueError:
        print("Valores inválidos")
def multiplicar():
    """Print the product of the two entry fields, or an error for bad input."""
    try:
        print(int(var1.get()) * int(var2.get()))
    except ValueError:
        print("Valores inválidos")
def dividir():
    """Print the quotient of the two entry fields, guarding against
    non-numeric input and division by zero."""
    try:
        print(int(var1.get()) / int(var2.get()))
    except ZeroDivisionError:
        print("¡No se puede dividir por cero!")
    except ValueError:
        print("Valores inválidos")
# Build the calculator window: two entry fields and one button per operation.
root = Tk()
etiq1 = Label(root, text="Primer operando")
input1 = IntVar()  # NOTE(review): unused — the Entry widgets are read directly
var1 = Entry(root)
etiq2 = Label(root, text="Segundo operando")
input2 = IntVar()  # NOTE(review): unused
var2 = Entry(root)
# One button per arithmetic operation; results are printed to stdout.
b1 = Button(root, text="+", command=sumar)
b2 = Button(root, text="-", command=restar)
b3 = Button(root, text="x", command=multiplicar)
b4 = Button(root, text="/", command=dividir)
# Stack all widgets vertically, in creation order.
etiq1.pack()
var1.pack()
etiq2.pack()
var2.pack()
b1.pack()
b2.pack()
b3.pack()
b4.pack()
root.mainloop()
| [
"marcoscarlomagno1@gmail.com"
] | marcoscarlomagno1@gmail.com |
6c79ae8cc7aed21c5f2b9410bcf90b219dedfe16 | 07af444dafa5bde373b0730e92d67e455d4ff4df | /SFData/StackOverflow/s36972087_ground_truth.py | 79f82ae3f49c2bb32dc969c91d323ecc4f7a516f | [] | no_license | tensfa/tensfa | 9114595b58a2e989780af0c348afb89a2abb04b4 | 415dcfaec589b0b14c5b9864872c912f3851b383 | refs/heads/main | 2023-06-30T14:27:38.217089 | 2021-08-03T01:33:30 | 2021-08-03T01:33:30 | 368,465,614 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import tensorflow as tf
import numpy as np
# Synthetic data set: 10 examples with 19 features and one-hot 2-class labels.
train_images = np.array(np.random.random((10, 19)), dtype=np.float32)
train_labels = np.random.randint(0, 2, 10, dtype=np.int32)
train_labels = np.eye(2)[train_labels]

# NOTE(review): this uses the TensorFlow 1.x graph API (placeholders,
# InteractiveSession); under TF 2.x it needs tf.compat.v1 with eager
# execution disabled — confirm the intended TF version.
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 19])
y_ = tf.placeholder(tf.float32, shape=[None, 2])

# Single-layer softmax classifier: weights (19 -> 2) and biases.
W = tf.Variable(tf.zeros([19,2]))
b = tf.Variable(tf.zeros([2]))

sess.run(tf.global_variables_initializer())

y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

start = 0
batch_1 = 50
end = 100
for i in range(1):
    # batch = mnist.train.next_batch(50)
    x1 = train_images[start:end]
    y1 = train_labels[start:end]
    start = start + batch_1
    end = end + batch_1
    x1 = np.reshape(x1, (-1, 19))
    y1 = np.reshape(y1, (-1, 2))
    # NOTE(review): only the first row of the batch is fed (expand_dims of
    # x1[0]/y1[0]), so each loop iteration trains on a single example.
    train_step.run(feed_dict={x: np.expand_dims(x1[0], 0), y_: np.expand_dims(y1[0], 0)})
"tensfa@yeah.net"
] | tensfa@yeah.net |
5e2d7c42cd9a82ff6a1f3c604c74eecb8986bec7 | 114da0dc8dfb2599fa71f7f45afec2329bb71877 | /Python/NLP in Python App/NLP.py | 1fe281c00ad89787d3b83633fbcaf3a6f2c83ec3 | [] | no_license | DanielBakerDev/Portfolio | 867269ff037c8e9a780514257671b12d2d537b1b | 6d469cc07d03a4eeb52556675d4f0039d251809f | refs/heads/main | 2023-05-25T10:04:59.738364 | 2023-05-23T15:53:47 | 2023-05-23T15:53:47 | 261,299,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | class NLP:
def __init__(self, email, money, noun):
self.email = email
self.money = money
self.noun = noun
| [
"danielbakerr13@gmail.com"
] | danielbakerr13@gmail.com |
62a13abd4c0147da29cd785233f04f06aca6a23a | 2a8abd5d6acdc260aff3639bce35ca1e688869e9 | /telestream_cloud_qc_sdk/test/test_container_essence_consistency_test.py | a53e951acde1e1e1d545fa4c1388c5f5ecb32225 | [
"MIT"
] | permissive | Telestream/telestream-cloud-python-sdk | 57dd2f0422c83531e213f48d87bc0c71f58b5872 | ce0ad503299661a0f622661359367173c06889fc | refs/heads/master | 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 | MIT | 2018-01-22T10:07:49 | 2016-01-12T11:10:56 | Python | UTF-8 | Python | false | false | 1,600 | py | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.container_essence_consistency_test import ContainerEssenceConsistencyTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestContainerEssenceConsistencyTest(unittest.TestCase):
    """ContainerEssenceConsistencyTest unit test stubs"""

    def setUp(self):
        # No per-test fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test ContainerEssenceConsistencyTest
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = telestream_cloud_qc.models.container_essence_consistency_test.ContainerEssenceConsistencyTest()  # noqa: E501
        if include_optional :
            # Model with all optional fields populated.
            return ContainerEssenceConsistencyTest(
                reject_on_error = True, 
                checked = True
            )
        else :
            # Model built from required fields only (there are none).
            return ContainerEssenceConsistencyTest(
        )

    def testContainerEssenceConsistencyTest(self):
        """Test ContainerEssenceConsistencyTest"""
        # Constructing both variants must not raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
| [
"cloudsupport@telestream.net"
] | cloudsupport@telestream.net |
5c90209a2a85242d66565dc74c3d13c88a2f10b7 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /DailyChallenge/LC_126.py | 6a71e599fd82c9936054243d450e4e182fae01a5 | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py |
class Solution:
    """Word Ladder II: enumerate every shortest transformation sequence."""

    def findLadders(self, beginWord: str, endWord: str, wordList):
        """Breadth-first search, one whole layer at a time.

        Each frontier dict maps a word to every shortest path that
        reaches it in the current layer; words consumed by a layer are
        removed from the candidate pool, so no visited-set is needed.
        """
        unused = set(wordList)
        paths_found = []
        frontier = collections.defaultdict(list)
        frontier[beginWord] = [[beginWord]]
        while frontier:
            next_frontier = collections.defaultdict(list)
            for word, paths in frontier.items():
                if word == endWord:
                    # Every path that reached the target is a shortest one.
                    paths_found.extend(paths)
                    continue
                for pos in range(len(word)):
                    for letter in string.ascii_lowercase:
                        candidate = word[:pos] + letter + word[pos + 1:]
                        if candidate not in unused:
                            continue
                        for path in paths:
                            next_frontier[candidate].append(path + [candidate])
            # Words reached in this layer may not be revisited later
            # (any later visit would produce a longer path).
            unused -= set(next_frontier)
            frontier = next_frontier
        return paths_found
| [
"taocheng984@gmail.com"
] | taocheng984@gmail.com |
48f18952541b70373ac9620dd16f19fbb00d011b | 53dd91c433c439b7714621f5b7b03f5fa7ef5362 | /metl/manipulation.py | a0910fe8565255493111cdfbb22b9cdae953d119 | [] | no_license | anilktechie/mETL | d49059961310e69327932f26d503e195f6e9a92a | f83711caba50cb9c94a7f77e7a56c6f8013b6037 | refs/heads/master | 2023-05-13T08:24:09.265481 | 2021-05-31T09:07:13 | 2021-05-31T09:07:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py |
# -*- coding: utf-8 -*-
"""
mETLapp is a Python tool for do ETL processes with easy config.
Copyright (C) 2013, Bence Faludi (b.faludi@mito.hu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, <see http://www.gnu.org/licenses/>.
"""
import metl.reader
class Manipulation( metl.reader.Reader ):
    """Base class for readers that wrap another Reader and transform
    (manipulate) the records it produces."""

    init = []          # constructor-argument names declared by subclasses
    use_args = False   # whether extra positional args are forwarded

    # void
    def __init__( self, reader, *args, **kwargs ):
        # Wrap *reader*; remaining arguments go to the Reader base class.
        self.reader = reader
        self.fieldset = {}
        super( Manipulation, self ).__init__( *args, **kwargs )

    # void
    def __iter__( self ):
        # NOTE(review): relies on the Reader base class providing the
        # iteration protocol's next()/__next__ — confirm in metl.reader.
        return self

    # Reader
    def getReader( self ):
        # Return the wrapped Reader instance.
        return self.reader

    # void
    def initialize( self ):
        # Initialize the wrapped reader first, then this manipulation.
        self.getReader().initialize()
        return super( Manipulation, self ).initialize()

    # void
    def finalize( self ):
        # Finalize the wrapped reader first, then this manipulation.
        self.getReader().finalize()
        return super( Manipulation, self ).finalize()

    # FieldSet
    def getFieldSetPrototypeCopy( self, final = True ):
        # Cache one prototype copy per value of *final*.
        # NOTE(review): setdefault evaluates its default eagerly, so the
        # wrapped reader's copy is built on every call, even on cache hits.
        self.fieldset.setdefault( str(final), self.getReader().getFieldSetPrototypeCopy( final = final ) )
        return self.fieldset[ str(final) ]
| [
"b.faludi@mito.hu"
] | b.faludi@mito.hu |
65c3bab2e7420705136ad6968b1f0240b1046e3e | 65424fc44dec137c0dab770969b329c75706207f | /profiles/migrations/0001_initial.py | 8b018e7904f83913498d255198dece56265dd939 | [] | no_license | asplesa/boutique_ado_v1 | ded96317a4ceaadf4c84f3061014c47bb681d639 | 7c47dc636e038edd421713951850f7cfd6b1fd3b | refs/heads/master | 2023-06-08T11:17:12.186414 | 2021-06-28T09:01:18 | 2021-06-28T09:01:18 | 378,146,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | # Generated by Django 3.2.4 on 2021-06-27 13:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the UserProfile table with
    # one row per auth user (OneToOne) and optional default delivery details.
    # Do not hand-edit field definitions; generate a follow-up migration instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('default_phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('default_country', django_countries.fields.CountryField(blank=True, max_length=2, null=True)),
                ('default_postcode', models.CharField(blank=True, max_length=20, null=True)),
                ('default_town_or_city', models.CharField(blank=True, max_length=40, null=True)),
                ('default_street_address1', models.CharField(blank=True, max_length=80, null=True)),
                ('default_street_address2', models.CharField(blank=True, max_length=80, null=True)),
                ('default_county', models.CharField(blank=True, max_length=80, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"reillys2004@gmail.com"
] | reillys2004@gmail.com |
b0ab0a394f153fee13caf873e077ff67746fe353 | 9202cff12bf1e9a5cb8a255af2a2acb2cf192abe | /run_command.py | b95f3e96cfa5dd626f92c4a873c18b1744a8cc25 | [] | no_license | Sparky-python/Arista_scripts | 1cc97296dadca9c80dc5b7ac9d49873234798083 | 84b3cd2bd20af83c18e2a3ba7c9a58661c9b8534 | refs/heads/master | 2022-06-27T08:41:50.085933 | 2022-05-11T16:50:49 | 2022-05-11T16:50:49 | 202,784,775 | 10 | 2 | null | 2019-08-20T16:28:42 | 2019-08-16T19:08:09 | null | UTF-8 | Python | false | false | 2,838 | py | #!/usr/bin/python3
import pyeapi
import argparse
import ssl
import ipaddress
# Older Pythons expose an "unverified" SSL context; when present, install it
# as the process-wide default so self-signed switch certificates are accepted.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context

# Command-line interface: ad-hoc config lines, a single interface to address,
# an address pool to draw /32s from, or a config file to apply (optionally negated).
parser = argparse.ArgumentParser()
parser.add_argument('--conf', required=False, action='append',
                    default=[], help='Config to apply to all switches')
parser.add_argument('--interface', required=False,
                    default='', help='Interface to configure')
parser.add_argument('--addr', required=False,
                    default='', help='Address range to use')
parser.add_argument('--config_file', required=False,
                    default='', help='File with config in to apply')
parser.add_argument('--remove', required=False, action='store_true',
                    default='', help='If used will remove the config in the specified file by adding "no" to each line of config')

args = parser.parse_args()
conf = args.conf
interface = args.interface
addr = args.addr
config_file = args.config_file
remove = args.remove

if addr:
    network_range = ipaddress.ip_network(addr)
    # from the given IP subnet, build a list of available /32 subnets
    available_addr = list(ipaddress.ip_network(network_range).subnets(prefixlen_diff=(32-network_range.prefixlen)))

# Find the home directory where the .eapi.conf file is located
from os.path import expanduser
home = expanduser("~")

hosts = []
# NOTE(review): n starts at 1, so available_addr[0] (the network address)
# is never assigned — confirm this is intentional.
n = 1
config_list = []
# Read the eapi.conf file and collect every hostname from its
# "[connection:<name>]" section headers into the 'hosts' list.
with open(home + "/.eapi.conf", "r") as file_object:
    for line in file_object:
        line = line.strip()
        if line.startswith("[connection:"):
            # Slice off the fixed prefix rather than using str.lstrip():
            # lstrip('[connection:') treats its argument as a CHARACTER SET
            # and would also eat leading hostname letters such as 'c' or 'o'
            # (e.g. "[connection:core1]" would yield "re1").
            hostname = line[len("[connection:"):].rstrip(']')
            hosts.append(hostname)
# Build the list of config lines to push; with --remove each line is negated
# by prefixing "no ".
if config_file:
    with open(config_file, 'r') as config_file_object:
        line = config_file_object.readline()
        while line:
            if remove:
                config_list.append("no " + line)
                line = config_file_object.readline()
            else:
                config_list.append(line)
                line = config_file_object.readline()

# Apply the requested change to every switch found in .eapi.conf.
for x in hosts:
    switch = pyeapi.connect_to(x)
    if conf:
        command = switch.config(conf)
    elif interface:
        # Assign the next free /32 from the pool to the named interface.
        command = switch.config(["interface " + interface, "ip address " + str(available_addr[n])])
        n += 1
    elif config_file:
        command = switch.config(config_list)
| [
"54187620+Sparky-python@users.noreply.github.com"
] | 54187620+Sparky-python@users.noreply.github.com |
d8fc65947417b208d1c4cd93208dd2f4781db939 | 4c4532660a65007a59c8e8b3d93492a812a93ea4 | /ProductTagging.py | bf3846c2f7a908750b4853d86347071de5d9bec8 | [] | no_license | Sapphirine/TrendingProductsRecommender | 48883d65159b2cb1a3bf7d0ff79d789a954ad258 | 3247d08b9b80d4b65f16fbb51d3fcecfacbdda97 | refs/heads/master | 2022-12-13T05:25:57.283771 | 2018-12-23T05:51:20 | 2018-12-23T05:51:20 | 162,863,704 | 0 | 3 | null | 2022-12-09T09:31:18 | 2018-12-23T05:02:07 | Jupyter Notebook | UTF-8 | Python | false | false | 22,906 | py | import multiprocessing
import nltk.corpus
import argparse
import logging as logging
import os
import json
import warnings
import getpass
import findspark
from datetime import datetime
from multiprocessing import Process, Pipe
from nltk.corpus import wordnet as wn
from Memoized import Memoized
findspark.init()
import pyspark
from pyspark.sql import SQLContext, Window
from pyspark.ml.feature import StringIndexer
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import col, lower, rand, row_number
from pyspark.ml.feature import RegexTokenizer, StopWordsRemover, CountVectorizer, ChiSqSelector
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
# Module-level logger (root logger; messages are emitted via .warning below).
LOGGER = logging.getLogger()

# Ensure spark-submit pulls in the Hortonworks HBase connector (shc-core)
# before any SparkContext is created.
os.environ['PYSPARK_SUBMIT_ARGS'] = (
    "--repositories http://repo.hortonworks.com/content/groups/public/ "
    "--packages com.hortonworks:shc-core:1.1.1-2.1-s_2.11 "
    " pyspark-shell")

# Download the WordNet corpus used by ProductAnalyzer.get_stopwords().
nltk.download('wordnet')
# Column schema of the Amazon customer-reviews TSV files
# (tab-separated, one review per row; all columns nullable).
schema = StructType([
    StructField("marketplace", StringType(), True),
    StructField("customer_id", StringType(), True),
    StructField("review_id", StringType(), True),
    StructField("product_id", StringType(), True),
    StructField("product_parent", StringType(), True),
    StructField("product_title", StringType(), True),  # "label" replaces "product_title"
    StructField("product_category", StringType(), True),
    StructField("star_rating", IntegerType(), True),
    StructField("helpful_votes", IntegerType(), True),
    StructField("total_votes", IntegerType(), True),
    StructField("vine", StringType(), True),
    StructField("verified_purchase", StringType(), True),
    StructField("review_headline", StringType(), True),
    StructField("review_body", StringType(), True),
    StructField("review_date", StringType(), True)])
# HBase data source (Hortonworks shc connector) and the table catalog that
# maps the "tweets" HBase table's column families/qualifiers to Spark columns.
DATA_SOURCE_FORMAT = 'org.apache.spark.sql.execution.datasources.hbase'
CATALOG = {
    "table": {"namespace": "default", "name": "tweets"},
    "rowkey": "row_id",
    "columns": {
        "row_id": {"cf": "rowkey", "col": "row_id", "type": "string"},
        "category": {"cf": "tweet", "col": "category", "type": "string"},
        "contributors": {"cf": "tweet", "col": "contributors", "type": "string"},
        "coordinates": {"cf": "tweet", "col": "coordinates", "type": "string"},
        "created_at": {"cf": "tweet", "col": "created_at", "type": "string"},
        "display_text_range": {"cf": "tweet", "col": "display_text_range", "type": "string"},
        "entities": {"cf": "tweet", "col": "entities", "type": "string"},
        "extended_tweet": {"cf": "tweet", "col": "extended_tweet", "type": "string"},
        "favorite_count": {"cf": "tweet", "col": "favorite_count", "type": "bigint"},
        "favorited": {"cf": "tweet", "col": "favorited", "type": "boolean"},
        "filter_level": {"cf": "tweet", "col": "filter_level", "type": "string"},
        "geo": {"cf": "tweet", "col": "geo", "type": "string"},
        "id": {"cf": "tweet", "col": "id", "type": "bigint"},
        "in_reply_to_screen_name": {"cf": "tweet", "col": "in_reply_to_screen_name", "type": "string"},
        "in_reply_to_status_id": {"cf": "tweet", "col": "in_reply_to_status_id", "type": "string"},
        "in_reply_to_status_id_str": {"cf": "tweet", "col": "in_reply_to_status_id_str", "type": "string"},
        "in_reply_to_user_id": {"cf": "tweet", "col": "in_reply_to_user_id", "type": "string"},
        "in_reply_to_user_id_str": {"cf": "tweet", "col": "in_reply_to_user_id_str", "type": "string"},
        "is_quote_status": {"cf": "tweet", "col": "is_quote_status", "type": "boolean"},
        "lang": {"cf": "tweet", "col": "lang", "type": "string"},
        "place": {"cf": "tweet", "col": "place", "type": "string"},
        "quote_count": {"cf": "tweet", "col": "quote_count", "type": "bigint"},
        "reply_count": {"cf": "tweet", "col": "reply_count", "type": "bigint"},
        "retweet_count": {"cf": "tweet", "col": "retweet_count", "type": "bigint"},
        "retweeted": {"cf": "tweet", "col": "retweeted", "type": "boolean"},
        "source": {"cf": "tweet", "col": "source", "type": "string"},
        "text": {"cf": "tweet", "col": "text", "type": "string"},
        "timestamp_ms": {"cf": "tweet", "col": "timestamp_ms", "type": "bigint"},
        "truncated": {"cf": "tweet", "col": "truncated", "type": "boolean"},
    }
}
# tokenizer = Tokenizer(inputCol="text", outputCol="words")
# cv = CountVectorizer(vocabSize=2**16, inputCol="words", outputCol='cv')
# idf = IDF(inputCol='cv', outputCol="features", minDocFreq=5) #minDocFreq: remove sparse terms
# label_stringIdx = StringIndexer(inputCol = "target", outputCol = "label")
# lr = LogisticRegression(maxIter=100)
# pipeline = Pipeline(stages=[tokenizer, cv, idf, label_stringIdx, lr])
#
# pipelineFit = pipeline.fit(train_set)
# predictions = pipelineFit.transform(val_set)
# accuracy = predictions.filter(predictions.label == predictions.prediction).count() / float(val_set.count())
# roc_auc = evaluator.evaluate(predictions)
#
# print("Accuracy Score: {0:.4f}".format(accuracy))
# print("ROC-AUC: {0:.4f}".format(roc_auc))
class ProductAnalyzer(object):
    """Builds and applies a Spark ML pipeline that classifies product
    reviews by product title (and is wired, via CATALOG, to HBase data)."""

    def __init__(self, sc: pyspark.SparkContext = None, input_file: str = None, model_prefix: str = None,
                 predictions_prefix: str = None, hdfs_base_url: str = None, category: str = None):
        """Store configuration; when *input_file* is given, load it eagerly.

        :param sc: active SparkContext (a SQLContext is derived from it)
        :param input_file: path to a reviews TSV; triggers load_data()
        :param model_prefix: filename prefix for persisted models
        :param predictions_prefix: filename prefix for persisted predictions
        :param hdfs_base_url: base HDFS URL for model/prediction output
        :param category: product category tag used in saved-model names
        """
        self._model_prefix = model_prefix
        self._predictions_prefix = predictions_prefix
        self._hdfs_base_url = hdfs_base_url
        self.trigram_model = None
        self.input_tweets = None
        self._source_data = None
        self._preprocess_pipeline = None
        self.predictions = None
        self._fitted_lr_model = None
        self.table_name = None
        self._category = category
        self._sc = sc
        if self._sc:
            # SQLContext is only available when a SparkContext was supplied.
            self._sqlc = SQLContext(self._sc)
        self._input_file = input_file
        if self._input_file:
            self.load_data()
    @property
    def source_data(self):
        """The current Spark DataFrame of loaded (and transformed) reviews."""
        return self._source_data

    def load_data(self):
        """Read the reviews TSV, drop unused columns and short reviews,
        and lower-case the review text into the `review_body` column."""
        LOGGER.warning('Loading {}'.format(self._input_file))
        self._source_data = self._sqlc.read.format('com.databricks.spark.csv').options(header='true',
                                                                                       schema=schema,
                                                                                       delimiter="\t").load(
            self._input_file).withColumnRenamed('review_body', 'review_body_raw')
        # Keep only product_title + review text; require > 200 chars of text.
        self._source_data = self._source_data \
            .dropna() \
            .drop('marketplace', 'customer_id', 'review_id', 'product_id', 'product_parent', 'product_category',
                  'star_rating', 'helpful_votes', 'total_votes', 'vine', 'verified_purchase', 'review_headline',
                  'review_date') \
            .where('length(review_body_raw)>200') \
            .withColumn("review_body", lower(col("review_body_raw"))) \
            .drop("review_body_raw")
        LOGGER.warning('Loaded {} product reviews from {}.'.format(self._source_data.count(), self._input_file))
    def resample_data(self):
        """Balance the corpus: keep products with > 1000 reviews, then
        randomly down-sample every product to the smallest product's count."""
        LOGGER.warning('Resampling data.')
        counts = self._source_data.groupBy('product_title').count().selectExpr("product_title as product_title_tmp",
                                                                               "count as count")
        self._source_data = self._source_data \
            .join(counts, self._source_data.product_title == counts.product_title_tmp) \
            .drop('product_title_tmp') \
            .where("count > 1000")
        min_count = self._source_data.groupBy("product_title").count().agg({"count": "min"}).collect()[0][0]

        # Take a random sample from each product's entries, with a sample size
        # equal to the size of the smallest corpus.
        w = Window.partitionBy(col("product_title")).orderBy(col("rnd_"))
        self._source_data = (self._source_data
                             .withColumn("rnd_", rand())  # Add random numbers column
                             .withColumn("rn_", row_number().over(w))  # Add rowNumber over window
                             .where(col("rn_") <= min_count)  # Take n observations
                             .drop("rn_")  # drop helper columns
                             .drop("rnd_"))
        LOGGER.warning('Resampled down to {} examples per product'.format(min_count))
    @staticmethod
    @Memoized
    def get_stopwords():
        """Build a StopWordsRemover whose stop list combines English
        stopwords, rating adjectives, single letters, and every WordNet
        adverb and adjective lemma (memoized — WordNet scan is expensive)."""
        list_adv = []
        list_adj = []
        list_n = []
        for s in wn.all_synsets():
            if s.pos() in ['r']:  # if synset is adverb
                for i in s.lemmas():  # iterate through lemmas for each synset
                    list_adv.append(i.name())
            elif s.pos() in ['a']:
                for i in s.lemmas():  # iterate through lemmas for each synset
                    list_adj.append(i.name())
            elif s.pos() in ['n']:  # if synset is noun
                for i in s.lemmas():  # iterate through lemmas for each synset
                    list_n.append(i.name())

        # remove stop words and irrelevant words
        add_stopwords = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours",
                         "yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it",
                         "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who",
                         "whom", "this", "that", "these", "those", "am", "is", "are", "was", "were", "be", "been",
                         "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and",
                         "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about",
                         "against", "between", "into", "through", "during", "before", "after", "above", "below", "to",
                         "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then",
                         "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few",
                         "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so",
                         "than", "too", "very", "s", "t", "can", "will", "just", "don", "should", "now", "ve", "se",
                         "didn", "hasn", "hadn", "hasnt", "isnt", "havent", "although", "despite", "however"]
        add_irrelevantwords = ["poor", "perfect", "good", "excellent", "excelent", "great", "horrible", "cheap",
                               "expensive", "different", "awesome"]
        single_alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
                           "s", "t", "u", "v", "w", "x", "y", "z"]
        # word_filter is the concatenated list of all unnecessary words
        # (note: list_n, the nouns, is deliberately NOT filtered out).
        word_filter = add_stopwords + add_irrelevantwords + single_alphabet + list_adv + list_adj
        return StopWordsRemover(inputCol="words", outputCol="filtered").setStopWords(word_filter)
def build_preprocess_pipeline(self, vocab_size=10000, min_df=5):
LOGGER.warning("Building preprocessing pipeline.")
# regular expression tokenizer
regex_tokenizer = RegexTokenizer(inputCol="review_body", outputCol="words", pattern="[^A-Za-z]+",
toLowercase=True)
stopwords = ProductAnalyzer.get_stopwords()
count_vectors = CountVectorizer(inputCol="filtered", outputCol="features", vocabSize=vocab_size, minDF=min_df)
label_string_idx = StringIndexer(inputCol="product_title", outputCol="label")
self._preprocess_pipeline = Pipeline(stages=[regex_tokenizer, stopwords, count_vectors, label_string_idx])
LOGGER.warning("Built preprocessing pipeline.")
return self._preprocess_pipeline
def fit_preprocess_pipeline(self, save: bool = True):
LOGGER.warning("Fitting preprocessing model.")
if self._preprocess_pipeline:
pipeline = self._preprocess_pipeline
else:
pipeline = self.build_preprocess_pipeline()
fitted = pipeline.fit(self.source_data)
if save:
# ProductAnalyzer.save_model(fitted,
# self._hdfs_base_url + 'models/' +
# self._model_prefix + '_preprocess_pipeline.model')
ProductAnalyzer.save_model(fitted,
self._hdfs_base_url +
'models/' +
self._model_prefix +
"_" +
self._category +
'_fit_preprocess_pipeline.model')
self._source_data = fitted.transform(self.source_data).where('label < 10').drop('count', 'words', 'filtered')
LOGGER.warning("Fitted preprocessing model.")
return fitted
@staticmethod
def run_feature_selection_on(data):
LOGGER.warning("Running feature selection.")
selector = ChiSqSelector(numTopFeatures=10, featuresCol="features", outputCol="selectedFeatures",
labelCol="label")
data = selector.fit(data).transform(data).drop(
'features').withColumnRenamed('selectedFeatures', 'features')
LOGGER.warning("Ran feature selection.")
return data
def run_feature_selection(self):
return self.run_feature_selection_on(self._source_data)
def train(self, train_set, save: bool = True):
LOGGER.warning("Training.")
lrt = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0.8)
param_grid = (ParamGridBuilder()
.addGrid(lrt.regParam, [0.1, 0.3, 0.5]) # regularization parameter
.addGrid(lrt.elasticNetParam, [0.0, 0.1, 0.2]) # Elastic Net Parameter (Ridge = 0)
.build())
# 5-fold CrossValidator
cv = CrossValidator(estimator=lrt, estimatorParamMaps=param_grid, evaluator=MulticlassClassificationEvaluator(),
numFolds=5)
self._fitted_lr_model = cv.fit(train_set)
if save:
ProductAnalyzer.save_model(self._fitted_lr_model,
self._hdfs_base_url +
'models/' +
self._model_prefix +
"_" +
self._category +
'_fitted_lr.model')
LOGGER.warning("Training complete.")
return self._fitted_lr_model
def load_model(self, fieldname, load_file, klass=PipelineModel):
LOGGER.warning("Loading model from {} into {}".format(load_file, fieldname))
setattr(self, fieldname, klass.load(load_file))
return getattr(self, fieldname)
@staticmethod
def save_model(model, save_file):
model.write().overwrite().save(save_file)
def validate(self, val_set):
LOGGER.warning("Validating.")
evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
predictions = self._fitted_lr_model.transform(val_set)
evaluation = evaluator.evaluate(predictions)
LOGGER.warning("Validation complete.")
return evaluation
def predict(self, data):
LOGGER.warning("Predicting.")
predictions = self._fitted_lr_model.transform(data)
self.predictions = predictions
return predictions
def read_table(self, table_name: str):
LOGGER.warning("Reading from input table {}.".format(table_name))
catalog = CATALOG
catalog['table']['name'] = table_name
self.input_tweets = self._sqlc.read.options(catalog=json.dumps(catalog)).format(DATA_SOURCE_FORMAT).load()
return self.input_tweets
def save_predictions(self, predictions=None, table_name: str = None):
table_name = table_name or self.table_name
predictions = predictions or self.predictions
if predictions is None:
raise ValueError('No predictions provided or saved in the analyzer.')
if table_name is None:
raise ValueError('No hbase table_name provided or saved in the analyzer.')
predictions.select('row_id', 'prediction').write.options(
catalog=json.dumps(
{
"table": {
"namespace": "default",
"name": table_name},
"rowkey": "row_id",
"columns": {
"row_id": {
"cf": "rowkey", "col": "row_id", "type": "string"
},
"prediction": {
"cf": "tweet", "col": "product", "type": "string"
}
}
}),
newtable=5).format(DATA_SOURCE_FORMAT).save()
def train_val_split(data):
return data.randomSplit([0.85, 0.15], seed=100)
def run_analyzer(hbase_table, model_prefix, predictions_prefix, inputs, pipe, hdfs_base_url):
try:
sc = pyspark.SparkContext()
sc.setLogLevel("ERROR")
except ValueError as err:
warnings.warn("SparkContext already exists in this scope")
raise err
analyzer = ProductAnalyzer(sc, inputs[0], model_prefix, predictions_prefix, hdfs_base_url, inputs[2])
analyzer.resample_data()
preprocessor = analyzer.build_preprocess_pipeline()
analyzer.fit_preprocess_pipeline()
processed_data = analyzer.run_feature_selection()
train, test = train_val_split(processed_data)
trained = analyzer.train(train)
LOGGER.warning('Accuracy: {}'.format(analyzer.validate(test)))
tweets = analyzer.read_table(hbase_table).where(col('category') == inputs[2])
preprocessed = preprocessor.fit(tweets).transform(tweets).where(
'label < 10').drop('count', 'words', 'filtered')
preprocessed = ProductAnalyzer.run_feature_selection_on(preprocessed)
predictions = analyzer.predict(preprocessed)
predictions.show()
selected = trained.select("product_title", "label")
distinct_pts = selected.alias('distinct_pts').selectExpr('product_title as predicted_pt',
'label as lookup_label').distinct()
predicted_pts = predictions.alias('predicted_pts').select('prediction', 'row_id')
joined = predicted_pts.join(distinct_pts, predicted_pts.prediction == distinct_pts.lookup_label, 'left')
joined.orderBy(rand()).show(1000)
analyzer.save_predictions(
predictions=joined.drop('prediction').drop('lookup_label').withColumnRenamed('predicted_pt', 'prediction'),
table_name=hbase_table)
pipe.send(True)
pipe.close()
def main(hbase_table, model_prefix, predictions_prefix, reviews_files, process_names, hdfs_base_url, categories):
process_and_pipes = []
for p in zip(reviews_files, process_names, categories):
parent_conn, child_conn = Pipe()
proc = Process(target=run_analyzer,
args=(hbase_table, model_prefix, predictions_prefix, p, child_conn, hdfs_base_url),
name=p[1])
process_and_pipes.append(
(proc, parent_conn)
)
proc.start()
results = []
for p in process_and_pipes:
pipe = p[1]
proc = p[0]
results.append(pipe.recv())
pipe.close()
proc.join()
LOGGER.warning('Results: {}'.format(results))
print("Product Tagging Complete.")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Twitter streamer parameters.')
parser.add_argument('-l', '--loglevel', help='One of: DEBUG, INFO, WARNING, ERROR, CRITICAL', type=str,
default='WARNING')
parser.add_argument('--logfile', help='Filename to write to. If not specified, write logs to stderr', type=str,
default=None)
parser.add_argument('--hbase_table', help='Name of HBase table to which tweets will be written', type=str,
default='20181210_224422_alexdziena_tweets')
parser.add_argument('--model_prefix', help='Prefix for model filenames', type=str,
default='{}_{}_product_analyzer'.format(datetime.now().strftime("%Y%m%d_%H%M%S"),
getpass.getuser()))
parser.add_argument('--predictions_prefix', help='Prefix for predictions filenames', type=str,
default='{}_{}_product_analyzer'.format(datetime.now().strftime("%Y%m%d_%H%M%S"),
getpass.getuser()))
parser.add_argument('--hdfs_base_url', help='Base url for interacting with hdfs', type=str,
default='hdfs://big-data-analytics:1234/')
parser.add_argument('-f', '--reviews_files',
help='space delimited list of file paths of product reviews, minimum of one path', nargs='+',
type=str)
parser.add_argument('-n', '--process_names',
help=('space delimited list of file of process names, used for model saving filepaths, '
'one per review file'),
nargs='+',
type=str)
parser.add_argument('-c', '--categories',
help=('space delimited list of categories to map to review files, '
'one per review file'),
nargs='+',
type=str)
args = parser.parse_args()
loglevel = args.loglevel.upper()
if args.logfile is None:
logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S', format='%(asctime)s %(levelname)-8s %(message)s',
level=loglevel)
else:
logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S', format='%(asctime)s %(levelname)-8s %(message)s',
filename=args.logfile, level=loglevel)
LOGGER = multiprocessing.log_to_stderr()
LOGGER.info(args)
# hbase_table_prefix = None if str.lower(args.hbase_table_prefix) == 'none' else args.hbase_table_prefix
main(args.hbase_table, args.model_prefix, args.predictions_prefix, args.reviews_files, args.process_names,
args.hdfs_base_url, args.categories)
| [
"alexdziena@gmail.com"
] | alexdziena@gmail.com |
6f44aa3d14b8e09dd70cc8cf2573ec85284cc3a8 | 903a813229812ae3d6a649eba4bfcec529dabc31 | /notes/4.py | 7fe5970c5af22a975bca309b11406e84379a67e6 | [] | no_license | tianya1/django | 4303da9bfcff8644e27b9d7f5ed949a1c46aa80a | c8bf3438c8a8cbcf7c85fd6390f8b67009a9fb36 | refs/heads/master | 2020-05-05T11:09:00.347225 | 2019-04-07T14:27:01 | 2019-04-07T14:27:01 | 179,976,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | # -*- coding: utf-8 -*-
# @Author: Marte
# @Date: 2019-03-20 09:02:44
# @Last Modified by: Marte
# @Last Modified time: 2019-03-20 09:12:31
scrapy startproject douban
scrapy genspider douban_spider movie.douban.com
| [
"noreply@github.com"
] | tianya1.noreply@github.com |
4e7eb91fe1d09211b9bd1a08ad237e37699b1484 | ac549e553263801bdc6962a10ebbe784dc2631df | /Python/graphs/traversal.py | e3e6b65ebfcfc36492062561afd6ccc02a61bcd2 | [] | no_license | Bishal44/DataStructure | e595890d18bde39e65f02a7ca3a6904c6070c3c8 | 939c47de6dcfe3b2578aaa0610d3cdc5726572c7 | refs/heads/master | 2020-09-10T22:40:46.368607 | 2020-03-28T12:15:08 | 2020-03-28T12:15:08 | 221,854,694 | 0 | 0 | null | 2019-12-10T15:47:45 | 2019-11-15T05:59:40 | Python | UTF-8 | Python | false | false | 1,863 | py | '''
Created on Sat Jan 11 2020
'''
graph = {'A': set(['B', 'C', 'F']),
'B': set(['A', 'D', 'E']),
'C': set(['A', 'F']),
'D': set(['B']),
'E': set(['B', 'F']),
'F': set(['A', 'C', 'E'])}
# dfs and bfs are the ultimately same except that they are visiting nodes in
# different order. To simulate this ordering we would use stack for dfs and
# queue for bfs.
#
def dfs_traverse(graph, start):
visited, stack = set(), [start]
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
for nextNode in graph[node]:
if nextNode not in visited:
stack.append(nextNode)
return visited
# print(dfs_traverse(graph, 'A'))
def bfs_traverse(graph, start):
visited, queue = set(), [start]
while queue:
node = queue.pop(0)
if node not in visited:
visited.add(node)
for nextNode in graph[node]:
if nextNode not in visited:
queue.append(nextNode)
return visited
# print(bfs_traverse(graph, 'A'))
def dfs_traverse_recursive(graph, start, visited=None):
if visited is None:
visited = set()
visited.add(start)
for nextNode in graph[start]:
if nextNode not in visited:
dfs_traverse_recursive(graph, nextNode, visited)
return visited
# print(dfs_traverse_recursive(graph, 'A'))
# def find_path(graph, start, end, visited=[]):
# # basecase
# visitied = visited + [start]
# if start == end:
# return visited
# if start not in graph:
# return None
# for node in graph[start]:
# if node not in visited:
# new_visited = find_path(graph, node, end, visited)
# return new_visited
# return None
# print(find_path(graph, 'A', 'F')) | [
"bhattaraibishal704@gmail.com"
] | bhattaraibishal704@gmail.com |
97c825d02fcd6674a5d3500557278e550a12ee98 | 24ec32878c9a119bc9b1327c13deb2bb308793eb | /235_assn4.py | 0b4e6aba5c536bbb50158464a1cd453745d51a45 | [] | no_license | JulieSchneiderman/Prim-vs-Breadth-first-search | c84def4ed98c35bca163d1a65ef8253298240b84 | 8285018eccf5be38fcdc893c8b9c0a658a1aabe3 | refs/heads/master | 2021-01-19T13:31:54.172824 | 2017-04-12T21:45:33 | 2017-04-12T21:45:33 | 88,098,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,999 | py | #CISC 235 Assignment 4
#Julie Schneiderman - 10201092
''' “I confirm that this
submission is my own work and is consistent with the Queen's regulations on Academic
Integrity.”'''
import random
#Creates a Graph object with the following attributes
class Graph:
def __init__(self):
self.vertexList = {}
self.numVertices = 0
def addVertex(self,key):
self.numVertices = self.numVertices + 1
newVertex = Vertex(key)
self.vertexList[key] = newVertex
return newVertex
def addEdge(self,f,t,cost=0):
if f not in self.vertexList:
newVertex = self.addVertex(f)
if t not in self.vertexList:
newVertex = self.addVertex(t)
self.vertexList[f].addNeighbor(self.vertexList[t], cost)
def getVertices(self):
return self.vertexList.keys() #vertices
def __iter__(self):
return iter(self.vertexList.values()) #weights
#Creates a Vertex object with the following attributes
class Vertex:
def __init__(self,key):
self.vNum = key #vertex
self.connectedTo = {} #dictionary for vertex's neigbours
self.inT = False
self.connector = None
self.cost = None
def addNeighbor(self,nbr,weight=0):
self.connectedTo[nbr] = weight
def getConnections(self):
return self.connectedTo.keys()
def getVNum(self):
return self.vNum
def getWeight(self,nbr):
return self.connectedTo[nbr]
#Creates a Queue object with the following attributes
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def insert(self, item):
self.items.append(item)
def remove_first(self):
return self.items.pop()
#Breadth-first search function
#input - graph object and a random starting vertex
#output - the total weight of the edges the search selects
def bfs(g,start):
Q = Queue()
total = 0
visited = [False]*len(g.vertexList)
#set start as visited
visited[start] = True
Q.insert(g.vertexList[start]) #add it to the queue
while (not(Q.isEmpty())):
y = Q.remove_first()
for x in y.getConnections():
vNum = x.vNum
if visited[vNum] == False:
visited[vNum] = True
Q.insert(x)
total += x.getWeight(y)
return total
#Prims algorithm
#input - graph object
#ouput - the total weight of the edges it selects
def prim(g):
v = g.vertexList[0]
totalWeight = 0
v.inT = True
v.cost = 100000 #infinity
for y in g.getVertices():
vert = g.vertexList[y]
vert.inT == False
vert.cost = 100000
vert.connector = None
for x in v.getConnections():
vNum = x.vNum
vert = g.vertexList[vNum]
vert.cost = v.getWeight(vert)
vert.connector = 0
for count in range(1,(len(g.vertexList))):
smallestCost = 100000
for x in g.getVertices():
vert = g.vertexList[x]
if vert.inT == False:
if vert.cost < smallestCost:
smallestV = vert
smallestCost = vert.cost
totalWeight += smallestCost
smallestV.inT = True
smallestV.cost = 100000
smallestV.connector = None
for y in smallestV.getConnections():
vNum = y.vNum
vert = g.vertexList[vNum]
if vert.inT == False:
if vert.getWeight(smallestV) < vert.cost:
vert.cost = vert.getWeight(smallestV)
vert.connector = smallestV
return totalWeight
#runs the search algorithms and sends a list of all the % differences to
#getAvgPercentDifference()
def comparison():
dlist = []
for k in range(250): #number of random graphs generated
g = Graph() #new graph object
for i in range(2,62): #create graph with (2,n) vertices
x = random.randint(1,i-1)
S = random.sample(range(i-1),x)
for s in S:
w = random.randint(10,100)
g.addEdge(i,s,w) #new vertex i, old vertex s, weight w
g.addEdge(s,i,w) #creates edge going in both directions
start = random.randint(1,20)
B = bfs(g,start)
P = prim(g)
Diff = (B/P -1)*100 #Percent Different Calculation
dlist.append(Diff)
#print("bfs total weight: ", B)
#print("prim total weight: ", P)
#print("% Diff is: ",Diff,"\n")
print("Average Percent Difference is", getAvgPercentDifference(dlist), "%")
#computes the results and returns the average percent difference
def getAvgPercentDifference(lst):
print("results:")
num = 0
for i in lst:
num +=i
averagePercentDifference = num/len(lst)
averagePercentDifference = round(averagePercentDifference,3)
return averagePercentDifference
def main():
comparison()
main()
| [
"noreply@github.com"
] | JulieSchneiderman.noreply@github.com |
71e14075bfcc9914d593bfe86486059770768c7e | 6a4fbd39640f2b249a0643e7de452e7a50498a93 | /env/bin/wheel | 4757f727641be52433626a6cc75ecfd78373cc8a | [] | no_license | choudharyab/python_django | 4d9cb27c45f257b2a369b61fbcf16d873ac7694c | ce382fc7278dfb8e52c597b22f8d3a77a43e71bd | refs/heads/master | 2020-03-18T06:35:14.321941 | 2018-05-30T04:50:15 | 2018-05-30T04:50:15 | 134,404,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | #!/home/tecture7/pharajinny/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"anup.choudhary@tecture.in"
] | anup.choudhary@tecture.in | |
3ea090fd1788488e4f260ee6ffb0662f85939cc4 | 89859d3866af3ce486b3027de6d59ea746796219 | /wipctv/__init__.py | 9420af8b9665da560e2994996f75e6a31d14f5b2 | [
"Apache-2.0"
] | permissive | a4saha/wipctv | 1e2795484733ef37ba75d73ba94718fc59d32497 | f289e306a9ea51c17853c58fd7e0af84dd8764cc | refs/heads/master | 2023-03-02T09:58:32.502254 | 2021-02-10T05:43:36 | 2021-02-10T05:43:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | #
# Copyright (c) 2019-2020 StephLin.
#
# This file is part of wipctv
# (see https://gitea.mcl.math.ncu.edu.tw/StephLin/wipctv).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
| [
"stephen359595@gmail.com"
] | stephen359595@gmail.com |
aeb610b09b0c9d2cd3f230690fa462bbab915093 | f125a883dbcc1912dacb3bf13e0f9263a42e57fe | /tsis1/Loop for/3532.py | e1fad768f54887c4a2ca4540e551ec7aadfa2c56 | [] | no_license | AruzhanBazarbai/pp2 | 1f28b9439d1b55499dec4158e8906954b507f04a | 9d7f1203b6735b27bb54dfda73b3d2c6b90524c3 | refs/heads/master | 2023-07-13T05:26:02.154105 | 2021-08-27T10:20:34 | 2021-08-27T10:20:34 | 335,332,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | #Задача №3532. Сумма кубов
n=int(input())
cnt=0
for i in range(1,n+1):
cnt+=i**3
print(cnt) | [
"aruzhanart2003@mail.ru"
] | aruzhanart2003@mail.ru |
b08ad2fefef80365d87004cef4629d3c62aa60b3 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/legend/_traceorder.py | d5fe177e6cf14ddf521d4e55b0eef9d2d0fa8d2e | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 532 | py | import _plotly_utils.basevalidators
class TraceorderValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="traceorder", parent_name="layout.legend", **kwargs):
super(TraceorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
extras=kwargs.pop("extras", ["normal"]),
flags=kwargs.pop("flags", ["reversed", "grouped"]),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
a1aaf46dd9471e77d464bc3887a9b0526889eeef | 343a1a47f71d3798b63f6d3fe3a69aa4b765fc67 | /test_query.py | fc3c8bd06b3d31d3438835aa9d09cf77aaa88ca0 | [
"BSD-2-Clause",
"MIT"
] | permissive | kosugi/wox.y-transit | d4c1b47d04994d9e37ef9e18451c097102f9d39c | 7a5c1ff98406cf54a30db1518ea73c2ec3f9efba | refs/heads/master | 2022-05-22T00:49:17.225837 | 2020-04-29T07:22:10 | 2020-04-29T07:22:10 | 259,849,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | # -*- coding: utf-8 -*-
import unittest
import re
from query import *
class QueryTestCase(unittest.TestCase):
def test_parse_names(self):
self.assertEqual(None, parse_names(''))
self.assertEqual(None, parse_names(' '))
self.assertEqual(None, parse_names('\t'))
self.assertEqual(None, parse_names('\r'))
self.assertEqual(None, parse_names('\n'))
self.assertEqual(None, parse_names('a'))
self.assertEqual(None, parse_names(' a'))
self.assertEqual(None, parse_names(' a\t'))
self.assertEqual(None, parse_names(' a\t '))
self.assertEqual(('a', 'b'), parse_names(' a b'))
self.assertEqual(('a', 'b'), parse_names(' a b '))
self.assertEqual(('a', 'b'), parse_names(' a b '))
self.assertEqual(('a', 'b'), parse_names('a-b'))
self.assertEqual(('a', 'b'), parse_names('a - b'))
self.assertEqual(('a', 'b'), parse_names('a〜b'))
self.assertEqual(('a', 'b'), parse_names('a~b'))
self.assertEqual(('a', 'b'), parse_names('a-b'))
self.assertEqual(('a', 'b'), parse_names('a - b'))
self.assertEqual(None, parse_names(' a b c'))
def test_do(self):
self.assertEqual({'valid': False, 'title': 'type “from” and “to” station names', 'url': ''}, do(''))
self.assertEqual({'valid': False, 'title': 'type “from” and “to” station names', 'url': ''}, do(' a '))
self.assertEqual({'valid': True, 'title': 'Query routes from a to b', 'url': 'http://transit.yahoo.co.jp/search/result?from=a&to=b'}, do(' a b '))
if __name__ == '__main__':
unittest.main()
| [
"tomo@cry.ptomeria.com"
] | tomo@cry.ptomeria.com |
fdc1687c4cb91d245a23364be90633d4ced12a20 | 7bb07f688757cc0e4a9a58540e3a3a5ce1de3219 | /multirotor/src/AgentHelpers.py | 4d9d77c847ccb3e06dbda93aa0599b01d80a32cd | [] | no_license | rafi-vivanti/rl | ba7b62af1000a4c366c8b77d2ae212ef9005f41a | d082347428464923c138dfbf6b6f73fe6ff0d651 | refs/heads/master | 2021-05-11T05:57:16.876500 | 2018-01-14T19:08:34 | 2018-01-14T19:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | from Common import *
from VectorMath import *
def getPosition(**kwargs):
return kwargs['agent'].client.getPosition().toNumpyArray()
def getOrientation(**kwargs):
return Quaternion(kwargs['agent'].client.getOrientation().toNumpyArray())
def getVelocity(**kwargs):
return kwargs['agent'].client.getVelocity().toNumpyArray()
def getAngularVelocity(**kwargs):
return kwargs['agent'].client.getAngularVelocity().toNumpyArray()
def getAngularAcceleration(**kwargs):
return kwargs['agent'].client.getAngularAcceleration().toNumpyArray()
def getLinearAcceleration(**kwargs):
return kwargs['agent'].client.getLinearAcceleration().toNumpyArray()
def isGoal(**kwargs):
return kwargs['agent'].getState().areEqual(kwargs['agent'].goal, kwargs['agent'].goalMargins) if kwargs[
'agent'].getGoal() else False
def getCollisionInfo(**kwargs):
return kwargs['agent'].hitObstacleFlag
def getHorizontalDistance(p1, p2):
return ((p1.x - p2.x) ** 2 +
(p1.y - p2.y) ** 2) ** 0.5
def getHorizontalDistanceGoal(**kwargs):
return ((kwargs['agent'].getGoal().position[0] - kwargs['partialUpdate'].position[0]) ** 2 +
(kwargs['agent'].getGoal().position[1] - kwargs['partialUpdate'].position[1]) ** 2) ** 0.5
def onPress(key, token):
token.update(key)
def onRelease(key, token):
token.clear()
| [
"talaataboudakika@std.sehir.edu.tr"
] | talaataboudakika@std.sehir.edu.tr |
cf2f44b57da8e5e964a6491b7e038f64ac47a973 | 86bad28b490267101c11cb480ddad9f09805622b | /keras/keras39_cifar10_3_DNN.py | feea313e8c7dfa93ac36347e76652f8a7edb54aa | [] | no_license | minijaypark/bit_seoul | f097180929e15309e0cf44faa8ea75d954e5d629 | 86ddc9198fd05c234c422c7fbcae26fbf955276f | refs/heads/master | 2023-02-15T21:57:53.737352 | 2020-12-09T11:19:19 | 2021-01-18T14:46:27 | 311,254,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | # One Hot Encoding
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(50000, 32 * 32 * 3).astype('float32')/255
x_test = x_test.reshape(10000, 32 * 32 * 3).astype('float32')/255
model = Sequential()
model.add(Dense(256, input_shape=(32 * 32 * 3,)))
model.add(Dense(512))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))
model.summary()
es = EarlyStopping(monitor='loss', patience=5, mode='auto')
to_hist = TensorBoard(log_dir='graph', histogram_freq=0,
write_graph=True, write_images=True)
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=100, batch_size=32,
verbose=1, validation_split=0.2, callbacks=[es, to_hist])
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss: ", loss)
print("acc:", acc)
y_predict = model.predict(x_test[:10])
print("y predicts: ")
print([np.argmax(y, axis=None, out=None) for y in y_predict])
print()
print("real y's")
print([np.argmax(y, axis=None, out=None) for y in y_test[:10]])
| [
"minijaiypark@gmail.com"
] | minijaiypark@gmail.com |
eca636f88013040d91112536d490c4cc646e4f0c | c00b2665d0f00070e17080785bebc940b69a0022 | /model.py | 6cb50d786dad03a6158092fbb406de0bb7878819 | [
"MIT"
] | permissive | mohsin5432/football-team-score-monitoring-flask | b0bab3542a21cdb3bc79ff8b704625b4af7f3fb1 | c4c62e5e27d2b324625117bb4054e5f1ddad4425 | refs/heads/main | 2023-05-17T01:36:22.051556 | 2021-06-03T16:14:22 | 2021-06-03T16:14:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,346 | py | import sqlite3
def pass_check(username):
connection = sqlite3.connect('insta.db' , check_same_thread = False)
cursor = connection.cursor()
cursor.execute(""" SELECT password FROM users WHERE username='{username}' ORDER BY pk DESC;""".format(username = username))
passs = cursor.fetchone()
if passs is None:
return "invalid"
connection.commit()
cursor.close()
connection.close()
else:
password = passs and passs[0]
connection.commit()
cursor.close()
connection.close()
return password
def name(username):
    """Return the full name (fname) stored for *username*, or None if absent.

    Security fix: uses a parameterized query instead of interpolating
    *username* into the SQL string (SQL injection in the original).
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT fname FROM users WHERE username=? ORDER BY pk DESC;",
            (username,))
        row = cursor.fetchone()
        cursor.close()
    finally:
        connection.close()
    # Original semantics preserved: `row and row[0]` yields None on a miss.
    return row and row[0]
def intro(username):
    """Return the intro text for *username*.

    Returns "" when no row exists for the user (original behavior); returns
    None when the row exists but its intro column is NULL.

    Security fix: parameterized query instead of string interpolation.
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT intro FROM users WHERE username=? ORDER BY pk DESC;",
            (username,))
        row = cursor.fetchone()
        cursor.close()
    finally:
        connection.close()
    return "" if row is None else row[0]
def addintro(username, fname, intro):
    """Set the full name and intro text for *username*; return a status string.

    Security fix: parameterized UPDATE instead of interpolating the three
    user-supplied values into the SQL string (SQL injection in the original).
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "UPDATE users SET fname = ?, intro = ? WHERE username = ?;",
            (fname, intro, username))
        connection.commit()
        cursor.close()
    finally:
        connection.close()
    return 'you have successfully updated profile'
def delintro(username, intro):
    """Clear the intro column for *username*; return a status string.

    *intro* is accepted only for backward compatibility with existing
    callers — the column is simply set to NULL, so the value is unused
    (matching the original, whose SQL never referenced it either).

    Security fix: parameterized query instead of interpolating *username*.
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "UPDATE users SET intro = NULL WHERE username = ?;",
            (username,))
        connection.commit()
        cursor.close()
    finally:
        connection.close()
    return 'you have successfully deleted intro'
def signup(email, fname, username, password):
    """Create a new user unless *username* is taken; return a status string.

    Security fix: parameterized queries instead of interpolating the four
    user-supplied values into the SQL (SQL injection in the original).
    Also fixes a connection leak: the original returned 'User Already
    existed' without closing the connection.
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT password FROM users WHERE username=?;", (username,))
        if cursor.fetchone() is not None:
            cursor.close()
            return 'User Already existed'
        cursor.execute(
            "INSERT INTO users(email,fname,username,password,date) "
            "VALUES(?,?,?,?,(CURRENT_TIMESTAMP));",
            (email, fname, username, password))
        connection.commit()
        cursor.close()
    finally:
        connection.close()
    return 'you have successfully signed up'
def check_users():
    """Return all usernames, newest first (ORDER BY pk DESC).

    Idiom fix: replaces the index-based ``for i in range(len(...))`` loop
    with a comprehension that unpacks the 1-tuples sqlite returns. Cleanup
    is moved into ``finally`` so the connection is closed even on error.
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT username from users ORDER BY pk DESC;")
        rows = cursor.fetchall()
        cursor.close()
    finally:
        connection.close()
    return [row[0] for row in rows]
def totalusers():
    """Return the total number of rows in the users table."""
    conn = sqlite3.connect('insta.db', check_same_thread=False)
    cur = conn.cursor()
    cur.execute("SELECT COUNT(pk) FROM users;")
    row = cur.fetchone()
    conn.commit()
    cur.close()
    conn.close()
    # COUNT() always yields one row; `row and row[0]` mirrors the original
    # None-safe unpacking.
    return row and row[0]
def totalusersrecord():
    """Return every row of the users table as a list of tuples."""
    conn = sqlite3.connect('insta.db', check_same_thread=False)
    cur = conn.cursor()
    cur.execute("SELECT * FROM users;")
    rows = cur.fetchall()
    conn.commit()
    cur.close()
    conn.close()
    return rows
def totalteams():
    """Return the total number of rows in the teams table."""
    conn = sqlite3.connect('teams.db', check_same_thread=False)
    cur = conn.cursor()
    cur.execute("SELECT COUNT(pk) FROM teams;")
    row = cur.fetchone()
    conn.commit()
    cur.close()
    conn.close()
    # None-safe unpack, matching the original `x and x[0]` idiom.
    return row and row[0]
def teamranks():
    """Return all team rows ordered by score, highest first."""
    conn = sqlite3.connect('teams.db', check_same_thread=False)
    cur = conn.cursor()
    cur.execute("SELECT * FROM teams ORDER BY score DESC;")
    ranked = cur.fetchall()
    conn.commit()
    cur.close()
    conn.close()
    return ranked
def totalusers24():
    """Return how many users registered within the last 24 hours."""
    conn = sqlite3.connect('insta.db', check_same_thread=False)
    cur = conn.cursor()
    # `date` is stored via CURRENT_TIMESTAMP (UTC), so compare against
    # SQLite's `datetime('now', '-1 day')`, which is also UTC.
    cur.execute("SELECT COUNT(pk) FROM users WHERE date >= datetime('now','-1 day');")
    row = cur.fetchone()
    conn.commit()
    cur.close()
    conn.close()
    return row and row[0]
def totalusersrecord24():
    """Return the full rows of users registered within the last 24 hours."""
    conn = sqlite3.connect('insta.db', check_same_thread=False)
    cur = conn.cursor()
    # Timestamps are UTC on both sides of the comparison.
    cur.execute("SELECT * FROM users WHERE date >= datetime('now','-1 day');")
    recent = cur.fetchall()
    conn.commit()
    cur.close()
    conn.close()
    return recent
def deluser(username):
    """Delete all user rows matching *username*. Returns None.

    Security fix: parameterized DELETE. The original interpolated
    *username* into the SQL string, so a crafted username could delete
    arbitrary rows (SQL injection).
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute("DELETE FROM users WHERE username = ?;", (username,))
        connection.commit()
        cursor.close()
    finally:
        connection.close()
def admpass_check(username):
    """Return the admin password stored for *username*, or None if absent.

    Security fix: parameterized query — especially important for the admin
    login path, which the original exposed to SQL injection.
    """
    connection = sqlite3.connect('admin.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT password FROM admin WHERE username=? ORDER BY pk DESC;",
            (username,))
        row = cursor.fetchone()
        cursor.close()
    finally:
        connection.close()
    # `row and row[0]` -> None on a miss, matching the original.
    return row and row[0]
def addteam(teamname, wins, defeat, draws):
    """Insert a new team with derived totals; refuse duplicates.

    score = 3 points per win + 1 per draw; tmatches = wins+defeats+draws.
    Returns a success message, or a notice when the team already exists.

    Fixes over the previous version: parameterized SQL (the old code
    interpolated user input with str.format — SQL injection), and the
    connection/cursor are now closed on the duplicate path too (they
    used to leak because of the early return).
    """
    score = (int(wins)*3)+(int(draws)*1)
    tmatches = int(wins)+int(defeat)+int(draws)
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT teamname FROM teams WHERE teamname=?;", (teamname,))
        exist = cursor.fetchone()
        if exist is None:
            cursor.execute(
                "INSERT INTO teams(teamname,tmatches,wins,draws,defeat,score,date)"
                "VALUES(?,?,?,?,?,?,(CURRENT_TIMESTAMP));",
                (teamname, tmatches, wins, draws, defeat, score))
            connection.commit()
        else:
            return 'TEAM Already existed'
    finally:
        cursor.close()
        connection.close()
    return 'you have successfully ADDED TEAM'
def delteam(teamname):
    """Delete the team(s) named *teamname* from teams.db.

    Bug fix: the previous version formatted the SQL with an undefined
    name (`id_data`), so every call raised NameError. The query is also
    parameterized now instead of string-interpolated (SQL injection).
    """
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("DELETE FROM teams WHERE teamname = ?;", (teamname,))
    connection.commit()
    cursor.close()
    connection.close()
def wins(teamname):
    """Return the recorded win count for *teamname*, or None if absent.

    Parameter binding replaces str.format interpolation (SQL injection).
    """
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("SELECT wins FROM teams WHERE teamname=? ORDER BY pk DESC;",
                   (teamname,))
    name = cursor.fetchone()
    fname = name and name[0]
    connection.commit()
    cursor.close()
    connection.close()
    return fname
def defeat(teamname):
    """Return the recorded defeat count for *teamname*, or None if absent.

    Parameter binding replaces str.format interpolation (SQL injection).
    """
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("SELECT defeat FROM teams WHERE teamname=? ORDER BY pk DESC;",
                   (teamname,))
    name = cursor.fetchone()
    fname = name and name[0]
    connection.commit()
    cursor.close()
    connection.close()
    return fname
def draws(teamname):
    """Return the recorded draw count for *teamname*, or None if absent.

    Parameter binding replaces str.format interpolation (SQL injection).
    """
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("SELECT draws FROM teams WHERE teamname=? ORDER BY pk DESC;",
                   (teamname,))
    name = cursor.fetchone()
    fname = name and name[0]
    connection.commit()
    cursor.close()
    connection.close()
    return fname
def updateteam(id_data, teamname, wins, defeat, draws):
    """Update the team currently named *id_data*: rename to *teamname*
    and store the new results.

    score = 3 per win + 1 per draw; tmatches = wins+defeats+draws.

    Fixes over the previous version: the WHERE clause compared against
    the NEW name (so any rename was a silent no-op) — it now matches the
    identifying *id_data* — and values are bound as parameters instead
    of str.format interpolation (SQL injection).
    """
    score = (int(wins)*3)+(int(draws)*1)
    tmatches = int(wins)+int(defeat)+int(draws)
    connection = sqlite3.connect('teams.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute(
        "UPDATE teams SET teamname = ?, tmatches = ?, wins = ?, draws = ?,"
        " defeat = ?, score = ? WHERE teamname = ?;",
        (teamname, tmatches, wins, draws, defeat, score, id_data))
    connection.commit()
    cursor.close()
    connection.close()
def addpic(username, picname):
    """Set the profile-picture filename for *username*.

    Parameter binding replaces str.format interpolation (SQL injection).
    Returns a fixed success message, matching the original contract.
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("UPDATE users SET picname = ? WHERE username = ?;",
                   (picname, username))
    connection.commit()
    cursor.close()
    connection.close()
    return 'you have successfully uploaded picture'
def pic(username):
    """Return the stored picture filename for *username*, or None.

    Parameter binding replaces str.format interpolation (SQL injection).
    """
    connection = sqlite3.connect('insta.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("SELECT picname FROM users WHERE username=? ORDER BY pk DESC;",
                   (username,))
    name = cursor.fetchone()
    fname = name and name[0]
    connection.commit()
    cursor.close()
    connection.close()
    return fname
| [
"noreply@github.com"
] | mohsin5432.noreply@github.com |
650c3934dcb1a4a2dcc9609b404fe3e2cafd1c55 | 9f30247de67e3f8a8cba146bc8bbacf57bbf6cc7 | /main.py | cf46e69e9a00e542b4c037a08ae57a7082e3ce19 | [] | no_license | adisrael/opti-project | c9ee80f91e2efdaf80c1f3825b0499b870d426ff | 6d46b0a2e8f5af2ceaa49c96eb2ad967f945578e | refs/heads/master | 2020-03-19T08:58:59.240855 | 2018-06-09T04:17:36 | 2018-06-09T04:17:36 | 136,250,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,010 | py | # from gurobipy import *
from gurobipy import Model
from gurobipy import quicksum
from gurobipy import GRB
from gurobipy import GurobiError
# import random
# import sys
# Run on Mac OSX: time(gurobi.sh main.py) in the folder containing file
# Build and solve a Gurobi MIP that schedules inter-school sports matches:
# maximize bonus-weighted matches played beyond each league's minimum,
# subject to court, parking, locker-room, budget and symmetry constraints.
try:
    # Instantiate the model (any name works here)
    m = Model('decision')
    # Parameter definitions
    # Sports (men's/women's soccer, futsal, handball, volleyball, basketball)
    D = {1: 'Futbol hombre',
         2: 'Futbol mujer',
         3: 'Futsal hombre',
         4: 'Futsal mujer',
         5: 'Handbol hombre',
         6: 'Handbol mujer',
         7: 'Voleibol hombre',
         8: 'Voleibol mujer',
         9: 'Basquetbol hombre',
         10: 'Basquetbol mujer'}
    # Courts
    C = [1, 2, 3, 4, 5]
    # 1 -> Soccer field
    # 2 -> Gym: futsal, volleyball, basketball
    # 3 -> Handball
    # 4 -> Outdoor 1: basketball, volleyball
    # 5 -> Outdoor 2: basketball, volleyball
    # Time blocks
    T = [i + 1 for i in range(4)]
    # Teams
    # 10 schools in total
    E = [i + 1 for i in range(10)]
    # Set of days in a month on which matches can be played (alternating Saturdays and Sundays).
    A = [i + 1 for i in range(8)]
    # A = [i + 1 for i in range(16)]
    # P_d: number of league matches of sport d, with l in L.
    # for now 3 to 5 matches per sport per league
    P_d = [4, 4, 3, 3, 5, 5, 5, 5, 3, 3]
    # n_d: people per team in sport d in D
    n_d = [22, 22, 13, 13, 13, 13, 15, 15, 16, 16]
    # match duration for sport d in D (minutes)
    t_d = [120, 120, 90, 90, 70, 70, 90, 90, 80, 80]
    # number of vehicles per team e
    veh_e = [11, 11, 9, 9, 9, 9, 8, 8, 10, 10]
    # bonus for each sport, 100 for all
    b_d = [100 for i in range(len(D))]
    # minimum number of matches of sport d in D
    minp_d = [1 for d in range(len(D))]
    # maximum number of spectators at court c in C
    emax_c = [200, 200, 120, 80, 80]
    # emax_c = [200, 200, 120, 80, 80]
    # maximum locker-room capacity
    mc = 40
    # mc = 128
    # maximum car capacity of the parking lot
    mveh = 100
    # mveh = 50
    # number of buses that fit in the parking lot
    mbus = 2
    # mbus = 100
    # Cost of hiring a bus
    cb_d = [10000, 10000, 6000, 6000, 6000, 6000, 7000, 7000, 8000, 8000]
    # Bus budget available to each league
    pres_d = [100000, 90000, 70000, 60000, 70000, 60000, 80000, 70000, 90000, 80000]
    # pres_d = [110000, 100000, 76000, 66000, 76000, 66000, 87000, 77000, 98000, 88000]
    # s_cd = 1 if sport d can be played at court c, 0 otherwise
    s_cd = [[1, 1, 0, 0, 0, 0, 0, 0, 0, 0], # 1 -> Soccer
            [0, 0, 1, 1, 0, 0, 1, 1, 1, 1], # 2 -> Gym: futsal, volleyball, basketball
            [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], # 3 -> Handball
            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1], # 4 -> Outdoor 1: basketball, volleyball
            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]] # 5 -> Outdoor 2: basketball, volleyball
    # All ones, to test that the courts are not the bottleneck
    # s_cd = [[1 for i in range(10)] for i in range(5)]
    # q_ed = [[0 for i in range(len(E))] for i in range(len(D))] # original
    # q_ed = [[1 for i in range(len(E))] for i in range(len(D))]
    # i = 0
    # j = 5
    # for lista in q_ed:
    #     lista[i:j] = [1] * 5
    #     i += 5
    #     j += 5
    # for i in range(10):
    #     print(random.sample([0, 1, 0, 1, 0, 1, 0, 1, 0, 1], 10))
    # q_ed = 1 if team e plays sport d, 0 otherwise
    q_ed = [[1, 0, 1, 0, 1, 1, 0, 1, 0, 0], # if we use 10 teams
            [1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
            [1, 1, 0, 0, 0, 1, 0, 1, 0, 1],
            [0, 1, 1, 0, 0, 1, 0, 0, 1, 1],
            [1, 0, 1, 0, 1, 1, 1, 0, 0, 0],
            [1, 0, 1, 0, 1, 1, 1, 0, 0, 0],
            [1, 1, 0, 1, 0, 0, 1, 0, 1, 0],
            [0, 1, 0, 0, 1, 1, 0, 1, 0, 1],
            [0, 1, 1, 0, 0, 1, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 1, 0, 1, 0]]
    # Create variables
    # u_eca = m.addVars(E, C, A, vtype=GRB.CONTINUOUS, lb=0.0, ub=GRB.INFINITY, name="u")
    print("parametros listos")
    # x = 1 if team e starts playing team o on court c, sport d, block t, day a
    X_coedta = m.addVars(C, E, E, D, T, A, vtype=GRB.BINARY, name="x")
    # b = 1 if team e arrives by bus in block t on day a
    B_eta = m.addVars(E, T, A, vtype=GRB.BINARY, name="b")
    # v = 1 if team e arrives by private vehicles in block t on day a
    V_eta = m.addVars(E, T, A, vtype=GRB.BINARY, name="v")
    print("variables listas")
    # OBJECTIVE FUNCTION: the /2 de-duplicates the symmetric (e,o)/(o,e) pairs
    m.setObjective(quicksum((quicksum(
        X_coedta[cancha, equipoo, equipoe, deporte, bloque, dia]/2
        for dia in A for bloque in T for cancha in C for equipoe in E
        for equipoo in E if equipoo != equipoe) - minp_d[deporte - 1]) * b_d[deporte - 1]
        for deporte in D), GRB.MAXIMIZE)
    print("FO lista")
    # Constraint 1: at most one match may be played per time block t on each court.
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipoo, equipoe, deporte, bloque, dia]/2 for equipoe in E for equipoo in E if equipoo != equipoe for deporte in D) <= 1 for cancha in C for bloque in T for dia in A ), 'C1')
    print("R1 lista")
    # Constraint 2: a match is allowed only if the court supports the sport and both teams play it
    m.addConstrs(
        (X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia]
        <= s_cd[cancha-1][deporte-1] * q_ed[deporte-1][equipoE-1] * q_ed[deporte-1][equipoO-1] for equipoE in E for equipoO in E if equipoE != equipoO for cancha in C for deporte in D
        for bloque in T for dia in A), "C2")
    print("R2 lista")
    # Constraint 3: each team may play at most once per day
    # NOTE(review): this limits each ordered PAIR (e,o) to one match per day;
    # confirm it also enforces one match per team per day as the comment claims.
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia] for cancha in C for deporte in D for bloque in T)
        <= 1 for equipoE in E for equipoO in E if equipoE != equipoO for dia in A), "C3")
    print("R3 lista")
    # Constraint 4: at least one match per league must be played during the month
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia] for cancha in C for equipoE in E for equipoO in E
        if equipoE != equipoO for bloque in T for dia in A) >= 1 for deporte in D), "C4")
    print("R4 lista")
    # Constraint 5: a team that plays occupies parking (bus or cars); otherwise it does not arrive
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipo, equipoo, deporte, bloque, dia] for deporte in D for cancha in C for equipoo in E if equipo != equipoo) == B_eta[equipo, bloque, dia] + V_eta[equipo, bloque, dia]
        for bloque in T for dia in A for equipo in E), "C5")
    print("R5 lista")
    # Constraint 6: cars parked in a block cannot exceed the lot's car capacity
    m.addConstrs(
        (quicksum(V_eta[equipo, bloque, dia] * veh_e[equipo-1] * q_ed[deporte-1][equipo-1] for equipo in E) <= mveh
        for bloque in T for dia in A for deporte in D), "C6")
    print("R6 lista")
    # m.addConstrs(
    #     (quicksum(V_eta[equipo, bloque, dia] * veh_e[equipo-1] * q_ed[deporte-1][equipo-1] for equipo in E) >= 10
    #     for bloque in T for dia in A for deporte in D), "C66")
    # print("R66 lista")
    # Constraint 7: buses parked cannot exceed the lot's bus capacity
    m.addConstrs(
        (quicksum(B_eta[equipo, bloque, dia] * q_ed[deporte-1][equipo-1] for equipo in E) <= mbus
        for bloque in T for dia in A for deporte in D), "C7")
    print("R7 lista")
    # Constraint 7-2: bus-hire spending must stay within each league's budget
    m.addConstrs(
        (quicksum(B_eta[equipo, bloque, dia] * q_ed[deporte-1][equipo-1] * cb_d[deporte-1] for equipo in E for dia in A for bloque in T) <= pres_d[deporte-1]
        for deporte in D), "C77")
    print("R77 lista")
    # Constraint 8: players in the locker rooms must fit capacity, separately for men (odd d) and women (even d)
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia] * n_d[deporte-1] for cancha in C for equipoE in E for equipoO in E
        if equipoE != equipoO for deporte in D if deporte % 2 != 0) <= 2 * mc for bloque in T for dia in A), "C81")
    m.addConstrs(
        (quicksum(X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia] * n_d[deporte-1] for cancha in C for equipoE in E for equipoO in E
        if equipoE != equipoO for deporte in D if deporte % 2 == 0) <= 2 * mc for bloque in T for dia in A), "C82")
    print("R8 lista")
    # Constraint 9: spectators per match must not exceed each court's capacity
    m.addConstrs(
        (quicksum(V_eta[equipo, bloque, dia] * n_d[deporte-1] * q_ed[deporte-1][equipo-1] for equipo in E) * 2 <= emax_c[cancha-1]
        for cancha in C for bloque in T for dia in A for deporte in D), "C9")
    print("R9 lista")
    # Constraint 10: if team e plays o, then o plays e (symmetry of X).
    m.addConstrs(
        (X_coedta[cancha, equipoO, equipoE, deporte, bloque, dia] == X_coedta[cancha, equipoE, equipoO, deporte, bloque, dia] for cancha in C for deporte in D for bloque in T for equipoE in E for equipoO in E if equipoE != equipoO for dia in A), "C10"
    )
    # m.addConstrs((quicksum(a_rpt[punto, producto, tiempo] for punto in R for producto in S) >= 1 for tiempo in T), "c9")
    # m.addConstrs((X_coedta[cancha, equipo, equipo, deporte, bloque, dia] == 0 for cancha in C for equipo in E for bloque in T for dia in A for deporte in D), "C10")
    print("R10 lista")
    # Optimize
    m.optimize()
    print("Num Vars: ", len(m.getVars()))
    print("Num Restricciones: ", len(m.getConstrs()))
    status = m.status
    print('Status:', status)
    if status != GRB.Status.OPTIMAL:
        print('Optimization was stopped with status %d' % status)
    # exit(0)
    if status == GRB.Status.INF_OR_UNBD:
        print('The model cannot be solved because it is infeasible or unbounded')
    if status == GRB.Status.INFEASIBLE:
        print("Model is INFEASIBLE")
    if status == GRB.Status.UNBOUNDED:
        print('Model is UNBOUNDED')
    # exit(1)
    if status == GRB.Status.OPTIMAL or status == 2:
        # Dump the solution: a human-readable txt, a csv, and constraint slacks.
        with open('results.txt', 'w') as archivo:
            # In case we want to dump the values of some important parameters into the results file
            # archivo.write(str(G_p) + '\r\n')
            # archivo.write(str(V) + '\r\n')
            # archivo.write(str(D) + '\r\n')
            # archivo.write(str(len(T)) + '\r\n')
            print('Obj:', m.objVal)
            archivo.write('\nObj: {} \r\n'.format(m.objVal))
            archivo.write("\nVariables No 0\n")
            for v in m.getVars():
                if v.x != 0.0:
                    archivo.write('{}, {} \r\n'.format(v.varName, v.x))
            archivo.write("\nTodas las Vars\n")
            for v in m.getVars():
                # print(v.varName, v.x)
                archivo.write('{}, {} \r\n'.format(v.varName, v.x))
        with open('results.csv', 'w') as file:
            file.write('Obj: {} \r\n'.format(m.objVal))
            file.write('Non-Zero Variables\r\n')
            file.write('varName;value;\r\n')
            for v in m.getVars():
                if v.x != 0.0:
                    file.write('{};{};\r\n'.format(v.varName, v.x))
            file.write('All Variables\r\n')
            file.write('varName;value;')
            for v in m.getVars():
                file.write('{};{};\r\n'.format(v.varName, v.x))
        with open('cons.txt', 'w') as const_file:
            for c in m.getConstrs():
                const_file.write('{}, {} \r\n'.format(c.constrName, c.slack))
except GurobiError as e:
    print('Error code ' + str(e.errno) + ": " + str(e) + '-' + str(e.message))
# except: # catch *all* exceptions
#     e = sys.exc_info()
#     print("Error: {}".format(str(e)))
| [
"adisrael@uc.cl"
] | adisrael@uc.cl |
86886088855f18904eb8ec54503c050c44d54602 | 808b78503f15d72f278cb1211bef7f4d23672763 | /udn_nlp/util.py | a92b8cf54b7ac329a441c8345d606929439dbadc | [] | no_license | MCodeLab/udn-nlp | 8d10db6beb2bc5c9b5eabfdc3c4b37cbd8f827bb | 9a0601df8e6f9ee59bf75392aabd22604073af9b | refs/heads/master | 2020-04-17T23:47:53.245395 | 2019-01-24T17:31:16 | 2019-01-24T17:31:16 | 167,051,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,997 | py | from urllib.request import urlopen
import sys
import requests
import subprocess
import os
import errno
import datetime
import requests_cache # type: ignore
from typing import *
# Base URL of the Utah Digital Newspapers (UDN) REST API.
request_root = 'https://api.lib.utah.edu/udn/v1/'
# Directory containing this module (not the caller's working directory).
dir_path = os.path.dirname(os.path.realpath(__file__))
# Cache UDN requests without expiry by default because UDN documents are
# never updated as far as I know
requests_cache.install_cache('udn_requests', backend='sqlite')
def is_windows():
    # type: () -> bool
    """True when running on Windows ('nt' is its os.name value)."""
    windows_marker = 'nt'
    return os.name == windows_marker
def is_linux():
    # type: () -> bool
    """True on POSIX systems ('posix' is their os.name value)."""
    posix_marker = 'posix'
    return os.name == posix_marker
def call_console(command, use_shell=True):
    """Run *command* through the shell and print its captured stdout.

    On POSIX systems the command is executed with /bin/bash explicitly.
    """
    print('Calling command: ' + command)
    if not is_linux():
        print(subprocess.check_output(command, shell=use_shell))
    else:
        print(subprocess.check_output(command, executable='/bin/bash', shell=use_shell))
def retrieve_document(id):
    # type: (int) -> Dict
    ''' Retrieve the json data of the document with the given id
    '''
    response, _ = dry_make_request('docs/id/{}'.format(id))
    if 'docs' in response and len(response['docs']) == 1:
        return response['docs'][0]
    else:
        # Raise a specific, catchable error instead of bare BaseException:
        # BaseException escapes ordinary `except Exception` handlers and is
        # reserved for interpreter-level exits. LookupError is still caught
        # by the existing `except BaseException` retry logic in this module.
        raise LookupError('There is no document with id {}'.format(id))
def open_file_make_dirs(filename, access_type):
    # type: (str, str) -> IO
    ''' Open a file from the local disk, creating directories for
    every folder in its path that doesn't exist
    '''
    dirname = os.path.dirname(filename)
    # A bare filename has no directory component; the previous version
    # called os.makedirs('') in that case, which raises OSError.
    if dirname and not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    return open(filename, access_type)
def global_dest_for_webfile(url):
    # type: (str) -> str
    ''' Parse out a local path to save a file from the internet.
    '''
    # Everything after the final '/' is the file's basename (whole url when
    # there is no slash, since rfind returns -1).
    basename = url[url.rfind("/")+1:]
    home = os.path.expanduser('~')
    return home + "/ocr-temp/" + basename
def download_file(url, dest=""):
# type: (str, str) -> str
''' Download a file to the given location. Return the local filepath
'''
if len(dest) == 0:
dest = global_dest_for_webfile(url)
# Don't re-download a file that's already been downloaded
if not os.path.exists(global_dest_for_webfile(url)):
print('downloading file from ' + url)
with open_file_make_dirs(dest, 'wb') as f:
webfile = urlopen(url)
f.write(webfile.read())
return dest
def open_webfile(url, access_type):
    # type: (str, str) -> IO
    ''' Open a file from the internet (downloading if necessary)
    '''
    local_path = global_dest_for_webfile(url)
    download_file(url)
    return open(local_path, access_type)
def files_in_dir(dir):
    # type: (str) -> Tuple[int, str]
    ''' Count the files in a directory and return the name of the last one (alphabetically)
    '''
    # os.listdir returns entries in arbitrary order; sort so that
    # "last one (alphabetically)" is actually true, as the docstring promises.
    files = sorted(name for name in os.listdir(dir)
                   if os.path.isfile(os.path.join(dir, name)))
    # NOTE: still raises IndexError for an empty directory, as before.
    return len(files), files[-1]
# NOTE: a stray, no-effect bare `False` expression stood here; removed.
# Toggle for logging each outgoing request URL in dry_make_request.
print_on_request = True
def dry_make_request(query, starting_idx=0, limit=100, timeout=0.1):
    # type: (str, int, int, float) -> Tuple[Dict, bool]
    ''' Convenience function for querying the UDN API. Uses cached responses where possible.
    NOTE returns 2 values: response json, and bool from_cache
    '''
    req = requests.Request('GET', request_root + query, params={
        'sort': 'id|asc',
        'start': starting_idx,
        'limit': limit,
    })
    prepared = req.prepare()
    if print_on_request:
        print('{} {}'.format(prepared.method, prepared.url))
    try:
        with requests.Session() as session:
            response = session.send(prepared,timeout=timeout)
            if print_on_request:
                print("Used Cache: {0}".format(response.from_cache))
            # return a tuple of the response object with whether the query was cached
            # in case a script wants to know if it was already run partially
            return response.json(), response.from_cache
    except BaseException as e:
        # BaseException is caught deliberately so that only Ctrl-C / EOF
        # abort the retry prompt; everything else asks the user to retry.
        if type(e) is KeyboardInterrupt or type(e) is EOFError:
            raise e
        else:
            input("An HTTPS query failed. Check your internet connection and press ENTER to try again.")
            # Bug fix: forward the caller's timeout on retry; it was
            # previously reset to the default 0.1s.
            return dry_make_request(query, starting_idx, limit, timeout)
def query_all_document_pages(query, starting_idx=0, limit=100):
    # type: (str, int, int) -> Iterator[Tuple[List[Dict], bool]]
    ''' Generator yielding every "page" of documents matching the given UDN
    query (a page is up to `limit` docs, the API maximum of 100).
    Pages, rather than single documents, are yielded so the result is
    multiprocessing-friendly.
    NOTE each item is a 2-tuple (page, from_cache) -- don't iterate the
    tuple itself by mistake.
    '''
    offset = starting_idx
    while True:
        batch, was_cached = dry_make_request(query, offset, limit)
        if batch['numFound'] == 0:
            print('No docs respond to query {}'.format(query))
            break
        docs = batch['docs']
        if len(docs) == 0:
            print('Reached the last page')
            break
        offset += len(docs)
        yield docs, was_cached
def query_all_documents(query_or_list_file, starting_idx=0, limit=100):
    # type: (str, int, int) -> Iterator[Dict]
    ''' Generator that will eventually yield every document matching the given UDN query, OR every document whose ID is listed in a given text file
    NOTE limit is max 100, and the generator will slow down to query the next page every time it reaches the limit.
    '''
    last_id = 0
    docs_yielded = 0
    if os.path.exists(query_or_list_file):
        # If this function is called with the path to a file listing document IDs, query every document listed in the file
        doc_list = open(query_or_list_file, 'r').readlines()
        for doc_id in doc_list[starting_idx:]:
            yield retrieve_document(doc_id.strip())
    else:
        # Otherwise, this function must be called with a valid UDN query. Iterate through pages of results and return documents
        query = query_or_list_file
        # NOTE(review): this extra request exists only to learn the total
        # count up front; it is usually served from the cache.
        dummy_batch, _ = dry_make_request(query, starting_idx, limit)
        total_docs = dummy_batch['numFound']
        for page, _ in query_all_document_pages(query, starting_idx, limit):
            # print(page)
            # print(len(page))
            for doc in page:
                # print(len(doc))
                # Sanity check for ascending id sort order
                # print(doc['id'])
                if int(doc['id']) < last_id:
                    print('Error! The query {} is not respecting sort order!'.format(query))
                docs_yielded += 1
                # if docs_yielded % 5 == 0:
                #     print('So far, Retrieved {}/{} documents for query {}'.format(docs_yielded, total_docs, query))
                last_id = int(doc['id'])
                yield doc
        # Warn when fewer docs came back than the API reported in total —
        # presumably because the iteration started from a nonzero offset.
        if docs_yielded < total_docs:
            print('Query stopped returning docs at {1} (probably) because you started the query at id {0}. {0} + {1} = {2} '.format(starting_idx, docs_yielded, (starting_idx+docs_yielded)/ total_docs))
def safe_print(document, field, outfile=None, format_str="{}\n"):
    # type: (Dict, str, Optional[IO], str) -> None
    ''' Print a field from a UDN document if it exists; silently do nothing
    otherwise.
    format_str can define a structure for the field to be printed in, where
    {} is replaced with the field's value. For example:
        util.safe_print(doc, 'title', format_str = "Title: {}\n")
    '''
    if field not in document:
        return
    rendered = format_str.format(document[field])
    if outfile:
        outfile.write(rendered)
    else:
        print(rendered, end="")
def confirm(message):
    """Ask a yes/no question on stdin; True only for a 'y'/'Y' answer."""
    reply = input('{} (Y/n)'.format(message))
    return reply.lower() == 'y'
def choose_from_list(message):
    # Unimplemented placeholder: currently ignores *message* and returns None.
    # TODO refactor the collocation-classifier int csv parsing here
    pass | [
"natquaylenelson@gmail.com"
] | natquaylenelson@gmail.com |
34c9ff3af63613fee8e761e8cca0904a4d006d4e | 9e6739b0ee06b79af38e94ef1af4f8bceb76f030 | /algoReco/valid.py | c328ae10fd430021cef73f46030eb18ce5b6c68c | [] | no_license | jhylands/battle | 084fa9e13698c7632acb211a495f91944d99c3d0 | a164878fe996b0e0c2eb538d339c4d4f702a3d0a | refs/heads/master | 2021-01-01T19:51:36.978021 | 2014-09-24T14:21:47 | 2014-09-24T14:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | import math
def rotate(coordinates,angle):
    """Rotate the point {'x': .., 'y': ..} by *angle* radians about the
    origin using the standard 2-D rotation matrix; returns a new dict."""
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    px = coordinates['x']
    py = coordinates['y']
    return {'x': px * cos_a - py * sin_a, 'y': px * sin_a + py * cos_a}
# Panel offsets that make up the aircraft-carrier shape (original spellings
# 'AircraftCarrior'/'pannal' are kept because the names are part of the code).
AircraftCarrior = [{'x':0,'y':0},{'x':0,'y':-1},{'x':0,'y':-2},{'x':0,'y':-3},{'x':-1,'y':-3},{'x':1,'y':-3}]
# Python 2 print statements: rotate every panel by 3*pi/2 (270 degrees).
for pannal in AircraftCarrior:
    print rotate(pannal,(math.pi*3/2))
# 4.71 looks like a hand-rounded 3*pi/2 used as a sanity check — confirm intent.
print rotate({'x': 1, 'y': -3},4.71)
| [
"james.space.ict@gmail.com"
] | james.space.ict@gmail.com |
fb2dc56539cdf51cd1d14fa04f375e98d0178ecc | ea16c6da19fce9a4dff085aaeff3ac12baa21d59 | /tests/test_obvs.py | 5febd213e3768347232d28f1e8c604c5c017648c | [] | no_license | changhoonhahn/specmulator | a31b17aeab1ba1a29118e431fd7558dd8bbc7e5b | 9453e7fcc30d74b732594bfb78f7e4f5d20bc95f | refs/heads/master | 2021-09-10T18:57:21.361837 | 2018-03-31T05:52:33 | 2018-03-31T05:52:33 | 106,511,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,710 | py | import numpy as np
import env
import util as UT
import obvs as Obvs
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global matplotlib style shared by every figure in this module:
# LaTeX-rendered serif text, thicker axis spines and major ticks,
# and frameless legends.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
def Plk_halo_mneut_ratio(nzbin=4, zspace=False):
    ''' Plot the ratio of P_l^mneut(k)/P_l^0.0eV
    for different neutrino masses

    For each neutrino mass the l = 0, 2, 4 multipoles are averaged over the
    100 halo-catalog realizations, then each mass's average is divided by
    the massless (0.0 eV) baseline and drawn in three side-by-side panels.
    The figure is written under UT.fig_dir()/tests/.
    '''
    mneuts = [0.0, 0.06, 0.10, 0.15, 0.6] # eV
    p0ks_mneut, p2ks_mneut, p4ks_mneut = [], [], []
    for mneut in mneuts:
        p0ks, p2ks, p4ks = [], [], []
        for ireal in range(1, 101):
            # read all 100 realizations
            plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
            if ireal == 1: k = plk_i['k']
            p0ks.append(plk_i['p0k'])
            p2ks.append(plk_i['p2k'])
            p4ks.append(plk_i['p4k'])
        # plot the average
        p0ks_mneut.append(np.average(np.array(p0ks), axis=0))
        p2ks_mneut.append(np.average(np.array(p2ks), axis=0))
        p4ks_mneut.append(np.average(np.array(p4ks), axis=0))
    plks_mneut = [p0ks_mneut, p2ks_mneut, p4ks_mneut]
    fig = plt.figure(figsize=(15, 5))
    for i, ell in enumerate([0,2,4]):
        sub = fig.add_subplot(1,3,i+1)
        for ii in range(len(mneuts)):
            # index 0 is the 0.0 eV baseline, so its own curve is identically 1
            sub.plot(k, plks_mneut[i][ii]/plks_mneut[i][0], lw=2, label=r'$\sum m_\nu = $ '+str(mneuts[ii])+'eV')
        if i == 0:
            sub.legend(loc='lower right', prop={'size': 12})
        else:
            sub.set_yticks([])
        sub.set_xscale('log')
        sub.set_xlim([0.01, 0.5])
        sub.set_xlabel('k', fontsize=20)
        sub.set_ylim([0.9, 1.15])
        sub.set_ylabel('$P_{'+str(ell)+'}(k)/P_{'+str(ell)+'}^{0.0\mathrm{eV}}(k)$', fontsize=20)
    if zspace: str_space = 'z'
    else: str_space = 'r'
    fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.mneuts_ratio.nzbin', str(nzbin),
        '.', str_space, 'space.png']), bbox_inches='tight')
    return None
def Plk_halo_mneut(nzbin=4, zspace=False):
    ''' Plot P_l(k) for different neutrino masses

    For each total neutrino mass the l = 0, 2, 4 multipoles are averaged
    over the 100 halo-catalog realizations and drawn in three log-log
    panels (one per multipole). The figure lands in UT.fig_dir()/tests/.
    '''
    mneuts = [0.0, 0.06, 0.10, 0.15, 0.6] # eV
    p0ks_mneut, p2ks_mneut, p4ks_mneut = [], [], []
    for mneut in mneuts:
        p0ks, p2ks, p4ks = [], [], []
        for ireal in range(1, 101):
            # read all 100 realizations
            plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
            if ireal == 1: k = plk_i['k']
            p0ks.append(plk_i['p0k'])
            p2ks.append(plk_i['p2k'])
            p4ks.append(plk_i['p4k'])
        # plot the average
        p0ks_mneut.append(np.average(np.array(p0ks), axis=0))
        p2ks_mneut.append(np.average(np.array(p2ks), axis=0))
        p4ks_mneut.append(np.average(np.array(p4ks), axis=0))
    plks_mneut = [p0ks_mneut, p2ks_mneut, p4ks_mneut]
    fig = plt.figure(figsize=(15, 5))
    for i, ell in enumerate([0,2,4]):
        sub = fig.add_subplot(1,3,i+1)
        for mneut, plk in zip(mneuts, plks_mneut[i]):
            sub.plot(k, plk, lw=2, label=r'$\sum m_\nu = $ '+str(mneut)+'eV')
        if i == 0:
            sub.legend(loc='lower right', prop={'size': 12})
        else:
            sub.set_yticks([])
        sub.set_xscale('log')
        sub.set_xlim([0.01, 0.15])
        sub.set_xlabel('k', fontsize=20)
        sub.set_ylim([1e3, 1e5])
        sub.set_yscale('log')
        # NOTE(review): the y-label reads k*P_l(k) but the curves plot P_l(k)
        # directly — confirm which is intended.
        sub.set_ylabel('$k P_{'+str(ell)+'}(k)$', fontsize=20)
    if zspace: str_space = 'z'
    else: str_space = 'r'
    fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.mneuts.nzbin', str(nzbin),
        '.', str_space, 'space.png']), bbox_inches='tight')
    return None
def Plk_halo(mneut=0.0, nzbin=4, zspace=False):
    ''' **TESTED --- Nov 7, 2017 **
    Test the Plk_halo
    '''
    # collect the three multipoles from all 100 realizations
    p0_all, p2_all, p4_all = [], [], []
    k = None
    for ireal in range(1, 101):
        plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
        if k is None:
            k = plk_i['k']
        p0_all.append(plk_i['p0k'])
        p2_all.append(plk_i['p2k'])
        p4_all.append(plk_i['p4k'])
    fig = plt.figure()
    sub = fig.add_subplot(111)
    # faint line per realization, one color per multipole
    for p0k, p2k, p4k in zip(p0_all, p2_all, p4_all):
        sub.plot(k, k * p0k, c='k', lw=0.1)
        sub.plot(k, k * p2k, c='b', lw=0.1)
        sub.plot(k, k * p4k, c='r', lw=0.1)
    # heavy dashed line for the ensemble average of each multipole
    sub.plot(k, k * np.average(np.array(p0_all), axis=0), c='k', lw=2, ls='--', label='$\ell=0$')
    sub.plot(k, k * np.average(np.array(p2_all), axis=0), c='b', lw=2, ls='--', label='$\ell=2$')
    sub.plot(k, k * np.average(np.array(p4_all), axis=0), c='r', lw=2, ls='--', label='$\ell=4$')
    sub.set_xlim([0.01, 0.15])
    sub.set_xlabel('k', fontsize=20)
    sub.set_ylim([-2000., 2500.])
    sub.set_ylabel('$k P(k)$', fontsize=20)
    sub.legend(loc='lower right', prop={'size': 15})
    str_space = 'z' if zspace else 'r'
    fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.', str(mneut), 'eV.nzbin', str(nzbin),
        '.', str_space, 'space.png']), bbox_inches='tight')
    return None
if __name__=="__main__":
    # Produce the multipole-ratio figures in real (r) and redshift (z) space;
    # the remaining checks are kept around commented out for manual runs.
    Plk_halo_mneut_ratio(nzbin=4, zspace=False)
    Plk_halo_mneut_ratio(nzbin=4, zspace=True)
    #Plk_halo_mneut(nzbin=4, zspace=False)
    #Plk_halo_mneut(nzbin=4, zspace=True)
    #Plk_halo(mneut=0.6, zspace=False)
    #Plk_halo(mneut=0.6, zspace=True)
| [
"chh327@nyu.edu"
] | chh327@nyu.edu |
23e458c00cc1061016a1a200b2fd13050566f90b | f93da2d3a394d968bd61b2b58af8aa366f6a8591 | /main/views.py | b831f4e5259e635b8ec797ef9889e52dbe09c9c7 | [] | no_license | hurry-hub/site | 3bfa2a13a927d2aa986416da950742c3979e6131 | e730d76991497dedad6ada2cc1eb29839ec7366e | refs/heads/main | 2023-07-08T06:30:43.797344 | 2021-08-08T07:58:02 | 2021-08-08T07:58:02 | 393,893,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from django.shortcuts import render
import json
from users.models import User
from chat_tool.models import Blog
# Create your views here.
def index(request):
    """Render the landing page with the five most recent blog posts."""
    latest_blogs = Blog.objects.all().order_by('-id')[:5]
    return render(request, 'mainbase.html', {'blogs': latest_blogs})
def accounts_profile(request):
    # Profile page view: a POST carries a JSON body (presumably {'name': ...})
    # that renames the logged-in user; every method then renders the template.
    if request.method == 'POST':
        a = json.loads(request.body.decode('utf-8'))
        print(a)  # NOTE(review): debug print left in; prefer logging
        # NOTE(review): assumes request.user is authenticated and that the
        # payload contains 'name' — otherwise this raises; confirm upstream.
        b = User.objects.get(email=request.user.email)
        b.name = a['name']
        b.save()
    return render(request, 'accounts_profile.html') | [
"noreply@github.com"
] | hurry-hub.noreply@github.com |
4041bf407205690c4fd588dd50a7479aa0c6417b | 9c06acef707bd67b1ac315e4cf143f63b4e3d36d | /initializer7a.py | 7eec429afabd52ea3216ef26cbfd5b521085be86 | [] | no_license | guyer/CHiMaDPhaseFieldVI | 9fc4197e02f85218085b5437a7cdc7e50fe5c623 | 8093ac8f542d45b70a0b503f97c02f60c56fa31c | refs/heads/master | 2021-04-27T21:13:01.098211 | 2020-01-08T01:09:56 | 2020-01-08T01:09:56 | 122,394,314 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | # script based on
# https://pages.nist.gov/pfhub/benchmarks/benchmark7.ipynb
# Python 2 script (note the print statement below): sets up PFHub benchmark 7
# (method of manufactured solutions for an Allen-Cahn equation), stores the
# symbolic solution/residual and the initial condition, then launches the
# solver script over the time steps in chunks of subprocesses.
import os
import pickle
import platform
import subprocess
import sys
import time
import yaml
import datreant.core as dtr
import fipy as fp
# First command-line argument: path to the YAML parameter file.
yamlfile = sys.argv[1]
with open(yamlfile, 'r') as f:
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags; yaml.safe_load would be safer for untrusted parameter files.
    params = yaml.load(f)
try:
    from sumatra.projects import load_project
    project = load_project(os.getcwd())
    record = project.get_record(params["sumatra_label"])
    output = record.datastore.root
except:
    # either there's no sumatra, no sumatra project, or no sumatra_label
    # this will be the case if this script is run directly
    output = os.getcwd()
print "storing results in {0}".format(output)
data = dtr.Treant(output)
from sympy import Symbol, symbols, simplify, init_printing
from sympy import Eq, sin, cos, tanh, sqrt, pi
from sympy.printing import pprint
from sympy.abc import kappa, S, t, x, xi, y, alpha
from sympy.physics.vector import ReferenceFrame, dynamicsymbols, time_derivative, divergence, gradient
N = ReferenceFrame('N')
t = symbols('t')
# symbolic form
# alpha = symbols('a')
A1, A2 = symbols('A1 A2')
B1, B2 = symbols('B1 B2')
C2 = symbols('C2')
# Define interface offset (alpha)
alpha = 0.25 + A1 * t * sin(B1 * N[0]) + A2 * sin(B2 * N[0] + C2 * t)
# Define the solution equation (eta)
xi = (N[1] - alpha) / sqrt(2*kappa)
eta_sol = (1 - tanh(xi)) / 2
# Residual of the Allen-Cahn equation evaluated at the manufactured solution.
eq_sol = simplify(time_derivative(eta_sol, N)
                  + 4 * eta_sol * (eta_sol - 1) * (eta_sol - 0.5)
                  - divergence(kappa * gradient(eta_sol, N), N))
parameters = ((kappa, params['kappa']),
              (A1, 0.0075), (B1, 8.0 * pi),
              (A2, 0.03), (B2, 22.0 * pi),
              (C2, 0.0625 * pi))
# substitute coefficient values
subs = [sub.subs(parameters) for sub in (eq_sol, eta_sol)]
# generate FiPy lambda functions
from sympy.utilities.lambdify import lambdify, lambdastr
(eq_fp, eta_fp) = [lambdify((N[0], N[1], t), sub, modules=fp.numerix) for sub in subs]
kappa_fp = float(kappa.subs(parameters))
# Can't pickle lambda functions
(eq_str, eta_str) = [lambdastr((N[0], N[1], t), sub) for sub in subs]
data.categories["eq"] = eq_str
data.categories["eta"] = eta_str
data.categories["kappa"] = kappa_fp
# initialize and store variables
totaltime = params['totaltime']
dt = float(params['dt'])
Lx = params['Lx']
Ly = params['Ly']
nx = params['nx']
ny = int(nx * Ly / Lx)
dx = Lx / nx
dy = Ly / ny
# NOTE(review): dy is computed above but dy=dx is passed to the mesh —
# presumably intentional for square cells on this benchmark; verify.
mesh = fp.PeriodicGrid2DLeftRight(nx=nx, dx=dx, ny=ny, dy=dx)
xx, yy = mesh.cellCenters[0], mesh.cellCenters[1]
eta = fp.CellVariable(mesh=mesh, name="$eta$", hasOld=True)
eta.value = eta_fp(xx, yy, 0.)
# error vs the manufactured solution at t=0 (identically zero initially)
error = eta - eta_fp(xx, yy, 0.)
error.name = r"$\Delta\eta$"
fname = data["step0.tar.gz"].make().abspath
fp.tools.dump.write((eta, error), filename=fname)
data.categories["numsteps"] = int(totaltime / dt)
data.categories["dt_exact"] = totaltime / data.categories["numsteps"]
if params['nproc'] > 1:
    cmd = ["mpirun", "-n", str(params['nproc']), "--wdir", os.getcwd()]
else:
    cmd = []
cmd += [sys.executable, params['script'], yamlfile]
start = time.time()
chunk = 1000
# Run the solver script over the steps in chunks of 1000, aborting on failure.
for startfrom in range(0, data.categories["numsteps"], chunk):
    thischunk = min(chunk, data.categories["numsteps"] - startfrom)
    cmdstr = " ".join(cmd + [str(startfrom), str(thischunk)])
    p = subprocess.Popen(cmdstr, shell=True,
                         close_fds=(platform.system() == 'Linux'))
    ret = p.wait()
    if ret != 0:
        raise RuntimeError("""\
{}
returned: {}""".format(cmdstr, ret))
end = time.time()
data.categories["solvetime"] = end - start
| [
"guyer@nist.gov"
] | guyer@nist.gov |
68836861363afd1c44fcb0dd2cdc94697292618a | 7fb9bfe9473f9d85a4eaa775d0dc2c270ab5313e | /simulation_results/ppe_090_wild_type/screen_7/intra_hospital/params.py | a5d693eb6abb8dc70a5ddbb4d185138448009197 | [] | no_license | htahir2/covid_intra-hospital_model | cfbe86d175ecd950a285e913c8c7c658f396917e | ab2ec3452303bff6fbf5e084cbc8fc80bd97c287 | refs/heads/main | 2023-06-19T16:40:59.681356 | 2021-07-20T06:55:13 | 2021-07-20T06:55:13 | 328,629,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,962 | py | """
@author: hannantahir
This file contains all the required parameters that are required for the model to work.
"""
import numpy as np
"""
#### Gerneral simulation_parameters #####
"""
#simulation_time = 1 # in months
time_step = 10 # in minutes
num_steps_per_day = 60*24/time_step
max_iter = int(num_steps_per_day * 239) ## 239 corresponds to maximum number of days
'''
##### Hospital structure and patient related parameters ########
'''
patient_avg_arrival_rate = 40 ## Daily patient arrival rate
corona_start_day = 60 ## start admitting colonized patints from day 60 onwards.
corona_start_sim_time = corona_start_day * num_steps_per_day
'''
The below room_num list corresponds to the number of rooms in each ward.
4 types of wards. ward 1-4:Corona ICU, ward 5-8: Corona ward,
ward 9: Normal ICU, ward 10-28: normal wards
'''
room_num = [17,17,17,17,23,23,23,22,12,\
20,20,19,19,19,19,18,18,18,\
18,18,18,18,18,18,18,18,18,18]
perc_hosp_room_vaccant_initially =50 # percentage of rooms remain vaccan at model initialization.
'''
### Length of stay in different wards
'''
## LOS for covid icu patients (Gamma distribution)
shape_icu = 1.58719488513702
rate_icu = 0.0476524364643747
## LOS for covid nonicu patients (Gamma distribution)
shape_nonicu = 1.88256496519631
rate_nonicu = 0.24844307876682
## los for normal ICU (Lognormal distribution)
meanlog_norm_icu = 0.3680895454008
sdlog_norm_icu = 0.82071280273592
## los for regular wards (Weibull distribution)
shape_reg_ward = 0.9182222985585
scale_reg_ward = 4.1794422300417
los_max_days = 190 # this is the maximum x value in days until where you want to draw samples from fitting equation.
'''
##### Physicians and Nurse related and contact rates #######
'''
### contact rates for contact matrix - Estimated from model per shift, n : nurses, hc : physicians, p : patients
C_nn = 4.6 ## nurse to nurse contacts
C_np = 19.07 ## nurse to patient
C_nhc = 3 ## nurse to physician
C_pn = 6 ## patient to nurse
C_phc = 2 ## patient to physician
C_hcn = 3 ## physician to nurse
C_hcp = 17.4 ## physician to patient
C_hchc = 0.43 ## physician to physician
### physicians related
shifts_per_day = 3 ## 3 shifts a day for both physicians (also true for nurses)
phy_pat_ratio = [6,6,6,6,6,6,6,6,6,\
10,10,10,10,10,10,10,10,10,\
10,10,10,10,10,10,10,10,10,10] # ratio of phy-patient in every ward. ward 1-4:Corona ICU, ward 5-8: Corona ward, ward 9: Normal ICU, ward 10-28: rest of the hospital
ratio_by_ward = np.ceil(np.divide(room_num,phy_pat_ratio))
phy_per_ward = [x * shifts_per_day for x in ratio_by_ward]
phy_per_ward[:] = [int(x) for x in phy_per_ward]
rounds_during_shift = 2
phy_service_time = [2,2,2,2,2,2,2,2,2,\
1,1,1,1,1,1,1,1,1,\
1,1,1,1,1,1,1,1,1,1] ## in model time steps , 1 means 10 minutes servcie time, 3 means 30 minutes assuming time step of 10 minutes
## Nurses related Regular ward 1:4, corona ward 1:2, normal ICU 1:1, corona ICU 1:1 based on average ratios calculated by Baastian for UMCU
nur_pat_ratio = [1,1,1,1,2,2,2,2,1,\
4,4,4,4,4,4,4,4,4,\
4,4,4,4,4,4,4,4,4,4] # ratio of phy-patient in every ward. ward 1-4:Corona ICU, ward 5-8: Corona cohort ward, ward 9: Normal ICU, ward 10-28: rest of the hospital
nurse_ratio_by_ward = np.ceil(np.divide(room_num,nur_pat_ratio))
nur_per_ward = [x * shifts_per_day for x in nurse_ratio_by_ward]
nur_per_ward[:] = [int(x) for x in nur_per_ward]
nurse_rounds_during_shift = 6
nur_service_time = [3,3,3,3,2,2,2,2,2,\
1,1,1,1,1,1,1,1,1,\
1,1,1,1,1,1,1,1,1,1] ## in model time steps , 1 means 10 minutes servcie time, 3 means 30 minutes assuming time step of 10 minutes
### proportion of HCWs daily ward change
prop_phy_wardchange = 0.025 ### 1% of the physician switch wards for the next duty shift. This happens once per day
prop_nur_wardchange = 0.025 ### 1% of the nurse switch wards for the next duty shift. This happens once per day
'''
## Proportions and disease related parameters
'''
Pa_p = 0.20 ## proportion of asymptomatic patients
Pa_hcws = 0.31 ## proportion of asymptomatic HCWs
Ps = 0.2 ## proportion of symptomatic individuals that develop severe infections
Ra = 0.5 ##0.5 ## reproduction number asymptomatic
Rs = 1.25 # 1.0 ## repoduction number symptomatic
## recovery of individuals
recov_severe = 35*num_steps_per_day ## in simulation steps - severe recover after 35 days
recov_mild = 14*num_steps_per_day ## in simulation steps - mild recover after 14 days
recov_asymp = 14*num_steps_per_day ## in simulation steps - asymp recover after 14 days
quarantine_period = 7*num_steps_per_day ## in simulation steps - 7 days quarantine period
## Infectiousness Curve (Weibull distribution)
shape = 2.83
scale = 6.84
## incubation period (Gamma distribution)
inc_shape = 5.807
inc_scale = 0.948
inc_period = np.arange(0,14,1/num_steps_per_day)
'''
####### INTERVENTIONS ######
'''
### Personal protective equipment (PPE) effectiveness
gear_effectiveness = 1-0.9 ## 1-0.10 mean 10% effective, 1-0.9 means 90% effective
ppe_covid_wards = 1
ppe_noncovid_wards = 0
### contact tracing related
cont_period = 2 ## in days, 2 days means contacts within the last two days are traced only, change to 7 for contact tracing 7 days scenario
cont_period_simtime = cont_period * num_steps_per_day
#time_to_trace_contacts_again = 2*num_steps_per_day ## contacts will be traced 2 times. one immediately after hcw is symptomatic, and second after 2 days. define here when to trace contacts again
testing_day = 5 ## testing moment for contacts. This does not apply to contact tracing with perfect sensitivity
testing_day_simtime = testing_day * num_steps_per_day
## screening related
scr_perc = 1.0 ## proportion of HCWs screened. 1.0 means 100% screened, 0 mean no one
screening_moment = 7 ## in days - every 3rd day or change to 7 for weekly screening
| [
"thi.mui.pham@posteo.de"
] | thi.mui.pham@posteo.de |
8a692dcf1c7895d638939d50d99579e5a67fb22f | b50f28fcd1445c85aae8f2279dcc2a52690f4073 | /parse_gb_mcrA.py | e64e34a049da4f32325b37defe7dd545d67f7db5 | [] | no_license | camel315/microbiology | afc67a8d3a87ba3429bc61a71cb3941a14de7e06 | 2a3003245a7a33b3d5dfe28594d0e631d4bbd424 | refs/heads/master | 2020-03-09T10:27:58.214756 | 2018-05-25T09:07:45 | 2018-05-25T09:07:45 | 128,737,718 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,491 | py | #!/usr/bin/env python
"""find the corresponding 16S rRNA sequence in a genbank file for functional (mcrA) genes sequence.
DEPENDENCIES:
Biopython
"""
# load required packages
import sys
import re
from Bio import Entrez, SeqIO
from Bio.SeqRecord import SeqRecord
Entrez.email = "your.email"
## mcrA have multiple descriptions
feats = ['methyl coenzyme m reductase alpha subunit','methyl-coenzyme m reductase alpha subunit',\
'methyl coenzyme m reductase subunit alpha','methyl-coenzyme m reductase subunit alpha','mcra'\
'methyl-coenzyme m reductase i subunit a',\
'methyl-coenzyme m reductase i subunit alpha']
note = {'methyl coenzyme m reductase alpha subunit','methyl-coenzyme m reductase alpha subunit',\
'methyl coenzyme m reductase subunit alpha','methyl-coenzyme m reductase subunit alpha','mcra'\
'methyl-coenzyme m reductase i subunit a',\
'methyl-coenzyme m reductase i subunit alpha'}
# handle of 16S genbank files
hdin = open('LJKK01.1.gb', 'rU')
with open('parsed_mcrA.fasta', 'a') as hd_out:
for rec in SeqIO.parse(hdin,'gb'):
#print(len(rec))
featss = set()
for feat in rec.features:
featss.add(feat.type)
if feat.type == "CDS":
if "product" in feat.qualifiers:
print(feat.qualifiers['product'][0].lower())
if feat.qualifiers['product'][0].lower().replace(",","") in feats:
print('Product label is %s' % feat.qualifiers['product'][0].lower())
seq = feat.extract(rec.seq)
print('Sequence length is %i' % len(seq))
newrecord = SeqRecord(seq, id = rec.id, description = rec.description)
mcrA.append(newrecord)
SeqIO.write(mcrA, hd_out, "fasta")
else:
print ("Not found the gene you are seaerching for")
print ("="*30)
continue
if "gene" in feat.qualifiers:
if feat.qualifiers['gene'][0].lower().replace(",","") in ('mcra','mrta','mcr','mrt'):
print ("Indexing qualifiers with 'gene' as key and %s as \
value" % (feat.qualifiers['gene'][0].lower()))
seq = feature.extract(rec.seq)
print('Sequence length is %i' % len(seq))
newrecord = SeqRecord(seq, id = rec.id, description = rec.description)
mcrA.append(newrecord)
SeqIO.write(mcrA, hd_out, "fasta")
print ("~"*20)
continue
if "note" in feat.qualifiers:
if feat.qualifiers['note'][0].lower().replace(",","") in note:
print ("Indexing qualifiers with 'note' as key and %s as \
value" % (feat.qualifiers['note'][0].lower()))
seq = feature.extract(rec.seq)
print('Sequence length is %i' % len(seq))
newrecord = SeqRecord(seq, id = rec.id, description = rec.description)
mcrA.append(newrecord)
SeqIO.write(mcrA, hd_out, "fasta")
print ("+"*25)
print(featss)
hdin.close()
| [
"noreply@github.com"
] | camel315.noreply@github.com |
47fa555272fd74b78bedcc3ff50355c0db7c798d | 2ebee13657852e0f509213faca587bdfe10014b2 | /setup.py | 4c6270da34f63c8caf8b947dec8659f9f18cc978 | [] | no_license | qiangsiwei/jiebac | aabd9437f6a4bd19a7cb4102484874eace84d630 | d978183d7e9ca3fc312c77f858e628655259451c | refs/heads/master | 2020-03-21T20:21:02.610840 | 2018-06-28T10:17:29 | 2018-06-28T10:17:29 | 139,001,652 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # -*- coding: utf-8 -*-
import os
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
# for osx
# NOTE(review): forces C++11 for the Cython extension build via CFLAGS;
# assumes the macOS clang toolchain -- confirm behaviour on other platforms.
os.environ['CFLAGS'] = '-std=c++11'
# Build the jieba_cpy extension from its .pyx source. The hard-coded
# include path points at Xcode's libc++ headers (macOS-only).
extensions = [Extension("jieba_cpy",["jieba_cpy.pyx"],include_dirs=\
["/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1/"])]
setup(name="jieba_cpy",ext_modules=cythonize(extensions))
# setup(ext_modules=cythonize('jieba_cpy.pyx',language='c++'))
# python setup.py build_ext --inplace
| [
"qiangsiwei@sjtu.edu.cn"
] | qiangsiwei@sjtu.edu.cn |
5b9b1b13e99fcba14210feea8fbe51304e5f5a92 | 318707f31679d30280c51ca9dc003818779b5695 | /tests/test_released_data.py | 9de0ea859a0b7cc8f6b96c7fbc6843a4d3637905 | [
"Apache-2.0"
] | permissive | MarcSaric/gdc-ng-models | 2f9d0dd8afe65dea8f2cdd26d070a670e8191c00 | ef92b507de8e7e09886b8499d3d30c2a1d46b801 | refs/heads/master | 2022-11-30T10:29:06.871398 | 2020-07-02T19:36:06 | 2020-07-02T19:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,746 | py | import pytest
from sqlalchemy.sql.expression import func
from gdc_ng_models.models import released_data
@pytest.fixture
def fake_released_data(create_released_data_db, db_session):
    """Factory fixture: merge a ReleasedData row with overridable fields.

    Depends on create_released_data_db so the schema exists first.
    """
    def helper(
        name="name", code="code", is_controlled=False, data_type="cnv", is_open=False,
    ):
        # merge (not add) so repeated calls with the same key update in place
        node = released_data.ReleasedData(
            program_name=name,
            project_code=code,
            is_controlled=is_controlled,
            data_type=data_type,
            is_open=is_open,
        )
        db_session.merge(node)
        return node
    return helper
@pytest.fixture
def fake_released_log(create_released_data_db, db_session, request):
    """Factory fixture: merge a ReleasedDataLog row with overridable fields.

    NOTE(review): the ``request`` parameter is unused here -- confirm whether
    a finalizer was intended.
    """
    def helper(
        name="name",
        code="code",
        release_number="1",
        data_type="cnv",
        is_open=False,
        action="release",
    ):
        node = released_data.ReleasedDataLog(
            program_name=name,
            project_code=code,
            release_number=release_number,
            data_type=data_type,
            is_open=is_open,
            action=action,
        )
        db_session.merge(node)
        return node
    return helper
def test_released_data__sqlalchemy_model_registered():
    """The ReleasedData model is importable/registered."""
    assert released_data.ReleasedData
@pytest.mark.parametrize("data_type", ["cnv", "ssm", "case"])
def test_released_data__valid_data_type(fake_released_data, db_session, data_type):
    """Each allowed data_type value round-trips through the model."""
    fake_released_data(data_type=data_type)
    node = db_session.query(released_data.ReleasedData).first()
    assert node.data_type == data_type
    # clean up so the next parametrized case starts from an empty table
    db_session.delete(node)
def test_released_data__invalid_data_type(fake_released_data):
    """An unknown data_type is rejected with a ValueError."""
    with pytest.raises(ValueError, match=r"not a valid value for data_type"):
        fake_released_data(data_type="not-applicable")
def test_released_data__project_id(fake_released_data, db_session):
    """project_id is derived as '<program_name>-<project_code>'."""
    fake_released_data()
    node = db_session.query(released_data.ReleasedData).first()
    assert node.project_id == "{}-{}".format(node.program_name, node.project_code)
def test_release_data_log__sqlalchemy_model_registered():
    """The ReleasedDataLog model is importable/registered."""
    assert released_data.ReleasedDataLog
@pytest.mark.parametrize("data_type", ["cnv", "ssm", "case"])
def test_release_data_log__valid_data_type(db_session, data_type, fake_released_log):
    """Each allowed data_type value round-trips through the log model."""
    fake_released_log(data_type=data_type)
    db_session.commit()
    node = db_session.query(released_data.ReleasedDataLog).first()
    assert node.data_type == data_type
def test_release_data_log__invalid_data_type(db_session, fake_released_log):
    """An unknown data_type is rejected with a ValueError."""
    with pytest.raises(ValueError, match=r"not a valid value for data_type"):
        fake_released_log(data_type="not-applicable")
@pytest.mark.parametrize("action", ["release", "unrelease"])
def test_release_data_log__valid_action(db_session, action, fake_released_log):
    """Both allowed action values round-trip through the log model."""
    fake_released_log(action=action)
    db_session.commit()
    node = db_session.query(released_data.ReleasedDataLog).first()
    assert node.action == action
def test_release_data_log__invalid_action(db_session, fake_released_log):
    """An unknown action is rejected with a ValueError."""
    with pytest.raises(ValueError, match=r"not a valid value for action"):
        fake_released_log(action="not-applicable")
def test_release_data_log__auto_increment(db_session, fake_released_log):
    """The log id column strictly increases as rows are inserted."""
    max_id = -1
    for i in range(10):
        fake_released_log(release_number=str(i))
        current_id = db_session.query(
            func.max(released_data.ReleasedDataLog.id)
        ).scalar()
        assert current_id > max_id
        max_id = current_id
def test_release_data_log__project_id(fake_released_log, db_session):
    """project_id is derived as '<program_name>-<project_code>'."""
    fake_released_log()
    node = db_session.query(released_data.ReleasedDataLog).first()
    assert node.project_id == "{}-{}".format(node.program_name, node.project_code)
| [
"noreply@github.com"
] | MarcSaric.noreply@github.com |
37573cdcf6dad3f7a77d40310b957808001f573c | 3e6f35ae5b603d4c18ea2b802643251487968d63 | /legacy/checkfiles.py | 8576100106e6b60b6026c15f7d8a3185137a69de | [] | no_license | gdsidlemath/mlb_modeling | 2a6305c5808b20150aada3c123c94a42b8ef2b92 | 512e0cdcd8ce8a73b2d480a89df9b96a49039a48 | refs/heads/master | 2021-01-01T23:03:09.370351 | 2020-09-17T13:17:04 | 2020-09-17T13:17:04 | 239,383,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from __future__ import division
from scipy import stats
import numpy as np
import cPickle as pickle
from definedataclass import GamePitchData
import sys
import math
# Load the pickled pitch dataset (Python 2 / cPickle).
# NOTE(review): the context variable 'input' shadows the builtin of the
# same name.
with open('MLBDataJuly2016.pkl','rb') as input:
    Data1 = pickle.load(input)
# Spot-check the first 100 pitches: inning, outpitch, nasty score,
# outenspd and result description (Python 2 print statement).
for i in range(0,100):
    print [Data1.Inning[i],Data1.outpitch[i],Data1.nasty[i],Data1.outenspd[i],Data1.res_des[i]]
| [
"gsidle.math@gmail.com"
] | gsidle.math@gmail.com |
ddd7dae206499784efbd51e589fd4d5885dc7878 | 900538b72265209d65986f5faa5d3a2bf6d982c4 | /ExchangeEnv/bin/pip | 10cff6d9951ece9b7b2a47d564a0972ea06d63f7 | [] | no_license | nedstarksbastard/Exchange | 8a8f03759c7ed6adb5f38e472d29a83b9f91b64d | 91b4794352c20e4825ea518d00242fa8b6873aa0 | refs/heads/master | 2021-01-19T03:28:25.380318 | 2015-11-02T04:40:41 | 2015-11-02T04:40:41 | 45,372,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/Users/fizi/PycharmProjects/Exchange/ExchangeEnv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for pip inside this virtualenv:
# strips a trailing "-script.pyw"/".exe" suffix from argv[0] (Windows
# launcher convention), then hands control to pip's CLI entry point.
import re
import sys
from pip import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"fizi@Fizis-MacBook-Pro.local"
] | fizi@Fizis-MacBook-Pro.local | |
fb20480488e3ac27cd370e6b80bbab2bc1acb43b | 7f50730ad6dae57777151b1a82e56c61bcac31c3 | /main/__init__.py | b33c2feb7239d34154a8bde1ed9b258e755980b1 | [] | no_license | itmo-wad/task03_Tatarov_Dmitriy | c1d64c14c57e87e90ca469e65bc9739a5fcd6331 | 0f7b2bf2950340430e2d993c1dd6ed6966b78ed9 | refs/heads/master | 2022-04-22T10:04:33.289841 | 2020-04-18T16:48:22 | 2020-04-18T16:48:22 | 256,803,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from flask import Flask
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- acceptable for an exercise, but it
# should come from configuration in any deployed app.
app.secret_key = 'hello_world'
app.config.from_object(__name__)
# SQLite database URI consumed by the SQLAlchemy layer.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Imported at the bottom so models/views can import `app` without a cycle.
from main import models
from main import views
| [
"root@almosttchap.fr.kukuxumushi.icu"
] | root@almosttchap.fr.kukuxumushi.icu |
9054fc4b301d59244d0bdff543a2efdecb9e684e | c175a97a4a950eb4a3adcbfb7d1349499252e011 | /djangoapp/sample_env/bin/easy_install-3.5 | c7b6637257c153d0a1a61f0d95b5024738e5c274 | [] | no_license | kishorekumarkkk/django | 207353e0fdda97382a60359c97dc56a7aea3bcb1 | f3e290d3566a292c3d3192c4281d173cda6ec1fa | refs/heads/master | 2020-08-02T10:59:00.760397 | 2019-09-27T13:31:45 | 2019-09-27T13:31:45 | 211,325,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | 5 | #!/home/starsystems/Documents/Sample/djangoapp/sample_env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for setuptools' easy_install inside
# this virtualenv: strips a trailing "-script.py(w)"/".exe" suffix from
# argv[0], then calls the easy_install entry point.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"latheesh@star-systems.in"
] | latheesh@star-systems.in |
300a44200811432d661ee357ff954741ce18d469 | b2d7ef9491a80b44aa01751519f2d285bac938d7 | /NetworkAnalysisRNAnoPDB.py | 6eb3aaa533f5db73ce6daa75f0d486545880817c | [] | no_license | nicolenlama/WeeksScripts | 350dfc425227a98d46a7da1ca41843ede6741769 | 943ae2f73b073a25479546da9247d867f3ac13fa | refs/heads/master | 2023-01-10T13:20:29.531635 | 2023-01-04T22:51:06 | 2023-01-04T22:51:06 | 137,272,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,487 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 30 13:51:19 2018
Network Graph to test integrity of base pair detection. (inspired by
A.L. and N.L. meeting)
Note that a cutoff is imposed in the resCorr Function!!!
@author: nlama
"""
######################## IMPORT NECESSARY LIBRARIES ###########################
import os
import sys
import numpy as np
import csv
import pandas as pd
##### ADD PATHS ###############################################################
sys.path.append('D:\Documents2\Rotations\Weeks\PythonScripts') #add RNAtools dir
sys.path.append('D:\Documents2\Rotations\Weeks\RNAFiles\TPP')
############# CREATING NETWORK ANALYSIS CLASS #################################
class NetworkAnalysis(object):
    def __init__(self, directory = os.getcwd(), ct=None,
                 fileCorr=None):
        """Load a RING correlation file (plus optional .ct structure file)
        and precompute percentile cutoffs and the filtered pair list.

        NOTE(review): the ``directory`` default is evaluated once, at class
        definition time, not on each call.
        """
        #assign file names and appropriate directories
        self.directory = directory
        self.fileCorr = os.path.join(directory,fileCorr)
        self.rnaName = "champ.fill"
        if ct is not None:
            self.ct = self.readCt(ct) #returns ct, assigns rnaName and rnaLen
        self.fileName = os.path.splitext(os.path.basename(self.fileCorr))[0] #Get file name without extention
        self.corrF = open(self.fileCorr) #same for RING output
        # prepare distance, coordinate, and nt lists
        self.cutoffs = self.cutoffMaker(self.fileCorr)
        self.resCorrDist = self.resCorrDistFunc(WC=False) #get coordinates and distances
################################ FUNCTIONS ####################################
    def readCt(self,ctF):
        """Parse a .ct file: the header line sets self.rnaLen and
        self.rnaName; the remaining whitespace-split rows are returned as a
        pandas DataFrame."""
        ct = open(ctF)
        lines = []
        for i in range(1):
            l=ct.readline().split()#get header information
            self.rnaLen=int(l[0])
            self.rnaName=l[1]
        for line in ct:
            lineList = line.split()
            lines.append(lineList)
        ct.close()
        ct = pd.DataFrame(lines)
        return ct
    def fillResCorr(self,res,ij):
        """Pad ``res`` so every residue up to self.rnaLen has an entry;
        residues absent from ``ij`` get the placeholder row [0, n, 16]
        (16 appears to be a sentinel weight -- confirm)."""
        fillUpTo = int(res[0][0])
        x=1
        while x < fillUpTo:
            res.append([0,x,16])
            x+=1
        for num in range(x,self.rnaLen+1):
            ind = ij.get(num,0)
            if ind == 0:
                res.append([0,num,16])
        return res
    # Look up residues in coorPair for computing distances
    def resCorrDistFunc(self, WC = False):
        """Read RING pairs (first 2 header lines skipped) and keep those
        whose mean z-score (columns 5/6) is >= cutoffs[1]; returns rows of
        [i, j, mean z-score]. The ``WC`` flag is currently unused."""
        resCorrDist = [] #will store correlations and distances for residues i and j
        iJDict = {}
        for i in range(2):
            self.corrF.readline() #skip first 2 lines because they are headers
        for line in self.corrF:
            corrLine = line.split() #split line into list
            if len(corrLine) > 1:
                i = corrLine[0] #extract residue1 in pair
                j = corrLine[1] #extract residue2 in pair
                iJDict[int(i)]=int(j)
                iJDict[int(j)]=int(i)
                self.cutoff = self.cutoffs[1]
                if float(np.nanmean([float(corrLine[5]), float(corrLine[6])])) >= self.cutoff:
                    resCorrDist.append([i , j ,
                             float(np.nanmean([float(corrLine[5]), float(corrLine[6])]))]) #i,j,mean zscore
        if resCorrDist == []:
            raise ValueError("returned empty resCorrDist list")
        else:
#            resCorrDist=self.fillResCorr(resCorrDist,iJDict)
            return resCorrDist
    def cutoffMaker(self,fileCorr):
        """Collect the mean z-scores from the RING file and return (and
        store on self.m) their [25, 30, 75, 90]th percentiles."""
        corrF = open(fileCorr)
        cutoffVec = []
        for i in range(2):
            corrF.readline() #skip first 2 lines because they are headers
        for line in corrF:
            coor = line.split() #split line into list
            if len(coor) > 1:
                cutoffVec.append(np.nanmean([float(coor[5]), float(coor[6])])) #extract correlation
        self.q = [25,30,75,90]
        self.m = np.percentile(cutoffVec, self.q)
        return self.m
####################### PLOTTING FUNCTIONS ###################################
###############################################################################
if __name__ == '__main__':
    # alternate dataset/structure paths kept for reference:
#    directory = 'D:/Weeks/Data/NetworkAnalysis/resCorrDistFiles'
    directory = 'D:/Weeks/Data/JE_TPP_TMO'
#    ct = 'D:/Weeks/Data/BasePairDetectionProject/SecondaryStructure/AmpliconData/ec16S.ct'
    ct = 'D:/Weeks/Data/TPP_pairMap_ringMap/TPP_+L_structure.ct'
#    directory = 'D:/Weeks/Data/ringRnaseP'
#    ct = 'D:/Weeks/Data/BasePairDetectionProject/SecondaryStructure/AmpliconData/rnpB.ct'
#    directory = 'D:/Weeks/Data/NetworkAnalysis/TPP'
#    ct = 'D:/Weeks/Data/NetworkAnalysis/TPP/TPP_+L_structure.ct'
##
    fileCorr = 'tpp.l.win5.tmo.ring'
    net = NetworkAnalysis(
            directory = directory, ct=ct,
            fileCorr=fileCorr)
    # write the filtered pairs as a Source,Target,Weight edge-list CSV
    # (Gephi-style); NOTE(review): rowNum is incremented but never used.
    header = ['Source','Target','Weight']
    resCorrFileName = net.rnaName + '.'+ fileCorr + '.' + str(net.q[1]) + '.csv'
    rowNum=1
    with open(os.path.join('D:/Weeks/Data/NetworkAnalysis/TPPResCorr',resCorrFileName), "w+", newline = '') as file1:
        fWriter = csv.writer(file1, delimiter=',')
        hWriter = csv.writer(file1, delimiter=',')
        hWriter.writerow(header)
        for line in net.resCorrDist:
            fWriter.writerow(line)
            rowNum+=1
# | [
"noreply@github.com"
] | nicolenlama.noreply@github.com |
cd7b3ff1015a79bb290f2a3a576695c305ebd380 | e92e59fc797c3a0196a1eb10a7b6ffb57708a7a1 | /py/cluster.py | a316f89bc03bd66078972b144b73453677f42ab9 | [] | no_license | bederson/qa | 8898c0abb7eda2bca27113b11f6a7627bbfb178e | 0a6ad7b3f3f02e8eba2510b0338d9cac73637a8a | refs/heads/master | 2021-01-25T12:30:43.470830 | 2014-07-08T14:13:43 | 2014-07-08T14:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,194 | py | #
# This is part of "python-cluster". A library to group similar items together.
# Copyright (C) 2006 Michel Albert
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# MODIFICATION: Made changes to keep track of indices for K-means clustering
# input data. Changes marked with ATR. 2013
#
from types import TupleType
class ClusteringError(Exception):
    """Library-specific exception type for clustering failures."""
    pass
def flatten(L):
    """
    Flattens an arbitrarily nested list into a single flat list.
    A non-list argument is wrapped, i.e. flatten(x) == [x].

    Example:
        flatten([a,b,[c,d,[e,f]]]) = [a,b,c,d,e,f]
    """
    # Uses isinstance instead of the original `type(L) != type([])` check,
    # and extends per element instead of recursing on L[1:] slices (which
    # copied the tail on every call, making the original quadratic).
    if not isinstance(L, list):
        return [L]
    flat = []
    for item in L:
        flat.extend(flatten(item))
    return flat
def median(numbers):
    """Return the statistical median of *numbers*; the input is left
    unmodified."""
    ordered = sorted(numbers)
    n = len(ordered)
    middle = n // 2
    if n % 2:
        # odd count: the single central element
        return ordered[middle]
    # even count: average of the two central elements
    return (ordered[middle - 1] + ordered[middle]) / 2.0
def mean(numbers):
    """Return the arithmetic mean (average) of a numeric sequence as a
    float."""
    total = 0.0
    for value in numbers:
        total += value
    return total / len(numbers)
def minkowski_distance(x, y, p=2):
    """
    Calculates the minkowski distance between two points.

    PARAMETERS
        x - the first point (sequence of coordinates)
        y - the second point (same length as x)
        p - the order of the minkowski algorithm.
            Default = 2. This is equal to the euclidian distance.
            If the order is 1, it is equal to the manhatten
            distance.
            The higher the order, the closer it converges to the
            Chebyshev distance, which has p=infinity
    """
    from math import pow
    assert len(y) == len(x)
    # BUG FIX: the original asserted ``x >= 1`` (comparing the whole point
    # to an int, a TypeError on Python 3); the precondition is on ``p``.
    assert p >= 1
    total = 0  # renamed from ``sum`` to avoid shadowing the builtin
    for a, b in zip(x, y):
        total += abs(a - b) ** p
    return pow(total, 1.0 / float(p))
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
    """
    Build the 2D matrix whose cell [r][c] is ``combinfunc(list[r], list[c])``.

    PARAMETERS
        list       - the items to combine pairwise
        combinfunc - two-argument function producing each cell value
        symmetric  - when True, cells below the diagonal are mirrored from
                     the already-computed upper triangle instead of being
                     recomputed (use when combinfunc is symmetric).
                     Default: False
        diagonal   - constant value to place on the diagonal; when None,
                     diagonal cells are computed like any other cell.
                     Default: None
    """
    matrix = []
    for row_index, row_item in enumerate(list):
        row = []
        for col_index, col_item in enumerate(list):
            if diagonal is not None and col_index == row_index:
                # a constant diagonal was requested
                row.append(diagonal)
            elif symmetric and col_index < row_index:
                # lower-left triangle: mirror the transposed cell
                row.append(matrix[col_index][row_index])
            else:
                row.append(combinfunc(row_item, col_item))
        matrix.append(row)
    return matrix
def printmatrix(list):
    """
    Pretty-prints a 2-dimensional list cleanly: one pipe-separated row per
    line with right-aligned cells. This is useful for debugging.

    PARAMETERS
        list - the 2D-list to display
    """
    # the widest cell anywhere in the matrix determines the column width
    maxlen = 0
    colcount = len(list[0])
    for row in list:
        for cell in row:
            maxlen = max(len(str(cell)), maxlen)
    # FIX: the original used a Python-2-only `print` statement and a local
    # named `format`, shadowing the builtin. Output is unchanged.
    rowformat = "|" + (" %%%is |" % maxlen) * colcount
    for row in list:
        print(rowformat % tuple(row))
def magnitude(a):
    "Return the Euclidean length (2-norm) of the vector a"
    from math import sqrt
    return sqrt(sum(coord ** 2 for coord in a))
def dotproduct(a, b):
    "Return the dot product of the two equal-length vectors a and b"
    assert len(a) == len(b)
    return sum(x * y for x, y in zip(a, b))
def centroid(list, method=median):
    "returns the central vector of a list of vectors, aggregating each dimension with `method`"
    dims = len(list[0])
    return tuple(method([vector[d] for vector in list]) for d in range(dims))
class Cluster:
"""
A collection of items. This is internally used to detect clustered items in
the data so we could distinguish other collection types (lists, dicts, ...)
from the actual clusters. This means that you could also create clusters of
lists with this class.
"""
def __str__(self):
return "<Cluster@%s(%s)>" % (self.__level, self.__items)
def __repr__(self):
return self.__str__()
def __init__(self, level, *args):
"""
Constructor
PARAMETERS
level - The level of this cluster. This is used in hierarchical
clustering to retrieve a specific set of clusters. The higher
the level, the smaller the count of clusters returned. The
level depends on the difference function used.
*args - every additional argument passed following the level value
will get added as item to the cluster. You could also pass a
list as second parameter to initialise the cluster with that
list as content
"""
self.__level = level
if len(args) == 0: self.__items = []
else: self.__items = list(args)
def append(self, item):
"""
Appends a new item to the cluster
PARAMETERS
item - The item that is to be appended
"""
self.__items.append(item)
def items(self, newItems = None):
"""
Sets or gets the items of the cluster
PARAMETERS
newItems (optional) - if set, the items of the cluster will be
replaced with that argument.
"""
if newItems is None: return self.__items
else: self.__items = newItems
def fullyflatten(self, *args):
"""
Completely flattens out this cluster and returns a one-dimensional list
containing the cluster's items. This is useful in cases where some items
of the cluster are clusters in their own right and you only want the
items.
PARAMETERS
*args - only used for recursion.
"""
flattened_items = []
if len(args) == 0: collection = self.__items
else: collection = args[0].items()
for item in collection:
if isinstance(item, Cluster):
flattened_items = flattened_items + self.fullyflatten(item)
else:
flattened_items.append(item)
return flattened_items
def level(self):
"""
Returns the level associated with this cluster
"""
return self.__level
def display(self, depth=0):
"""
Pretty-prints this cluster. Useful for debuging
"""
print depth*" " + "[level %s]" % self.__level
for item in self.__items:
if isinstance(item, Cluster):
item.display(depth+1)
else:
print depth*" "+"%s" % item
def topology(self):
"""
Returns the structure (topology) of the cluster as tuples.
Output from cl.data:
[<Cluster@0.833333333333(['CVS', <Cluster@0.818181818182(['34.xls',
<Cluster@0.789473684211([<Cluster@0.555555555556(['0.txt',
<Cluster@0.181818181818(['ChangeLog', 'ChangeLog.txt'])>])>,
<Cluster@0.684210526316(['20060730.py',
<Cluster@0.684210526316(['.cvsignore',
<Cluster@0.647058823529(['About.py',
<Cluster@0.625(['.idlerc', '.pylint.d'])>])>])>])>])>])>])>]
Corresponding output from cl.topo():
('CVS', ('34.xls', (('0.txt', ('ChangeLog', 'ChangeLog.txt')),
('20060730.py', ('.cvsignore', ('About.py',
('.idlerc', '.pylint.d')))))))
"""
left = self.__items[0]
right = self.__items[1]
if isinstance(left, Cluster):
first = left.topology()
else:
first = left
if isinstance(right, Cluster):
second = right.topology()
else:
second = right
return first, second
def getlevel(self, threshold):
    """
    Retrieve all clusters up to a specific level threshold. This
    level-threshold represents the maximum distance between two clusters. So
    the lower you set this threshold, the more clusters you will receive and
    the higher you set it, you will receive less but bigger clusters.

    PARAMETERS
        threshold - The level threshold

    RETURNS
        A list of lists; each inner list holds the flattened leaf items of
        one cluster whose level is at or below the threshold.

    NOTE
        It is debatable whether the value passed into this method should
        really be as strongly linked to the real cluster-levels as it is right
        now. The end-user will not know the range of this value unless s/he
        first inspects the top-level cluster. So instead you might argue that
        a value ranging from 0 to 1 might be a more useful approach.
    """
    left = self.__items[0]
    right = self.__items[1]

    # if this object itself is below the threshold value we only need to
    # return it's contents as a list
    if self.level() <= threshold:
        return [self.fullyflatten()]

    # if this cluster's level is higher than the threshold we will investgate
    # it's left and right part. Their level could be below the threshold
    if isinstance(left, Cluster) and left.level() <= threshold:
        if isinstance(right, Cluster):
            return [left.fullyflatten()] + right.getlevel(threshold)
        else:
            # right is a plain item and forms a singleton cluster
            return [left.fullyflatten()] + [[right]]
    elif isinstance(right, Cluster) and right.level() <= threshold:
        if isinstance(left, Cluster):
            return left.getlevel(threshold) + [right.fullyflatten()]
        else:
            return [[left]] + [right.fullyflatten()]

    # Alright. We covered the cases where one of the clusters was below the
    # threshold value. Now we'll deal with the clusters that are above by
    # recursively applying the previous cases.
    if isinstance(left, Cluster) and isinstance(right, Cluster):
        return left.getlevel(threshold) + right.getlevel(threshold)
    elif isinstance(left, Cluster):
        return left.getlevel(threshold) + [[right]]
    elif isinstance(right, Cluster):
        return [[left]] + right.getlevel(threshold)
    else:
        return [[left], [right]]
class BaseClusterMethod:
    """
    Common base class of all clustering methods.

    It stores the caller-supplied distance function, keeps the original
    input untouched, and works on a shallow copy of it so clustering never
    mutates the caller's list.
    """

    def __init__(self, input, distance_function):
        """
        Constructs the object and starts clustering

        PARAMETERS
            input             - a list of objects
            distance_function - a function returning the distance - or
                                opposite of similarity
                                ( distance = -similarity ) - of two items
                                from the input. The closer two items are
                                related, the smaller this value needs to be,
                                with 0 meaning they are exactly the same.

        NOTES
            The distance function must be symmetric:
                distance(input[1], input[4]) == distance(input[4], input[1])
            This is very important for the clustering algorithm to work!
            Its return values must support arithmetic comparison (< or >);
            floats or ints are the simplest examples, but any comparable
            datatype is fine.
        """
        self.distance = distance_function
        self._input = input        # the original, untouched input
        self._data = input[:]      # working copy the algorithm may reorder

    def topo(self):
        """
        Returns the structure (topology) of the cluster.
        See Cluster.topology() for information.
        """
        return self.data[0].topology()

    @property
    def data(self):
        """The data that is currently in process."""
        return self._data

    @property
    def raw_data(self):
        """The raw data (data without being clustered)."""
        return self._input
class HierarchicalClustering(BaseClusterMethod):
    """
    Implementation of the hierarchical clustering method as explained in
    http://www.elet.polimi.it/upload/matteucc/Clustering/tutorial_html/hierarchical.html

    USAGE
        >>> from cluster import HierarchicalClustering
        >>> # or: from cluster import *
        >>> cl = HierarchicalClustering([123,334,345,242,234,1,3], lambda x,y: float(abs(x-y)))
        >>> cl.getlevel(90)
        [[345, 334], [234, 242], [123], [3, 1]]

    Note that all of the returned clusters are more than 90 apart
    """

    def __init__(self, data, distance_function, linkage='single'):
        """
        Constructor

        PARAMETERS
            data              - the items to cluster
            distance_function - absolute distance between two items
                                (see BaseClusterMethod.__init__ for details)
            linkage           - name of the cluster-distance method:
                                'single', 'complete', 'average' or 'uclus'
        """
        BaseClusterMethod.__init__(self, data, distance_function)
        # select the cluster-to-cluster distance implementation
        self.setLinkageMethod(linkage)
        # clustering happens lazily, on the first getlevel() call
        self.__clusterCreated = False

    def setLinkageMethod(self, method):
        """
        Sets the method to determine the distance between two clusters.

        PARAMETERS:
            method - The name of the method to use. It must be one of
                     'single', 'complete', 'average' or 'uclus'

        RAISES
            ValueError - when the name is not one of the supported methods
        """
        linkages = {
            'single': self.singleLinkageDistance,
            'complete': self.completeLinkageDistance,
            'average': self.averageLinkageDistance,
            'uclus': self.uclusDistance,
        }
        if method not in linkages:
            # The original used the Python-2-only "raise E, msg" form and
            # misspelt the message ("average of uclus").
            raise ValueError('distance method must be one of single, complete, average or uclus')
        self.linkage = linkages[method]

    def __as_item_list(self, operand):
        """Return the operand as a flat list of items, expanding clusters."""
        if isinstance(operand, Cluster):
            return operand.fullyflatten()
        return [operand]

    def uclusDistance(self, x, y):
        """
        The method to determine the distance between one cluster and another
        item/cluster. The distance equals the *median* distance from any
        member of one cluster to any member of the other cluster.

        PARAMETERS
            x - first cluster/item
            y - second cluster/item
        """
        x = self.__as_item_list(x)
        y = self.__as_item_list(y)
        distances = [self.distance(k, l) for k in x for l in y]
        return median(distances)

    def averageLinkageDistance(self, x, y):
        """
        The method to determine the distance between one cluster and another
        item/cluster. The distance equals the *average* (mean) distance from
        any member of one cluster to any member of the other cluster.

        PARAMETERS
            x - first cluster/item
            y - second cluster/item
        """
        x = self.__as_item_list(x)
        y = self.__as_item_list(y)
        distances = [self.distance(k, l) for k in x for l in y]
        return mean(distances)

    def completeLinkageDistance(self, x, y):
        """
        The method to determine the distance between one cluster and another
        item/cluster. The distance equals the *longest* distance from any
        member of one cluster to any member of the other cluster.

        PARAMETERS
            x - first cluster/item
            y - second cluster/item
        """
        x = self.__as_item_list(x)
        y = self.__as_item_list(y)
        # retrieve the maximum distance (complete-linkage); the original
        # comment wrongly said "minimum"
        maxdist = self.distance(x[0], y[0])
        for k in x:
            for l in y:
                maxdist = max(maxdist, self.distance(k, l))
        return maxdist

    def singleLinkageDistance(self, x, y):
        """
        The method to determine the distance between one cluster and another
        item/cluster. The distance equals the *shortest* distance from any
        member of one cluster to any member of the other cluster.

        PARAMETERS
            x - first cluster/item
            y - second cluster/item
        """
        x = self.__as_item_list(x)
        y = self.__as_item_list(y)
        # retrieve the minimum distance (single-linkage)
        mindist = self.distance(x[0], y[0])
        for k in x:
            for l in y:
                mindist = min(mindist, self.distance(k, l))
        return mindist

    def cluster(self, matrix=None, level=None, sequence=None):
        """
        Perform hierarchical clustering. It is invoked lazily by getlevel()
        the first time results are requested, so you normally do not need to
        call it explicitly. (The original docstring claimed the constructor
        calls it, which it does not.)

        PARAMETERS
            matrix   - The 2D list that is currently under processing. The
                       matrix contains the distances of each item with each
                       other
            level    - The current level of clustering
            sequence - The sequence number of the clustering
        """
        if matrix is None:
            # create level 0, first iteration (sequence)
            level = 0
            sequence = 0
            matrix = []

        # keep merging the two closest entries until only one cluster
        # (a matrix of at most two rows) remains
        while len(matrix) > 2 or matrix == []:
            matrix = genmatrix(self._data, self.linkage, True, 0)

            smallestpair = None
            mindistance = None
            rowindex = 0    # keep track of where we are in the matrix
            # find the minimum distance
            for row in matrix:
                cellindex = 0    # keep track of where we are in the matrix
                for cell in row:
                    # skip the diagonal (which is always 0) and remember
                    # every new minimum we encounter
                    if (rowindex != cellindex) and (cell < mindistance or smallestpair is None):
                        smallestpair = (rowindex, cellindex)
                        mindistance = cell
                    cellindex += 1
                rowindex += 1

            sequence += 1
            level = matrix[smallestpair[1]][smallestpair[0]]
            cluster = Cluster(level, self._data[smallestpair[0]], self._data[smallestpair[1]])

            # maintain the data by combining the two most similar items in
            # the list. Remove the item with the larger index first: removing
            # the smaller one first would shift the remaining items down by
            # one and invalidate the second index.
            self._data.remove(self._data[max(smallestpair[0], smallestpair[1])])  # remove item 1
            self._data.remove(self._data[min(smallestpair[0], smallestpair[1])])  # remove item 2
            self._data.append(cluster)  # append item 1 and 2 combined

        # all the data is in one single cluster. We return that and stop
        self.__clusterCreated = True
        return

    def getlevel(self, threshold):
        """
        Returns all clusters with a maximum distance of <threshold> in
        between each other

        PARAMETERS
            threshold - the maximum distance between clusters

        SEE-ALSO
            Cluster.getlevel(threshold)
        """
        # if it's not worth clustering, just return the data
        if len(self._input) <= 1:
            return self._input

        # build the cluster tree on first use
        if not self.__clusterCreated:
            self.cluster()
        return self._data[0].getlevel(threshold)
class KMeansClustering:
    """
    Implementation of the kmeans clustering method as explained in
    http://www.elet.polimi.it/upload/matteucc/Clustering/tutorial_html/kmeans.html

    USAGE
    =====

        >>> from cluster import KMeansClustering
        >>> cl = KMeansClustering([(1,1), (2,1), (5,3), ...])
        >>> clusters = cl.getclusters(2)
    """

    def __init__(self, data, distance=None):
        """
        Constructor

        PARAMETERS
            data     - A list of tuples or integers.
            distance - A function determining the distance between two items.
                       Default: It assumes the tuples contain numeric values
                       and applies a generalised form of the
                       euclidian-distance algorithm on them.

        RAISES
            ValueError - when the tuples have inconsistent dimensions, or
                         when non-tuple items are supplied without a
                         distance function.
        """
        self.__data = data
        self.distance = distance
        self.__initial_length = len(data)

        # test if each item is of same dimensions
        # (isinstance(..., tuple) replaces the Python-2-only types.TupleType)
        if len(data) > 1 and isinstance(data[0], tuple):
            control_length = len(data[0])
            for item in data[1:]:
                if len(item) != control_length:
                    raise ValueError("Each item in the data list must have the same amount of dimensions. Item", item, "was out of line!")

        # now check if we need and have a distance function
        if len(data) > 1 and not isinstance(data[0], tuple) and distance is None:
            raise ValueError("You supplied non-standard items but no distance function! We cannot continue!")
        # we now know that we have tuples, and assume therefore that it's items are numeric
        elif distance is None:
            self.distance = minkowski_distance

    def getclusters(self, n):
        """
        Generates <n> clusters

        PARAMETERS
            n - The amount of clusters that should be generated.
                n must be greater than 1

        RAISES
            ClusteringError - for nonsensical cluster counts

        RETURNS
            A dict with the clusters ("clusters") and the positions the
            clustered items had in the original input ("indices").
        """
        # only proceed if we got sensible input
        if n <= 1:
            raise ClusteringError("When clustering, you need to ask for at least two clusters! You asked for %d" % n)

        # return the data straight away if there is nothing to cluster
        if self.__data == [] or len(self.__data) == 1 or n == self.__initial_length:
            # return self.__data
            raise ClusteringError("Unable to generate clusters. You supplied %d items, and asked for %d clusters." %
                                  (self.__initial_length, n))  # ATR raise exception instead of input data

        # It makes no sense to ask for more clusters than data-items available
        if n > self.__initial_length:
            raise ClusteringError("""Unable to generate more clusters than items
available. You supplied %d items, and asked for %d clusters.""" %
                                  (self.__initial_length, n))

        self.initialiseClusters(self.__data, n)

        items_moved = True  # tells us if any item moved between the clusters,
                            # as we initialised the clusters, we assume that
                            # is the case
        while items_moved is True:
            items_moved = False
            cluster_index = 0  # ATR
            for cluster in self.__clusters:
                # Iterate over a snapshot: assign_item() may move items out
                # of this very cluster, and mutating a list while iterating
                # it silently skips elements (bug in the original).
                for item in cluster[:]:
                    res = self.assign_item(item, cluster, cluster_index)  # ATR - added cluster_index param
                    if items_moved is False:
                        items_moved = res
                cluster_index += 1  # ATR
        return {"clusters": self.__clusters, "indices": self.__cluster_input_indices}  # ATR - change to return dict instead of just self.__clusters

    def assign_item(self, item, origin, origin_index):  # ATR - added origin_index param
        """
        Assigns an item from a given cluster to the closest located cluster

        PARAMETERS
            item         - the item to be moved
            origin       - the originating cluster
            origin_index - index of the originating cluster

        RETURNS
            True when the item was moved, False otherwise.
        """
        closest_cluster = origin
        # initialise so the index is defined even when no closer cluster is
        # found (the original could reference it before assignment)
        closest_cluster_index = origin_index
        cluster_index = 0  # ATR
        for cluster in self.__clusters:
            if self.distance(item, centroid(cluster)) < self.distance(item, centroid(closest_cluster)):
                closest_cluster = cluster
                closest_cluster_index = cluster_index  # ATR
            cluster_index += 1  # ATR

        if closest_cluster != origin:
            self.move_item(item, origin, closest_cluster, origin_index, closest_cluster_index)  # ATR - added origin_index, closest_cluster_index params
            return True
        else:
            return False

    def move_item(self, item, origin, destination, origin_index, destination_index):  # ATR - added origin_index, destination_index params
        """
        Moves an item from one cluster to another cluster, keeping the
        parallel input-index bookkeeping in sync.

        PARAMETERS
            item              - the item to be moved
            origin            - the originating cluster
            destination       - the target cluster
            origin_index      - index of the originating cluster
            destination_index - index of the target cluster
        """
        origin_item_index = origin.index(item)  # ATR
        destination.append(origin.pop(origin_item_index))  # ATR - pulled out origin.index(item)
        self.__cluster_input_indices[destination_index].append(self.__cluster_input_indices[origin_index].pop(origin_item_index))  # ATR

    def initialiseClusters(self, input, clustercount):
        """
        Initialises the clusters by distributing the items from the data
        evenly across n clusters

        PARAMETERS
            input        - the data set (a list of tuples)
            clustercount - the amount of clusters (n)
        """
        # initialise the clusters with empty lists
        self.__clusters = []
        self.__cluster_input_indices = []  # ATR
        # range() works on both Python 2 and 3 (xrange was Python-2-only)
        for x in range(clustercount):
            self.__clusters.append([])
            self.__cluster_input_indices.append([])  # ATR

        # distribute the items into the clusters round-robin
        count = 0
        for item in input:
            self.__clusters[count % clustercount].append(item)
            self.__cluster_input_indices[count % clustercount].append(count)  # ATR
            count += 1
| [
"anne.bobrose@gmail.com"
] | anne.bobrose@gmail.com |
dc346aff4ab8aec375ecc865fe723437ac5f7d1e | 3cb51259f14285e56a86416662b2b5d0d00a0266 | /eve-download-sov-maps.py | 5784e9e9d673918dc6a4a5a87b1e88ca32afa5fc | [] | no_license | sdonovan1985/eve-sov-map-downloader | 3810c6ba54e601859b37460f60433c84cf4c2b8a | cedaed2906be450a7bf2c68d5a564d543f3753dc | refs/heads/master | 2021-01-21T14:43:15.460346 | 2016-06-13T19:25:22 | 2016-06-13T19:25:22 | 59,485,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import urllib2
import os.path
def get_file(url, filename):
    """
    Download *url* into sov_maps/<filename>, retrying up to 3 times.

    Files that already exist locally are skipped. Each failed attempt is
    logged; after 3 failures the file is silently given up on.

    PARAMETERS
        url      - remote location of the image
        filename - basename to store the download under sov_maps/
    """
    attempts = 0
    savepath = "sov_maps/%s" % (filename)
    if os.path.isfile(savepath):
        print("%s already exists" % (savepath))
        return
    while attempts < 3:
        print("Attempt %d : %s" % (attempts, url))
        try:
            response = urllib2.urlopen(url, timeout = 2)
            content = response.read()
            # "wb": the payload is a PNG. The original opened the file in
            # text mode ("w"), which corrupts binary data on Windows; the
            # with-block also guarantees the handle is closed on error.
            with open(savepath, 'wb') as f:
                f.write(content)
            print(" Success")
            break
        except urllib2.URLError as e:
            attempts += 1
            print(type(e))
# Brute-force every calendar date in 2016; non-existent dates (e.g. Feb 30)
# simply fail to download and are given up on after get_file's 3 attempts.
for year in [2016]:  # a list, since range(2016, 2016) would be empty
    for month in range(1, 13):
        for day in range(1, 32):
            stamp = "%d%02d%02d" % (year, month, day)
            filename = stamp + ".png"
            url = "http://go-dl1.eve-files.com/media/corp/verite/%s" % filename
            get_file(url, filename)
| [
"sdonovan@gatech.edu"
] | sdonovan@gatech.edu |
e97f2fe3ef2c6df9a278d835ca974de5dbb54239 | 77ffbed85ca1a37980bdb967631fda17c1e231fe | /effects/photo_invert.py | d4d6be8b6f21f13288e092fe8c04120c5e34b59d | [] | no_license | fricker12/Photo_Booth_Telegram_Bot | 40eebe80f0f18ec14ff23eb5332b01986d72bf97 | 8db54e05eb2c573c8ee895f9a7a0c4457665350f | refs/heads/master | 2023-03-22T06:55:37.325631 | 2019-05-24T14:56:15 | 2019-05-24T14:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import os
def apply_invert(photo, message, bot):
    '''
    Inverts the given photo and sends the result back to the chat.

    :param photo: path to an image file
    :param message: the message sent to the bot
    :param bot: telegram bot
    :return: None (the inverted image is sent through the bot)
    '''
    opened_photo = cv2.imread(photo)
    inved_photo = cv2.bitwise_not(opened_photo)
    result = "photos/inved" + str(message.message_id) + ".jpg"
    cv2.imwrite(result, inved_photo)
    bot.send_message(message.chat.id, "Вжух!!!")
    # Close the handle before deleting the file: the original passed an open
    # file object to send_photo and never closed it, which leaks the
    # descriptor and makes the os.remove below fail on Windows.
    with open(result, "rb") as inverted_file:
        bot.send_photo(message.chat.id, inverted_file)
    # NOTE(review): "photos/inving<id>.jpg" is not created in this function;
    # presumably it is written by the caller before invoking this effect -
    # confirm, otherwise this raises if the file is missing.
    os.remove("photos/inving" + str(message.message_id) + ".jpg")
    os.remove(result)
"noreply@github.com"
] | fricker12.noreply@github.com |
d7f53e22fde0ca53ee451f3ff3b5e007a16c8a41 | 9c61ec2a55e897e4a3bb9145296081c648d812c4 | /docs/cd/06443007程式碼/ch01/1-8.py | d27b4e821287c1d67dba80b1f5b27da4d527b6e6 | [] | no_license | wildboy2arthur/ML-Class | 47899246251d12972a6d3875160c1cc8d1052202 | 345c86e3f8890919d59a63a79674acbdcd4577c4 | refs/heads/main | 2023-07-16T11:32:07.683652 | 2021-08-24T08:25:04 | 2021-08-24T08:25:04 | 399,388,026 | 0 | 0 | null | 2021-08-24T08:18:36 | 2021-08-24T08:18:35 | null | UTF-8 | Python | false | false | 240 | py | def cal_price_dict(k_cost):
# NOTE(review): 0.03 presumably converts the k_cost figure into NT cost -
# confirm the unit/currency semantics with the original exercise.
rate = 0.03
markup = 0.2  # 20% added on top of the cost to form the selling price
nt_cost = k_cost * rate
nt_price = nt_cost * (1 + markup)
data = {
    'k_cost': k_cost,
    'nt_cost': nt_cost,
    'nt_price': nt_price
}
return data | [
"1101404110@nkust.edu.tw"
] | 1101404110@nkust.edu.tw |
28de8a5346c0302e5c29884b041a8ac2b71aa5ac | a44c4aa814fc388533e7e7d573dd3f18243af48c | /Lesson 4/problem3.py | 2f3179c8f5ea0e9e858de45a9740ff8f59dfa393 | [] | no_license | Jadakis/Unit-3_Lesson4 | c608bf23dad027cbaddbb4a786e0ab6c99edb9eb | 2ab281d865d96f93c88a343d00fa1939d4c071ae | refs/heads/master | 2020-04-09T09:29:49.740062 | 2018-12-03T18:28:09 | 2018-12-03T18:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | e = 89
# Remaining hard-coded scores (a fourth value, e = 89, is assigned just above).
m = 95
h = 87
s = 84
# NOTE(review): the next line computes e + m + h + (s / 4) because division
# binds tighter than addition; an average of the four scores would need
# (e + m + h + s) / 4 - confirm the intended formula.
a = e + m + h + s / 4 | [
"noreply@github.com"
] | Jadakis.noreply@github.com |
e54c0547e26521c86dc87571c8a0b466c9e91a8b | 6f795c1b977d6256b582a48bcb178a3e9a4bd505 | /exploit/web/Zabbix/sql_injection.py | 104780ebafe1ddcc1895cb238a3a142d4c6e9286 | [] | no_license | w4ter/myscan | a23faaeb9b01f1e1c8c26112bb57cd00e1482d8f | 6555033f932fb7441b59de4f2faf099815e685e7 | refs/heads/master | 2023-08-21T15:57:31.380321 | 2021-10-25T09:12:23 | 2021-10-25T09:12:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,933 | py | # coding=utf-8
# @Author : zpchcbd HG team
# @Time : 2021-09-15 11:58
import hashlib
from colorama import Fore
from tqdm import tqdm
from core.MyGlobalVariableManager import GlobalVariableManager
from exploit.web import BaseScript
from core.MyEnums import *
from core.MyAsyncHttp import *
# fofa: app="ZABBIX-监控系统" && country="CN"
# python batch.py -m exploit.web.Zabbix.sql_injection -cs -fs "app=\"ZABBIX-监控系统\" && country=\"CN\""
class Script(BaseScript):
    """
    Detection / exploitation module for the Zabbix jsrpc.php SQL injection
    (CVE-2016-10134). detect() fingerprints the target via its favicon MD5;
    exec() fires the injection URL and looks for an SQL error in the reply.
    """
    # product name; also the key into the shared exploit-rule registry
    name = 'Zabbix'

    def __init__(self, target, session):
        """
        PARAMETERS
            target  - host[:port] or full URL of the candidate target
            session - shared aiohttp client session used for all requests
        """
        super().__init__()
        # target under test
        self.target = target
        # severity of the vulnerability
        self.bugLevel = BugLevel.HIGH
        # vulnerability class
        self.bugType = BugType.SQLINJECTION
        # public vulnerability identifier
        self.bugNumber = 'CVE-2016-10134'
        # reference link (unused)
        self.refer = ''
        # paths fetched to fingerprint the product
        self.detectPathList = ['/favicon.ico']
        # exploit request path(s); the profileIdx2 parameter carries the injection
        self.execPathList = ['/jsrpc.php?sid=0bcd4ade648214dc&type=9&method=screen.get&timestamp=1471403798083&mode=2&screenid=&groupid=&hostid=0&pageFile=history.php&profileIdx=web.item.graph&profileIdx2=2%273297&updateProfile=true&screenitemid=&period=3600&stime=20160817050632&resourcetype=17&itemids%5B23297%5D=23297&action=showlatest&filter=&filter_task=&mark_color=1']
        # shared aiohttp session
        self.session = session
        # extra information collected during scanning
        self.info = ''
        # known MD5 checksums of the Zabbix favicon
        self.favicon = ['0fbe700fd7d07ec8d30ef8b3ac261484']

    async def detect(self):
        """
        Fingerprint the target as Zabbix by comparing its favicon MD5 with
        the known checksums. Sets self.flag and returns a finding dict on a
        match; returns None when not detected or on any error.
        """
        try:
            # recognition cache shared by all modules for this product type
            checkList = GlobalVariableManager.getValue('exploitRule')[self.name]
            for checkTarget in checkList:
                if self.target == checkTarget:
                    # NOTE(review): the label says 'JBoss Finger' - likely a
                    # copy-paste leftover from another module.
                    tqdm.write(Fore.RED + '[{}] detect method skip...'.format('JBoss Finger'))
                    # this target's framework has already been fingerprinted,
                    # so the probe can be skipped; only the flag needs to be
                    # set so that exec() still runs
                    self.flag = True
                    return None
            for detectPath in self.detectPathList:
                url = f'http://{self.target}{detectPath}' if self.target.startswith(
                    ('http:', 'https:')) is False else f'{self.target}{detectPath}'
                async with self.session.get(url=url, headers=self.headers, timeout=self.reqTimeout, verify_ssl=False) as response:
                    if response is not None:
                        text = await response.read()
                        await asyncio.sleep(2)
                        m1 = hashlib.md5()
                        m1.update(text)
                        theMD5 = m1.hexdigest()
                        for _ in self.favicon:
                            if _ == theMD5:
                                self.flag = True
                                checkList.append(self.target)
                                tqdm.write(Fore.RED + '[{}] {}'.format('Zabbix Finger', url))
                                return {'name': 'Zabbix Finger', 'url': url, 'software': 'Zabbix'}
        except Exception:
            # network/parse failures are treated as "not detected"
            return None

    async def exec(self):
        """
        Fire the jsrpc.php injection request; an 'SQL syntax' fragment in
        the response body confirms the vulnerability. Returns a finding
        dict, or None when not vulnerable or on error.
        """
        try:
            for execPath in self.execPathList:
                url = f'http://{self.target}{execPath}' if self.target.startswith(
                    ('http:', 'https:')) is False else f'{self.target}{execPath}'
                async with self.session.get(url=url, headers=self.headers, timeout=self.reqTimeout, verify_ssl=False) as response:
                    text = await response.text()
                    await asyncio.sleep(2)
                    if response is not None and 'SQL syntax' in text:
                        tqdm.write(Fore.RED + '[{}] {}'.format('Zabbix Sql Injection', url))
                        return {'name': 'Zabbix Sql Injection', 'url': url, 'software': 'Zabbix'}
        except Exception:
            return None

    # def CVE_2020_11800(self):
    #     '''
    #     Zabbix Server trapper command injection vulnerability
    #     Reference: https://github.com/vulhub/vulhub/tree/master/zabbix/CVE-2020-11800
    #     :return:
    #     '''
    #
    #     def send(ip, data):
    #         conn = socket.create_connection((ip, 10051), 10)
    #         conn.send(json.dumps(data).encode())
    #         data = conn.recv(2048)
    #         conn.close()
    #         return data
    #
    #     host = urlparse(self.url).netloc.split(':')[0]
    #     print(host)
    #     try:
    #         print(send(host, {"request": "active checks", "host": "vulhub", "ip": "ffff:::;touch /tmp/success2"}))
    #         for i in range(10000, 10500):
    #             data = send(host, {"request": "command", "scriptid": 1, "hostid": str(i)})
    #             if data and b'failed' not in data:
    #                 print('hostid: %d' % i)
    #                 print(data)
    #     except Exception as e:
    #         # print(e.args)
    #         return False

    async def attack(self, semaphore, pbar):
        """
        Run detect() and, when the fingerprint matches, exec() under the
        given concurrency semaphore. Findings are appended to self.vulList;
        the progress bar is advanced once per target.
        """
        async with semaphore:
            a = await self.detect()
            if a is not None:
                self.vulList.append(a)
            if self.flag:
                b = await self.exec()
                if b is not None:
                    self.vulList.append(b)
            pbar.update(1)
            return self.vulList
if __name__ == '__main__':
    # Ad-hoc manual check: fetch one live target's favicon and print its
    # MD5 so it can be compared with the checksum list in Script.favicon.
    import requests
    import hashlib
    resp = requests.get('http://47.92.74.171:8080/favicon.ico', verify=False)
    if resp.status_code == 200:
        m1 = hashlib.md5()
        m1.update(resp.content)
        theMD5 = m1.hexdigest()
        print(theMD5)
    # sem = asyncio.Semaphore(500)
    # sc = Script('61.150.65.205:9999', 1, sem)
    # l = asyncio.get_event_loop()
    # l.run_until_complete(sc.attack())
| [
"1352483315@qq.com"
] | 1352483315@qq.com |
4c4e384257a8241bd1e50f269affbb7ba12233fc | 984e3b755cb11c1386360d6ca9260470ab002213 | /tubers/youtubers/views.py | 80c42fb585464d3e198ac5a6478ab8df23f2006e | [] | no_license | tussshar27/Youtubers-Clients-Business-Application-Project-in-Django | 9de35a8bba738329cb2108e0c824eb6048ca6ba6 | e6915b9d9c4b0d7f53d310155d32e3dbe51ee4b5 | refs/heads/master | 2023-07-06T07:47:32.992559 | 2021-07-29T09:13:56 | 2021-07-29T09:13:56 | 372,147,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | from django.shortcuts import render, get_object_or_404
from .models import Youtuber
# Create your views here.
def youtubers(request):
    """
    List all youtubers, newest first, optionally filtered by the
    city/camera_type/category GET parameters (case-insensitive exact match).
    """
    tubers = Youtuber.objects.order_by('-created_date')
    # distinct values feed the search drop-downs in the template
    city_search = Youtuber.objects.values_list('city', flat=True).distinct()
    camera_type_search = Youtuber.objects.values_list('camera_type', flat=True).distinct()
    category_search = Youtuber.objects.values_list('category', flat=True).distinct()

    # apply each optional filter; QueryDict.get returns None when the
    # parameter is absent, and empty strings are skipped just like None
    for field in ('city', 'camera_type', 'category'):
        value = request.GET.get(field)
        if value:
            tubers = tubers.filter(**{field + '__iexact': value})

    data = {
        'tubers': tubers,
        'city_search': city_search,
        'camera_type_search': camera_type_search,
        'category_search': category_search
    }
    return render(request, 'youtubers/tubers.html', data)
def youtubers_detail(request, id):
    """Render the detail page for a single youtuber; 404 for unknown ids."""
    tuber = get_object_or_404(Youtuber, pk=id)
    return render(request, 'youtubers/youtuber_detail.html', {'tuber': tuber})
def search(request):
    """
    Search youtubers by a free-text keyword (matched against the
    description) combined with the optional city/camera_type/category
    filters, newest first.
    """
    tubers = Youtuber.objects.order_by('-created_date')
    # distinct values feed the search drop-downs in the template
    city_search = Youtuber.objects.values_list('city', flat=True).distinct()
    camera_type_search = Youtuber.objects.values_list('camera_type', flat=True).distinct()
    category_search = Youtuber.objects.values_list('category', flat=True).distinct()

    # free-text search: case-insensitive substring match on the description
    keyword = request.GET.get('keyword')
    if keyword:
        tubers = tubers.filter(description__icontains=keyword)

    # exact-match filters; absent or empty parameters are skipped
    for field in ('city', 'camera_type', 'category'):
        value = request.GET.get(field)
        if value:
            tubers = tubers.filter(**{field + '__iexact': value})

    data = {
        'tubers': tubers,
        'city_search': city_search,
        'camera_type_search': camera_type_search,
        'category_search': category_search
    }
return render(request, 'youtubers/search.html', data) | [
"tushar.annam@gmail.com"
] | tushar.annam@gmail.com |
4da999cb489a900fa165b6cd924ab3776644bd18 | 9973dd9a35333f1b24e4c1e3cd2098391d17e193 | /clones/migrations/0002_auto_20200216_2103.py | d210099b236272054745ccd1c53767889b1d5bc6 | [] | no_license | smilepogz/FinalTrelloClone | 5140f804ceeb02e6969cb5693daa3cad7e296961 | 9affade23a0b911baa5fa11d9d2ce83e3db669e7 | refs/heads/master | 2021-01-04T11:20:08.893932 | 2020-02-17T13:44:11 | 2020-02-17T13:44:11 | 240,524,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # Generated by Django 3.0.3 on 2020-02-16 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clones', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='boardlist',
name='title',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='card',
name='Attachment',
field=models.FileField(upload_to=''),
),
migrations.AlterField(
model_name='card',
name='description',
field=models.TextField(blank=True, max_length=10),
),
]
| [
"you@example.com"
] | you@example.com |
020942a036c94976bc69092a9f4d19b9c8c7ad90 | 8f455679fdb8e05c4c78141a8065250696d68d89 | /MultiNetV1.py | f46219308f3cf2135c2153d96f56870b3514b6ff | [
"MIT"
] | permissive | x5g/dogs_vs_cats | 63a17ac914ded5850d6d4e745408d50e4d242f74 | 8a6b992fe9abc6b20b31729eaec79ca8d6ec12e0 | refs/heads/master | 2022-10-20T02:25:51.097115 | 2020-06-09T17:21:52 | 2020-06-09T17:21:52 | 271,065,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,326 | py | import plaidml.keras
# plaidml.keras was imported on the previous line; it must be installed as
# the Keras backend BEFORE keras itself is imported, so line order matters.
plaidml.keras.install_backend()
import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
import keras
import matplotlib.pyplot as plt
import numpy as np
import ssl
# disable TLS certificate verification - presumably so the pretrained
# ImageNet weight downloads succeed behind broken proxies; confirm
ssl._create_default_https_context = ssl._create_unverified_context

# input image geometry and training hyper-parameters
ROWS = 299
COLS = 299
CHANNELS = 3
batch_size = 32
epochs = 10

# dataset directories
train_dir = './train2'
validation_dir = './validation'
test_dir = './test1'

# Two ImageNet-pretrained backbones share a single input; their globally
# average-pooled features are concatenated and fed into a 2-way softmax.
Inp = keras.layers.Input((ROWS, COLS, CHANNELS))
InceptionV3_model = keras.applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(ROWS, COLS, CHANNELS))
Xception_model = keras.applications.Xception(weights='imagenet', include_top=False, input_shape=(ROWS, COLS, CHANNELS))
InceptionV3_layers = InceptionV3_model(Inp)
InceptionV3_layers = keras.layers.GlobalAveragePooling2D()(InceptionV3_layers)
Xception_layers = Xception_model(Inp)
Xception_layers = keras.layers.GlobalAveragePooling2D()(Xception_layers)
x = keras.layers.Concatenate()([InceptionV3_layers, Xception_layers])
output = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs=Inp, outputs=output)
# freeze both backbones: only the final Dense classifier is trained
for layer in InceptionV3_model.layers:
    layer.trainable = False
for layer in Xception_model.layers:
    layer.trainable = False
keras.utils.plot_model(model, show_shapes=True, show_layer_names=True, to_file='MultiNetV1_model.pdf')
train_datagen = keras.preprocessing.image.ImageDataGenerator(
    rotation_range = 40,      # random rotation, in degrees
    width_shift_range = 0.2,  # random horizontal shift
    height_shift_range = 0.2, # random vertical shift
    rescale = 1/255,          # normalise pixel values to [0, 1]
    shear_range = 20,         # random shear transform
    zoom_range = 0.2,         # random zoom
    horizontal_flip = True,   # random horizontal flip
    fill_mode = 'nearest',    # fill strategy for newly created pixels
    )
test_datagen = keras.preprocessing.image.ImageDataGenerator(
    rescale = 1/255,          # normalise pixel values to [0, 1]
    )
# training data stream, read from class-labelled subdirectories
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(ROWS,COLS),
    batch_size=batch_size,
)
# validation data stream (no augmentation, only rescaling)
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(ROWS,COLS),
    batch_size=batch_size,
)
model.summary()
# optimizer, loss function, and accuracy metric for training
model.compile(optimizer=keras.optimizers.SGD(lr=1e-4, momentum=0.9), loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
## Callback for loss logging per epoch
class LossHistory(keras.callbacks.Callback):
    """Record training and validation loss at the end of every epoch."""

    def on_train_begin(self, logs=None):
        # fresh lists for each training run
        self.losses = []
        self.val_losses = []

    def on_epoch_end(self, batch, logs=None):
        # logs=None instead of the original mutable default argument
        # (logs={}), which is shared between calls and a classic pitfall
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
# stop training when validation loss fails to improve for 3 epochs in a row
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
lossHistory = LossHistory()
history = model.fit_generator(
    generator = train_generator,
    steps_per_epoch=len(train_generator),
    epochs = epochs,
    validation_data=validation_generator,
    validation_steps=len(validation_generator),
    callbacks = [lossHistory, early_stopping])
# persist the trained model (architecture + weights) to disk
model.save('MultiNetV1.h5')
# per-epoch metric curves recorded by Keras during training
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# acc = [
# 0.9014070402083021,
# 0.9552851634870563,
# 0.9575885033298283,
# 0.9616944569640881,
# 0.9623454008312052,
# 0.9634469981488059,
# 0.963747433781964,
# 0.9642982324370337,
# 0.9672024435431376,
# 0.9662009914375845]
# val_acc = [
# 0.9805572257894484,
# 0.9821607535505228,
# 0.98296251743106,
# 0.9831629585087192,
# 0.9825616355983163,
# 0.9841651633593906,
# 0.984365604222,
# 0.9845660452996593,
# 0.9851673683414814,
# 0.9851673681025372]
# loss = [
# 0.34548001789042687,
# 0.1829768680474425,
# 0.15205100328394244,
# 0.1336793582993715,
# 0.12181056393720338,
# 0.11529702214687088,
# 0.1095373861976298,
# 0.10428516739372867,
# 0.10034206073545955,
# 0.09901416560581902]
# val_loss = [
# 0.16728722282750116,
# 0.11115399416999794,
# 0.0901722999804482,
# 0.07770438194887197,
# 0.07115493825619816,
# 0.06525685261254752,
# 0.0611271229343917,
# 0.058128020974982354,
# 0.05485415271406638,
# 0.05218703313500113]
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages('MultiNetV1_result.pdf')
from matplotlib.ticker import MultipleLocator
# 绘制训练 & 验证的准确率值
fig = plt.figure()
ax = fig.add_subplot(111)
lns1 = ax.plot(acc, color='blue', linestyle='-', label='Train accuracy')
lns2 = ax.plot(val_acc, color='orange', linestyle='-', label='Validation accuracy')
ax2 = ax.twinx()
lns3 = ax2.plot(loss, color='red', linestyle='-', label='Train loss')
lns4 = ax2.plot(val_loss, color='green', linestyle='-', label='Validation loss')
lns = lns1 + lns2 + lns3 + lns4
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc='right')
# ax.legend(lns, labs, loc=0)
ax.grid()
ax.set_xlabel("Epoch")
ax.set_ylabel("Accuracy")
x_major_locator = MultipleLocator(1)
y_major_locator = MultipleLocator(0.01)
ax.xaxis.set_major_locator(x_major_locator)
ax.set_xlim(0, 9)
ax.set_ylim(0.90, 0.99)
ax.yaxis.set_major_locator(y_major_locator)
ax2.yaxis.set_major_locator(MultipleLocator(0.05))
ax2.set_ylabel("Loss")
ax2.set_ylim(0.05, 0.35)
# ax2.legend(loc=0)
plt.title('Training and validation accuracy and loss')
# plt.show()
# plt.savefig('MultiNetV1_result.png')
plt.tight_layout()
print('savefig...')
pdf.savefig()
plt.close()
pdf.close()
with open("MultiNetV1.txt", 'a+') as f:
f.write('acc\n')
for item in acc:
f.write("{}\n".format(item))
f.write('val_acc\n')
for item in val_acc:
f.write("{}\n".format(item))
f.write('loss\n')
for item in loss:
f.write("{}\n".format(item))
f.write('val_loss\n')
for item in val_loss:
f.write("{}\n".format(item))
def read_image(file_path):
from PIL import Image
img = Image.open(file_path)
if img.mode != 'RGB':
img = img.convert('RGB')
return img.resize((ROWS, COLS), Image.NEAREST)
def predict():
result = []
model = keras.models.load_model('MultiNetV1.h5')
test_images = [test_dir + '/' + str(i) + '.jpg' for i in range(1, 12501)]
count = len(test_images)
data = np.ndarray((count, ROWS, COLS, CHANNELS), dtype=np.float32)
for i, image_file in enumerate(test_images):
image = read_image(image_file)
data[i] = np.asarray(image) / 255.0
if i % 250 == 0: print('处理 {} of {}'.format(i, count))
test = data
predictions = model.predict(test, verbose=1)
print(predictions)
for i in range(len(predictions)):
dog_pre = predictions[i, 1]
if dog_pre <= 0.005:
result.append(0.005)
elif dog_pre >=0.995:
result.append(0.995)
else:
result.append(dog_pre)
# if predictions[i, 0] >= 0.5:
# result.append(0.005)
# else:
# result.append(0.995)
return result
result = predict()
print(result)
import pandas as pd
# 字典中的key值即为csv中列名
dataframe = pd.DataFrame({'id': [i for i in range(1, 12501)], 'label': result})
# 将DataFrame存储为csv,index表示是否显示行名,default=True
dataframe.to_csv("MultiNetV1_result.csv", index=False, sep=',')
| [
"1098766468@qq.com"
] | 1098766468@qq.com |
8e3f054d598f85623ae2376aac935bda04e154d6 | afbae26b958b5ef20548402a65002dcc8e55b66a | /ironstubs/process_stubs.py | 570dd3fd93b8d1d96302c69f2f0d497a9dd5adf3 | [
"MIT"
] | permissive | gtalarico/ironpython-stubs | d875cb8932c7644f807dc6fde9dd513d159e4f5c | c7f6a6cb197e3949e40a4880a0b2a44e72d0a940 | refs/heads/master | 2023-07-12T01:43:47.295560 | 2022-05-23T18:12:06 | 2022-05-23T18:12:06 | 95,340,553 | 235 | 88 | NOASSERTION | 2023-07-05T06:36:28 | 2017-06-25T05:30:46 | Python | UTF-8 | Python | false | false | 6,253 | py | """ Stub Generator for IronPython
Extended script based on script developed by Gary Edwards at:
gitlab.com/reje/revit-python-stubs
This is uses a slightly modify version of generator3,
github.com/JetBrains/intellij-community/blob/master/python/helpers/generator3.py
Iterates through a list of targeted assemblies and generates stub directories
for the namespaces using pycharm's generator3.
Note:
Some files ended up too large for Jedi to handle and would cause
memory errors and crashes - 1mb+ in a single files was enough to
cause problems. To fix this, there is a separate module that creates
a compressed version of the stubs, but it also split large file
into separate files to deal with jedi.
These directories will show up in the stubs as (X_parts)
MIT LICENSE
https://github.com/gtalarico/ironpython-stubs
Gui Talarico
--------------------------------------------------------------------------
Large files, such as `System/__init__.py` or `Revit/DB/__init__.py`
can exceed memory limits and crash the system.
These files need to be optimized so Jedi won't misbehave and crash your system
when parsing these files to index autocomplete options.
The primary strategies are:
1. Remove unecessary characters (empty lines, extra spaces, etc)
2. Split Large file into parts to improve Jedi perfomance and avoid crashes
#1 is very straight forward. Use a few regexes.
#2 is more complex. Some of the stubs created by generator3 such as DB/__init__.py
had nearyly 2mb. Doesn't seem like much, but for a raw .py file, that's more than
120K lines. System.Windows.Forms had over 7mb.
The strategy here was simple. Take all the classes inside this monster files,
create separate files for each one, and import them back into the original file.
For an example, compare:
`\stubs\Autodesk\Revit\DB\__init__.py`
and
``\stubs.min\Autodesk\Revit\DB\__init__.py`
"""
import re
import os
import sys
import subprocess
from collections import defaultdict
import json
from pprint import pprint
#############################################################################
#TODO: Integrate with CLI
#TODO: FIX Vars
#TODO: FIX Character Replacement + Optimize
#############################################################################
##########
# CONFIG #
##########
join = os.path.join
project_dir = os.getcwd() # Must execute from project dir
SAVE_PATH = os.path.join(project_dir, 'release', 'stubs')
LIMIT_IN_KB = 200
FILESIZE_LIMITE = LIMIT_IN_KB * 1024
def file_is_too_damn_big(filepath):
return os.path.getsize(filepath) > FILESIZE_LIMITE
def read_source(filepath):
with open(filepath) as fp:
source = fp.read()
return source
def write_source(filepath, source):
folderpath = os.path.dirname(filepath)
if not os.path.exists(folderpath):
os.makedirs(folderpath)
with open(filepath, 'w') as fp:
source = fp.write(source)
print('File Written: {}'.format(filepath))
target_files = []
TESTING = False
# TESTING = True
print('Starting...')
print(SAVE_PATH)
for root, subfolders, files in os.walk(SAVE_PATH):
py_files = [f for f in files if f.endswith('.py')]
for filename in py_files:
filepath = join(root, filename)
filesize = os.path.getsize(filepath)
filedir = os.path.dirname(filepath)
new_filedir = filedir.replace('\stubs', '\stubs.min')
new_filepath = os.path.join(new_filedir, filename)
source = read_source(filepath)
print("Processing File detected: {}".format(filepath))
if TESTING:
if not filepath.endswith('DB\\__init__.py'):
continue
# SOME OF THESE WORK IN TESTS BUT ARE NOT WORKING ON BATCH REPLACEMENT
replacements = [
(r' {4}', ' '), # Convert 4 spaces into single
(r':\r\n( )+pass', r':pass'), # Put pass in one line
(r'"""\r\n( )+pass', r'"""'), # If has doc string, not need to keep pass
(r'pass\n', r'pass'), # Remove Extra Line after pass
(r' = ', '='),
(r', ', ','),
(r' # known case of __new__', ''), # Pycharm Note
(r' #cannot find CLR method', ''), # Pycharm Note
(r' # default', ''), # Pycharm Note
]
new_source = source
for old, new in replacements:
new_source = re.sub(old, new, new_source)
write_source(new_filepath, new_source)
print('='*30)
#####################################
# SEPARATE FILE INTO SEPARATE FILES #
#####################################
if file_is_too_damn_big(new_filepath):
print('='*30)
print('WARNING: file above breaking max: {}'.format(new_filepath))
module_name = os.path.basename(filepath).replace('.py', '_parts')
chunks_dir = join(new_filedir, module_name)
# Create Blank Init File
write_source(join(chunks_dir, '__init__.py'), '')
# Split File into Classes
chunks = re.split(r'(?:\n)class ', new_source)
header = chunks.pop(0)
clean_source = header
write_source(new_filepath, clean_source)
for chunk in chunks:
# Find Class Name and body
class_source = 'class ' + chunk
re_class_name = re.search('(class )(\w+)', class_source)
class_name = re_class_name.group(2)
if not os.path.exists(chunks_dir):
os.mkdir(chunks_dir)
# Write individual class files
with open(join(chunks_dir, class_name + '.py'), 'w') as fp:
fp.write(class_source)
# New class file import to __init__
with open(new_filepath, 'a') as fp:
fp.write('from {0}.{1} import {1}\n'.format(module_name, class_name))
| [
"gtalarico@gmail.com"
] | gtalarico@gmail.com |
b135b8d196ad40b7bac0b04136cfef94ca3dbc37 | 962a553b880b2ce6f361692ea62ee2fad9cf4328 | /tests/conftest.py | 6d75f80ee75b677f6625c2d4a58d931272f257a1 | [
"Beerware"
] | permissive | tlochmanczyk/faceRecognition | 5a28b18deda28e8c515bcc24d1cfa89be1e3a51d | 75819cc32ff323eae24a4766b2e285ec80115971 | refs/heads/master | 2021-02-16T05:26:40.884796 | 2020-04-01T20:32:04 | 2020-04-01T20:32:04 | 244,971,100 | 0 | 0 | NOASSERTION | 2020-03-26T09:18:35 | 2020-03-04T18:11:16 | Python | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
"""
Dummy conftest.py for ml_cloud_model.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| [
"bartek.jastrzebski0010@gmail.com"
] | bartek.jastrzebski0010@gmail.com |
9fabaf664d6dbaf4dd42fc7eb23fb3b411cfd395 | 845d8e6816e91474e673b6cda452254d40c65e5c | /django_mailbox/transports/mmdf.py | ad462849609331fa0f5cdc9bf69e107179dd2cb7 | [] | no_license | redtoad/django-mailbox | d0847f7f29f4e4459045e8d9d3d5d1406968175b | 6da17053d495bee58ea78d4fb394d7618aeaab1a | refs/heads/master | 2021-01-01T15:36:55.409316 | 2013-06-12T06:50:25 | 2013-06-12T06:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from mailbox import MMDF
from django_mailbox.transports.generic import GenericFileMailbox
class MMDFTransport(GenericFileMailbox):
_variant = MMDF
| [
"me@adamcoddington.net"
] | me@adamcoddington.net |
10a7004e11ee3d6829e1247367be945aa8b0c793 | e0750245bc531977b80084e09c8a2ac705bbb7c8 | /prompts/models.py | ddb7ba740b9a36fe057b036af50503920b37b550 | [] | no_license | adganong/promptme | 66f1515ff571eb96c3a3f2f3e34e09ad3db9d219 | 2144098d369c212013eb679182f0f0c58c13826e | refs/heads/master | 2021-04-28T12:59:17.163277 | 2018-02-25T02:36:11 | 2018-02-25T02:36:11 | 122,092,495 | 0 | 0 | null | 2018-03-24T01:19:32 | 2018-02-19T16:51:43 | Python | UTF-8 | Python | false | false | 2,275 | py | from django.db import models
from django.conf import settings
from django.urls import reverse
from rest_framework.reverse import reverse as api_reverse
# Create your models here.
class Genre(models.Model):
# Hidden: PK or id. This is auto generated by django when making models
genre_name = models.CharField(max_length=256, null=False, blank=False)
parent_id = models.ForeignKey("self", null=True, blank=True)
def __str__(self):
return str(self.genre_name)
class PieceType(models.Model):
# Hidden: PK or id. This is auto generated by django when making models
piece_type_name = models.CharField(max_length=256, null=True)
def __str__(self):
return str(self.piece_type_name)
class PromptPiece(models.Model):
# Hidden: PK or id. This is auto generated by django when making models
piece_type = models.ForeignKey(PieceType, on_delete=models.CASCADE)
piece_genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
piece_name = models.CharField(max_length=256, null=False, blank=False)
piece_description = models.TextField(max_length=256, null=True, blank=True)
def get_random_by_genre(self):
pass
def __str__(self):
return str(self.piece_name)
class BuiltPrompt(models.Model):
prompt_name = models.CharField(max_length=256, null=True, blank=True)
prompt_person = models.ForeignKey(PromptPiece, null=False, blank=False, related_name='prompt_person')
prompt_place = models.ForeignKey(PromptPiece, null=False, blank=False, related_name='prompt_place')
prompt_thing = models.ForeignKey(PromptPiece, null=False, blank=False, related_name='prompt_thing')
prompt_scenario = models.ForeignKey(PromptPiece, null=False, blank=False, related_name='prompt_scenario')
def __str__(self):
return str(
self.prompt_name + "\n\n " +
"\tPerson:\t\t\t" + self.prompt_person.piece_name + " \n " +
"\tPlace:\t\t\t" + self.prompt_place.piece_name + " \n " +
"\tThing:\t\t\t" + self.prompt_thing.piece_name + " \n " +
"\tScenario:\t\t" + self.prompt_scenario.piece_name
)
| [
"adam.b.ganong@gmail.com"
] | adam.b.ganong@gmail.com |
cb867fdd9d29b3f242b3d3f58d314ad45a8a2c10 | 66d7de296991792b0b0c9a8288cb24ae994e327a | /pytorchtocaffe/pytorch2caffe/caffe_prototxt_op.py | 755712be933ee9acfe9c927de7e3fc4d7aa7dcaf | [] | no_license | Wangyf46/PyTorch2Caffe | 6b9911d6feb15f2f1897205232fbece97623d313 | c18f89fe571145f7063bc2118d0fe4e9a25bcad0 | refs/heads/master | 2020-09-09T07:12:57.180456 | 2019-11-13T07:34:31 | 2019-11-13T07:34:31 | 221,385,158 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,676 | py | #!/usr/bin/python
#_*_ coding:UTF-8 _*_
#pytorch version = 1.0.0
from __future__ import print_function
import torch
import torch.nn as nn
from Caffe import caffe_net
import torch.nn.functional as F
from torch.autograd import Variable
from Caffe import layer_param
from torch.nn.modules.utils import _pair
import numpy as np
'''
def getOutputBlobs(prototxt_file):
prototxt=caffe_net.Prototxt(prototxt_file)
bottoms={}
tops={}
#print(prototxt.layers())
for layer in prototxt.layers():
# help(layer)
for bottom in layer.bottom:
bottoms[bottom]=1
for top in layer.top:
tops[top]=1
#print(layer.name)
#print(bottoms)
#print(tops)
only_top=[]
for key in tops:
if (key not in bottoms):
only_top.append(key)
print(only_top)
name_list = []
for outname in prototxt.layers():
#print(outname.top[0])
for top in only_top:
#print(top)
if (top == outname.top[0]):
#print(outname.name)
name_list.append(outname.name)
print(name_list)
# print(only_top)
return only_top
def main(argv):
# getOutputBlobs()
getOutputBlobs("")
if __name__ == "__main__":
import sys
main(sys.argv)
'''
'''
def getOutputBlobsandNames(prototxt_file):
bottoms = {}
tops = {}
only_top = []
name_list = []
prototxt = caffe_net.Prototxt(prototxt_file)
for layer in prototxt.layers():
for bottom in layer.bottom:
bottoms[bottom] = 1
for top in layer.top:
tops[top] = 1
for key in tops:
if (key not in bottoms):
only_top.append(key)
print(bottoms)
print(tops)
print(only_top)
for outname in prototxt.layers():
for top in only_top:
if (top == outname.top[0]):
name_list.append(outname.name)
return only_top, name_list
'''
def getOutputBlobsandNames(prototxt_file):
only_top = []
output_name = []
prototxt = caffe_net.Prototxt(prototxt_file)
#print(type(prototxt.layers()))
#print(prototxt.layers()[0].top[0])
number = len(prototxt.layers())
#print(len(prototxt.layers()))
temp_top = 0
temp_bottom = 1
while(temp_top < number - 1):
while(temp_bottom < number):
if (prototxt.layers()[temp_top].top[0] in prototxt.layers()[temp_bottom].bottom):
break
if (temp_bottom == number - 1):
only_top.append(prototxt.layers()[temp_top].top[0])
output_name.append(prototxt.layers()[temp_top].name)
temp_bottom += 1
temp_top += 1
temp_bottom = temp_top + 1
only_top.append(prototxt.layers()[number - 1].top[0])
output_name.append(prototxt.layers()[number - 1].name)
#print(only_top)
#print(output_name)
return only_top, output_name
if __name__ == "__main__":
path = "/data/wuh/project/model_transform_test/trans_result/vehicle_model_new/model_model.prototxt"
getOutputBlobsandNames(path)
| [
"noreply@github.com"
] | Wangyf46.noreply@github.com |
8025babf0e4a1c958a945d138f5f490480bc4922 | ce02062c2a592a95d589be426f82e05c23cf251d | /bubbleSort.py | 1210145336966c0affe7bbc1b5a046e642ce60bf | [] | no_license | icaromsc/PythonExamples | cc46fd889dda5fede2031259305a5d1f2b66a5a5 | 58a9818a706c79ecaec1189e11584c21b55ae762 | refs/heads/master | 2020-04-02T18:57:40.366936 | 2016-07-27T05:45:36 | 2016-07-27T05:45:36 | 64,279,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #recebe um vetor de inteiros por parametro
def bubbleSort(v):
for i in range(len(v)):
for atual in range(len(v)-1): #realiza for encadeado e compara elemento n com todos os outros do vetor
if(v[atual]>v[atual+1]):#compara se o elemento da posição atual é maior que o próximo
v[atual],v[atual+1]=v[atual+1],v[atual]#realiza a troca
return v #retorna vetor ordenado
| [
"noreply@github.com"
] | icaromsc.noreply@github.com |
70591458bc7b1c652055f61ef7c7e70680bfdac8 | e1fa5f913b1ff635a44f7e9db42baad07f9edfc9 | /app/app.py | 0616e1e7e66dec2a07243ec1c76b1d15ab6e3adb | [] | no_license | ariakerstein/NotesApp-flask | 935edf3c34f2b4997a1b331894bccf0dd711cd00 | 995f8d695d51435a0a406e9cbef74c93897e5f01 | refs/heads/master | 2016-08-12T03:58:27.509303 | 2015-11-19T15:31:07 | 2015-11-19T15:31:07 | 45,791,819 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | import os
from flask import Flask
from micawber import bootstrap_basic
from peewee import SqliteDatabase
APP_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE = os.path.join(APP_ROOT, 'notes.db')
DEBUG = False
app = Flask(__name__)
app.config.from_object(__name__)
db = SqliteDatabase(app.config['DATABASE'], threadlocals=True)
oembed = bootstrap_basic() | [
"aakerstein@walmart.com"
] | aakerstein@walmart.com |
61778768f52595b4b43735fbfb3782cbaa5d6f95 | 2d82b8b45fd1bb6ebac355151529d072568434d0 | /util.py | 33d0398c02530439f57eb923c188402f190a6210 | [
"MIT"
] | permissive | Minhphuong1989/phuongle89 | f0e3917142fc336710d324581ae082f14aff785f | 8f83b9ddb5e144e97b8483033480e726d9c6abd8 | refs/heads/main | 2023-06-03T17:41:22.519850 | 2021-06-21T08:41:56 | 2021-06-21T08:41:56 | 365,459,138 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,098 | py | import torch
import torch.nn.functional as F
from torch.utils import data
from torch import nn, autograd
import os
import matplotlib.pyplot as plt
google_drive_paths = {
"GNR_checkpoint.pt": "https://drive.google.com/uc?id=1IMIVke4WDaGayUa7vk_xVw1uqIHikGtC",
}
# https://drive.google.com/uc?id=1IMIVke4WDaGayUa7vk_xVw1uqIHikGtC
def ensure_checkpoint_exists(model_weights_filename):
if not os.path.isfile(model_weights_filename) and (
model_weights_filename in google_drive_paths
):
gdrive_url = google_drive_paths[model_weights_filename]
try:
from gdown import download as drive_download
drive_download(gdrive_url, model_weights_filename, quiet=False)
except ModuleNotFoundError:
print(
"gdown module not found.",
"pip3 install gdown or, manually download the checkpoint file:",
gdrive_url
)
if not os.path.isfile(model_weights_filename) and (
model_weights_filename not in google_drive_paths
):
print(
model_weights_filename,
" not found, you may need to manually download the model weights."
)
def shuffle_batch(x):
return x[torch.randperm(x.size(0))]
def data_sampler(dataset, shuffle, distributed):
if distributed:
return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return data.RandomSampler(dataset)
else:
return data.SequentialSampler(dataset)
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(1 - decay, par2[k].data)
def sample_data(loader):
while True:
for batch in loader:
yield batch
def d_logistic_loss(real_pred, fake_pred):
loss = 0
for real, fake in zip(real_pred, fake_pred):
real_loss = F.softplus(-real)
fake_loss = F.softplus(fake)
loss += real_loss.mean() + fake_loss.mean()
return loss
def d_r1_loss(real_pred, real_img):
grad_penalty = 0
for real in real_pred:
grad_real, = autograd.grad(
outputs=real.mean(), inputs=real_img, create_graph=True, only_inputs=True
)
grad_penalty += grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(fake_pred, weights):
loss = 0
for fake, weight in zip(fake_pred, weights):
loss += weight*F.softplus(-fake).mean()
return loss / len(fake_pred)
def display_image(image, size=None, mode='nearest', unnorm=False, title=''):
# image is [3,h,w] or [1,3,h,w] tensor [0,1]
if image.is_cuda:
image = image.cpu()
if size is not None and image.size(-1) != size:
image = F.interpolate(image, size=(size,size), mode=mode)
if image.dim() == 4:
image = image[0]
image = image.permute(1, 2, 0).detach().numpy()
plt.figure()
plt.title(title)
plt.axis('off')
plt.imshow(image)
def normalize(x):
return ((x+1)/2).clamp(0,1)
def get_boundingbox(face, width, height, scale=1.3, minsize=None):
"""
Expects a dlib face to generate a quadratic bounding box.
:param face: dlib face class
:param width: frame width
:param height: frame height
:param scale: bounding box size multiplier to get a bigger face region
:param minsize: set minimum bounding box size
:return: x, y, bounding_box_size in opencv form
"""
x1 = face.left()
y1 = face.top()
x2 = face.right()
y2 = face.bottom()
size_bb = int(max(x2 - x1, y2 - y1) * scale)
if minsize:
if size_bb < minsize:
size_bb = minsize
center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
# Check for out of bounds, x-y top left corner
x1 = max(int(center_x - size_bb // 2), 0)
y1 = max(int(center_y - size_bb // 2), 0)
# Check for too big bb size for given x, y
size_bb = min(width - x1, size_bb)
size_bb = min(height - y1, size_bb)
return x1, y1, size_bb
def preprocess_image(image, cuda=True):
"""
Preprocesses the image such that it can be fed into our network.
During this process we envoke PIL to cast it into a PIL image.
:param image: numpy image in opencv form (i.e., BGR and of shape
:return: pytorch tensor of shape [1, 3, image_size, image_size], not
necessarily casted to cuda
"""
# Revert from BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Preprocess using the preprocessing function used during training and
# casting it to PIL image
preprocess = xception_default_data_transforms['test']
preprocessed_image = preprocess(pil_image.fromarray(image))
# Add first dimension as the network expects a batch
preprocessed_image = preprocessed_image.unsqueeze(0)
if cuda:
preprocessed_image = preprocessed_image.cuda()
return preprocessed_image
def truncate(x, truncation, mean_style):
return truncation*x + (1-truncation)*mean_style
| [
"noreply@github.com"
] | Minhphuong1989.noreply@github.com |
02405e0001cf5846244e9d69773d9a9e7158254b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/paa/paa_r50_fpn_1.5x_coco.py | 816c773695c011d9bf568083b9cd4e991e0abf1e | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 713 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[12, 16])
runner = dict(type='EpochBasedRunner', max_epochs=18)
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
d8c486377235fc2e4c1cceea40d16614e0a55c16 | dcba8e9d5e3661f3f1af5121e5d00a80c7107ba5 | /light_sensor_server.py | 1873c93c3c8bf091c9767e4ee083fd92056dc59d | [] | no_license | mmclsntr/grove_server | 206aaa83fd86aaeb320f3f0cad066f984f52ab4e | 80be473c5b500c7888c3b67a8e2a074d5644cca3 | refs/heads/master | 2021-07-16T18:05:26.782273 | 2017-10-24T12:22:05 | 2017-10-24T12:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | from datetime import datetime
import socket
import time
import grovepi
light_sensor = 1
grovepi.pinMode(light_sensor,"INPUT")
address = ('0.0.0.0', 4011)
max_size = 1000
print('Starting the service at', datetime.now())
print('Wating for a client to call')
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(address)
server.listen(5)
while True:
client, addr = server.accept()
data = client.recv(max_size)
print('At', datetime.now(), client, 'said', data)
if data.decode('utf-8') == 'read':
try:
sensor_value = grovepi.analogRead(light_sensor)
resistance = (float)(1023 - sensor_value) * 10 / sensor_value
print(str(resistance) + ' K')
client.sendall(str(int(resistance)).encode('utf-8'))
except KeyboardInterrupt:
break
except IOError:
print ("Error")
elif data.decode('utf-8') == 'write':
data = client.recv(max_size)
print (data)
client.close()
time.sleep(.5)
server.close()
| [
"hitorans@icloud.com"
] | hitorans@icloud.com |
46056e465412c8a0915a39adf4b3e83624dc34d3 | 98af46eeb20987d67e09adb4553e713a2fba6c36 | /perfectpitch/onsetsdetector/model.py | fbf2603ffd3f1b66142e6cd5f5986f87793a315b | [
"MIT"
] | permissive | ArmanMazdaee/perfectpitch | 108102d08aef53799a44aac540fb698da853bb52 | 2ea9ca47945a664c20d168bef4f4a15c7a5f9fb8 | refs/heads/master | 2021-07-11T05:43:46.521676 | 2020-08-10T05:30:38 | 2020-08-10T05:30:38 | 199,030,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,004 | py | import math
import torch
from perfectpitch import constants
DROPOUT = 0.1
def positional_encoding(x):
length = x.shape[0]
dimension = x.shape[2]
position = torch.arange(0, length, 1, dtype=torch.float, device=x.device)
div_term = torch.arange(0, dimension, 2, dtype=torch.float, device=x.device)
points = position.unsqueeze(1) * torch.exp(
div_term * -math.log(10000.0) / dimension
)
sin = torch.sin(points)
cos = torch.cos(points)
encoding = torch.cat([sin, cos], dim=1).unsqueeze(1)
return x + encoding
class TransformerConvEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_conv, dropout):
super().__init__()
self.self_attn = torch.nn.MultiheadAttention(d_model, nhead, dropout)
self.dropout1 = torch.nn.Dropout(dropout)
self.norm1 = torch.nn.LayerNorm(d_model)
self.conv1ds = torch.nn.Sequential(
torch.nn.Conv1d(
in_channels=d_model, out_channels=dim_conv, kernel_size=3, padding=1
),
torch.nn.ReLU(),
torch.nn.Dropout(dropout),
torch.nn.Conv1d(
in_channels=dim_conv, out_channels=d_model, kernel_size=3, padding=1
),
)
self.dropout2 = torch.nn.Dropout(dropout)
self.norm2 = torch.nn.LayerNorm(d_model)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
x = self.self_attn(
query=src,
key=src,
value=src,
attn_mask=src_mask,
key_padding_mask=src_key_padding_mask,
)[0]
x = self.dropout1(x)
x = self.norm1(src + x)
y = x.permute(1, 2, 0)
y = self.conv1ds(y)
y = y.permute(2, 0, 1)
y = self.dropout2(y)
return self.norm2(x + y)
class OnsetsDetector(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1d = torch.nn.Sequential(
torch.nn.Conv1d(
in_channels=constants.SPEC_DIM,
out_channels=512,
kernel_size=3,
padding=1,
),
torch.nn.ReLU(),
torch.nn.Dropout(DROPOUT),
)
self.sequential = torch.nn.TransformerEncoder(
encoder_layer=TransformerConvEncoderLayer(
d_model=512, nhead=4, dim_conv=2048, dropout=DROPOUT,
),
num_layers=8,
)
self.linear = torch.nn.Linear(
in_features=512, out_features=constants.MAX_PITCH - constants.MIN_PITCH + 1
)
def forward(self, spec, mask=None):
if mask is not None:
mask = ~mask.T
conv1d_input = spec.permute(1, 2, 0)
conv1d_output = self.conv1d(conv1d_input)
sequential_input = positional_encoding(conv1d_output.permute(2, 0, 1))
sequential_output = self.sequential(sequential_input, src_key_padding_mask=mask)
return self.linear(sequential_output)
| [
"arman.maz1373@gmail.com"
] | arman.maz1373@gmail.com |
b5c74ec8026d3982cfa0c6ecd74fa67b38b3ffb8 | 18ad603848489e5d32e7b6784841fbec1fa5ef3f | /parse_content.py | 63631b7271fb44e1dd087a7b5e8b137912b36a4a | [] | no_license | garg-ankush/notes-email-sender | 898e12dd109288916529695e41713e80da8fa75f | 32cd53e991d52ee4f02522860658610d1957b415 | refs/heads/master | 2023-03-07T02:30:44.216938 | 2020-06-20T19:41:04 | 2020-06-20T19:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | from selector_service import SelectorService
class ContentParser:
def __init__(self):
self.sample_entries = SelectorService().select_random_entries()
self.content = None
def parse_selected_entries(self):
content = ''
for item_index in range(len(self.sample_entries)):
item = "DATE-ADDED: " + self.sample_entries[item_index]['date_added']
content = content + item + "\n"
item = "HIGHLIGHT: " + self.sample_entries[item_index]['highlight']
content = content + item + "\n"
item = "TITLE: " + self.sample_entries[item_index]['title']
content = content + item + "\n"
item = "CHAPTER: " + self.sample_entries[item_index]['chapter']
content = content + item + "\n"
item = "SOURCE: " + self.sample_entries[item_index]['source']
content = content + item + "\n"
item = "PAGE-NUMBER: " + self.sample_entries[item_index]['page_number']
content = content + item + "\n" + "------------" + "\n"
self.content = content
return self.content
| [
"unkushgarg@gmail.com"
] | unkushgarg@gmail.com |
6204778bccce5acd82eee6997003e783a16005fd | a939e018333a9ecd26ddc618f99835b7eb381686 | /.svn/tmp/tempfile.2.tmp | 509885ba67010786fd018501957f1787d480a5c8 | [] | no_license | cash2one/crawl_youtube | bff5ba254001c2f31f770e55a4aca39bc54e45ee | 0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc | refs/heads/master | 2021-01-16T22:30:17.800282 | 2016-02-18T11:50:09 | 2016-02-18T11:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | tmp | #!/usr/bin/python
# coding=utf8
# Copyright 2015 LeTV Inc. All Rights Reserved.
# author: gaoqiang@letv.com (Qiang Gao)
import os
import signal
from le_crawler.common.logutil import Log
thrift_logger = Log('thrift.server.TServer', 'log/thrift_filter.error').log
from optparse import OptionParser
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from thrift.server import TServer
from pybloom import ScalableBloomFilter
from le_crawler.proto.filter import UrlFilterService
class FilterHandler(object):
def __init__(self, logger):
self.logger_ = logger
self._load_from_file()
def url_seen(self, url):
if self.deduper_.add(url):
self.logger_.info('url duplicated: %s', url)
return True
return False
def _load_from_file(self):
self.logger_.info('loading data from cache file...')
if not os.path.isfile('data/bloom.data'):
self.logger_.error('bloom cache file not found, create one instead.')
self.deduper_ = ScalableBloomFilter(100000000, 0.0001, 4)
else:
with open('data/bloom.data', 'r') as f:
self.deduper_ = ScalableBloomFilter.fromfile(f)
def _dump_to_file(self):
self.logger_.info('dumping data...')
if not os.path.isdir('data'):
os.mkdir('data')
with open('data/bloom.data', 'w') as f:
self.deduper_.tofile(f)
self.logger_.info('dump data finished.')
def close(self):
self._dump_to_file()
class FilterServiceMain(object):
def __init__(self):
self.logger_ = Log('filter_log', 'log/filter.log').log
self.exit_ = False
def close(self, num, fram):
self.exit_ = True
try:
self.socket_.close()
self.handler_.close()
self.logger_.info('close transport')
except:
self.logger_.exception('failed to close transport.')
def run(self, host, port):
# this flag daemon set true is for stop service by outside signal
self.socket_ = TSocket.TServerSocket(host, port)
self.handler_ = FilterHandler(self.logger_)
self.service = TServer.TThreadedServer(UrlFilterService.Processor(self.handler_),
self.socket_,
TTransport.TBufferedTransportFactory(),
TCompactProtocol.TCompactProtocolFactory(),
daemon=True)
self.logger_.info('begin server on %s, %s' % (host, port))
print 'begin server on %s, %s' % (host, port)
self.service.serve()
scheduler = FilterServiceMain()
signal.signal(signal.SIGINT, scheduler.close)
signal.signal(signal.SIGTERM, scheduler.close)
if __name__ == '__main__':
option_parser = OptionParser()
option_parser.add_option('-H', '--host', type='string', dest='host',
default='10.150.140.84', help="service host")
option_parser.add_option('-p', '--port', type='int', dest='port',
default=8089, help="service port")
options, _ = option_parser.parse_args()
scheduler.run(options.host, options.port)
| [
"zjc0516@126.com"
] | zjc0516@126.com |
6951735b5119448cb7a86cf403b941f92733e4b0 | f46966a5e49a6138182635a4850738a18eec01e5 | /scripts/utils/bcbio_prep_cwl_genomes.py | d704120ef6fa0e7407cca8ec06c5c6a3272e0319 | [
"MIT"
] | permissive | jchenpku/bcbio-nextgen | 44a9247a0e1314aaba66d1f9941540ddb2993bde | 9ddbfcc6f2595298ae8aad3adfa6a568a2a4c62f | refs/heads/master | 2020-08-01T03:06:30.695158 | 2019-10-07T00:21:32 | 2019-10-07T00:21:32 | 73,585,332 | 1 | 0 | MIT | 2019-10-07T00:21:33 | 2016-11-12T23:49:31 | Python | UTF-8 | Python | false | false | 2,642 | py | #!/usr/bin/env python
"""Clean and prepare a set of genomes for CWL usage and upload.
bcbio with CWL can read directly from a reference genome folder
without using Galaxy location files. This allows both local and
remote usage on object stores (Arvados, DNAnexus, SevenBridges, Synapse, S3).
This copies from an existing bcbio genome installation, cleaning
and packing directories to be ready for CWL usage and upload.
Usage:
bcbio_prep_cwl_genomes.py <genome_dir>
"""
import glob
import os
import shutil
import subprocess
import sys
import tarfile
from bcbio import utils
def main(base_dir):
for genome_dir in sorted(glob.glob(os.path.join(base_dir, "*", "*"))):
if os.path.isdir(genome_dir):
genome_name = os.path.basename(genome_dir)
genome_out_dir = utils.safe_makedir(os.path.join(os.path.join(os.getcwd(), "genomes", genome_name)))
copy_genome(genome_dir, genome_out_dir)
def copy_genome(orig_dir, out_dir):
print(orig_dir, out_dir)
to_copy = ["versions.csv", "bwa", "config", "coverage", "rnaseq", "rtg", "seq", "snpeff",
"ucsc", "validation", "variation", "viral"]
excludes = {"seq": ["*.fa.gz*", "*.old*", "perl"],
"rnaseq": ["ericscript", "tophat", "kallisto"],
"snpeff": ["transcripts"],
"variation": ["genesplicer", "dbNSFP*"]}
to_tar = ["bwa", "rtg", "snpeff"]
for copy in to_copy:
if os.path.isfile(os.path.join(orig_dir, copy)):
shutil.copy(os.path.join(orig_dir, copy), out_dir)
elif copy in to_tar and len(glob.glob(os.path.join(out_dir, "%s*-wf.tar.gz" % copy))) == 1:
print("already prepped: %s" % glob.glob(os.path.join(out_dir, "%s*-wf.tar.gz" % copy)))
else:
cmd = ["rsync", "-avz"]
for e in excludes.get(copy, []):
cmd += ["--exclude", e]
cmd += ["%s/%s/" % (orig_dir, copy), "%s/%s/" % (out_dir, copy)]
print " ".join(cmd)
subprocess.check_call(cmd)
if copy in to_tar:
with utils.chdir(out_dir):
out_file = copy
dir_files = os.listdir(copy)
if len(dir_files) == 1 and os.path.isdir(os.path.join(copy, dir_files[0])):
out_file += "--%s" % (dir_files[0])
out_file += "-wf.tar.gz"
print("tarball", out_file)
with tarfile.open(out_file, "w:gz") as tar:
tar.add(copy)
shutil.rmtree(copy)
if __name__ == "__main__":
main(*sys.argv[1:])
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
000ad2bfe0221337ebe78b33b4c1046aed21085d | 46b432cd3557038c454601367b878f889c9b6a8f | /kiyuna/tutorial04/test_hmm.py | b2b0fc5a973faf6fbfb2ad7d8772238651f39b66 | [] | no_license | tmu-nlp/NLPtutorial2019 | 84ceec06568fd9d899a686658fb8851466133375 | d77d199c50cd37d70e462209a7bfcd4dee9140a1 | refs/heads/master | 2020-05-14T13:34:05.336594 | 2019-09-25T02:25:41 | 2019-09-25T02:25:41 | 181,814,723 | 1 | 0 | null | 2019-08-01T18:53:54 | 2019-04-17T04:04:06 | Python | UTF-8 | Python | false | false | 3,896 | py | '''
隠れマルコフモデルによる品詞推定
'''
import os
import sys
import subprocess
from collections import defaultdict
from math import log2
os.chdir(os.path.dirname(os.path.abspath(__file__))) # cd .
def message(text):
print("\33[92m" + text + "\33[0m")
def load_model(model_file):
possible_tags = defaultdict(int)
emission = defaultdict(float)
transition = defaultdict(float)
with open(model_file) as f:
for line in f:
type, context, word, prob = line.split()
possible_tags[context] += 1
if type == 'T':
transition[f"{context} {word}"] = float(prob)
else:
emission[f"{context} {word}"] = float(prob)
return possible_tags, emission, transition
def test_hmm(model_path, test_path, output_path):
λ_1 = 0.90
λ_unk = 1 - λ_1
V = 1e6
possible_tags, emission, transition = load_model(model_path)
res = []
with open(test_path) as f:
for line in f:
words = line.split()
# 最小化DP(viterbi)
best_score = defaultdict(lambda: float('inf'))
best_edge = defaultdict(str)
best_score["0 <s>"] = 0
best_edge["0 <s>"] = None
for i, word in enumerate(words):
for prev in possible_tags:
for next in possible_tags:
if f"{i} {prev}" not in best_score:
continue
if f"{prev} {next}" not in transition:
continue
score = best_score[f"{i} {prev}"]
Pt = transition[f"{prev} {next}"]
score += -log2(Pt)
Pe = λ_1 * emission[f"{next} {word}"] + λ_unk / V
score += -log2(Pe)
if best_score[f"{i+1} {next}"] > score:
best_score[f"{i+1} {next}"] = score
best_edge[f"{i+1} {next}"] = f"{i} {prev}"
l = len(words)
for tag in possible_tags:
if f"{l} {tag}" not in best_score:
continue
if f"{tag} </s>" not in transition:
continue
Pt = transition[f"{tag} </s>"]
score = best_score[f"{l} {tag}"] + -log2(Pt)
if best_score[f"{l+1} </s>"] > score:
best_score[f"{l+1} </s>"] = score
best_edge[f"{l+1} </s>"] = f"{l} {tag}"
tags = []
next_edge = best_edge[f"{l+1} </s>"]
while next_edge != "0 <s>":
pos, tag = next_edge.split()
tags.append(tag)
next_edge = best_edge[next_edge]
tags.reverse()
res.append(" ".join(tags) + '\n')
with open(output_path, 'w') as f:
f.writelines(res)
if __name__ == '__main__':
is_test = sys.argv[1:] == ["test"]
if is_test:
message("[*] test")
model = './model_test.txt'
test = '../../test/05-test-input.txt'
res = './result_test.pos'
ans = '../../test/05-test-answer.txt'
else:
message("[*] wiki")
model = './model_wiki.txt'
test = '../../data/wiki-en-test.norm'
res = './result_wiki.pos'
ans = '../../data/wiki-en-test.pos'
test_hmm(model, test, res)
if is_test:
subprocess.run(f'diff -s {res} {ans}'.split())
else:
subprocess.run(f'perl ../../script/gradepos.pl {ans} {res}'.split())
message("[+] Done!")
'''
Accuracy: 90.82% (4144/4563)
Most common mistakes:
NNS --> NN 45
NN --> JJ 27
JJ --> DT 22
NNP --> NN 22
VBN --> NN 12
JJ --> NN 12
NN --> IN 11
NN --> DT 10
NNP --> JJ 8
VBP --> VB 7
'''
| [
"kyuna.prog@gmail.com"
] | kyuna.prog@gmail.com |
d7b308df0f872cf4225de6e4cc5033a3f2c886e2 | c55758fe1b61828d4e8e46787e6c1683a5244c9b | /netCompile/server.py | c01460aeff9f71d6d37cd289667e9124d9551c29 | [] | no_license | jiyudonggithub/WebSpider | b5625bb2a2c4b448ff4d7c66ebb70c0e16da1e8d | cf171e501ed75efedadeff80abcf33605041cd58 | refs/heads/master | 2023-01-03T11:10:47.516326 | 2020-11-04T03:26:38 | 2020-11-04T03:26:38 | 306,371,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : server.py
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/10/31 10:10 yudong 1.0 None
'''
import socket
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 绑定ip端口
sk.bind(('localhost', 8080))
# 监听
sk.listen(5)
# 等待连接
print('服务器启动成功......')
clientSocket, clientAdress = sk.accept()
print('{} -- {} 连接成功'.format(str(clientSocket), str(clientAdress)))
while True:
data = clientSocket.recv(1024)
print('收到数据')
data = data.decode('utf-8')
print(data)
clientSocket.send('你好'.encode('utf-8'))
| [
"yudong.j@icloud.com"
] | yudong.j@icloud.com |
0ac777ad94ace5e2bee7f20e8f1433d23de99aa1 | ad9b830df69c9005cc7c27b4c873adb89b1cd04c | /python_code.py | e6d8fce5e513b71add2165b3e3687489f96ee16e | [] | no_license | Zaheerabbas14/Coursera-Capstone-Projects | 5cab070573f63027db7396c346bf2e3cccc32b89 | a27ca5d97e2a3d0ac2c3705f27f581eeb6ac36c5 | refs/heads/master | 2020-06-16T19:26:40.099175 | 2019-08-18T13:45:32 | 2019-08-18T13:45:32 | 195,677,972 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | print ("Hello Github!")
| [
"zaheerabbas14@gmail.com"
] | zaheerabbas14@gmail.com |
b7c583ef1f04bd6898e7d5f5f6e32b370f517dec | c4f60b760b87b3db7fd225246d9baaf1417fa06f | /practiceLearning.py | b8272e81127f8fe54869a09f50107a8ceaa9aef4 | [] | no_license | joeythaman/collatzConjectureML | b0fa23263cd31a24d3c5db889eeff1cce0c0132c | c0cb9e391e26f0f20a9a204217a51af37b137049 | refs/heads/master | 2022-11-24T23:37:42.243949 | 2020-08-02T02:57:03 | 2020-08-02T02:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import tensorflow as tf
from tensorflow import keras
# first neural network with keras tutorial
from numpy import loadtxt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# load the dataset
dataset = loadtxt('data_1000.csv', delimiter=',')
# split into input (X) and output (y) variables
X = dataset[:,0]
y = dataset[:,3]
# define the keras model
model = Sequential()
model.add(Dense(12, input_dim=1, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='linear'))
# compile the keras model
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
# fit the keras model on the dataset
model.fit(X, y, epochs=1500, batch_size=10)
# evaluate the keras model
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy*100))
print(model.predict([7607])) | [
"jthaman@stanford.edu"
] | jthaman@stanford.edu |
9eaca329896f918cafe90043d0c64b601edc3691 | 7218199f8a9dc8442a273dd0ec2341e5658ac4c8 | /situacao/migrations/0010_auto_20190412_1704.py | f1c3efb10d12b9fdb8d2d5966d53cfc644365d03 | [] | no_license | catalunha/cata_django | c96fddb36941cc84f3abd91491dcac217cfa3034 | 47056d5ba760346e24861fdcc595fb816eeae597 | refs/heads/master | 2023-05-25T17:30:34.451476 | 2019-04-23T21:27:16 | 2019-04-23T21:27:16 | 180,234,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # Generated by Django 2.1.3 on 2019-04-12 20:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('situacao', '0009_auto_20190412_1704'),
]
operations = [
migrations.RemoveField(
model_name='arquivo',
name='ativo',
),
migrations.RemoveField(
model_name='indice',
name='ativo',
),
migrations.RemoveField(
model_name='resposta',
name='ativo',
),
migrations.RemoveField(
model_name='simulacao',
name='ativo',
),
migrations.RemoveField(
model_name='teste',
name='ativo',
),
migrations.RemoveField(
model_name='texto',
name='ativo',
),
migrations.RemoveField(
model_name='valoraleatorio',
name='ativo',
),
]
| [
"nextlevel.servidores@gmail.com"
] | nextlevel.servidores@gmail.com |
4a569db90d1f01612e008fe7a0aa4dd730cbbd65 | 059686d16aeea5629e306c1215e225e53ba5eebd | /build_index.py | 009c54c5f7291312f6f00e644dd3c55e7e99f572 | [
"MIT"
] | permissive | Raj-S-Singh/Vector-Space-based-Information-Retrieval-System | 2d27467869335d523a8cca4df25b3098764dd77e | a32f5d62c05893a450cdd3a36fb262e5f4843dfd | refs/heads/master | 2023-04-29T07:55:44.811770 | 2021-05-18T18:51:39 | 2021-05-18T18:51:39 | 368,621,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,764 | py |
# Wikipedia files used: AA - wiki_10 to wiki_29 (10 files)
# Group 3
# Lakshya Agarwal 2017B5A70904P
# Anuj Hydrabadi 2017A8PS0420P
# Samarth Gupta 2017B4A70467P
# Raj Shree Singh 2017B4A70808P
# Aditya Vishwakarma 2017B5A70954P
import pandas as pd
import numpy as np
import re
import pickle
import nltk
import os
import timeit
import string
import math
from nltk.tokenize import word_tokenize
from bs4 import BeautifulSoup
from collections import Counter
from collections import defaultdict
from nltk.corpus import wordnet
CHMP_LST_COUNT = 200
WIKIS_FOLDER = './wikis'
PICKLE_FILE = 'index.pickle'
MODIFIED_PICKLE_FILE = 'modified_index.pickle'
# One time download
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
# Instead of only one wiki, added 10 wikis for better results
response = ""
entries = os.listdir(WIKIS_FOLDER)
for entry in entries:
f = open(os.path.join(WIKIS_FOLDER, entry), "r", encoding='utf8')
response = response + f.read().lower()
# print(len(response))
doc_dicts = []
doc_id_ttl_map = {}
idf = dict() # A dictionary is a collection which is unordered, changeable and indexed
trm_freq = {} # Creating dictionary of term frequencies
invtd_idx = {} # Inverted index containing document-wise frequency
chmp_dcs = set()
chmp_lst = {}
# qry_typ = 1
# qry_typ = 0 => No improvement, part-1
# qry_typ = 1 => improvements
"""## Parsing the text"""
def preprocess(qry_typ = 0):
"""
The first step is to create the index of words. First, we parse the content
using BeautifulSoup library and tokenize the raw text.
"""
soup = BeautifulSoup(response, 'html.parser')
TAG_RE = re.compile(r'<[^>]+>')
all_docs = soup.find_all('doc')
# print(all_docs[0])
for doc in all_docs:
doc_cnt = TAG_RE.sub('', ''.join(map(lambda x: str(x), doc.contents)))
# print(doc_cnt)
# break
doc_cnt = doc_cnt.translate(str.maketrans('', '', string.punctuation)) # Remove punctuations from the doc_cnt
# print(doc_cnt)
# break
doc_cnt = doc_cnt.replace("\n", " ") # Remove unnecessary newlines
doc_cnt = ''.join(i for i in doc_cnt if ord(i)<128)
doc_cnt = " ".join(doc_cnt.split())
# print(doc_cnt)
# break
if qry_typ == 1:
doc_cnt = word_tokenize(doc_cnt)
doc_cnt = ' '.join(doc_cnt)
doc_dict = {
'id': doc['id'],
'title': doc['title'],
'url': doc['url'],
'content': doc_cnt
}
doc_id_ttl_map[doc['id']] = doc['title']
doc_dicts.append(doc_dict)
print(len(doc_dicts))
"""## Build the Index - Inverted Index Construction"""
def build_index(qry_typ = 0):
"""
Dictionary data structure is used because of its constant look-up time.
For each document parsed, the tokens are populated in the dictionary
‘trm_freq’ and parallely necessary changes are made in the posting list
to create inverted index, ‘invtd_idx’. The dictionary consists of the term
as the key and the value as another dictionary with document ids where
the term appears as the key along with the frequency of the term in that
document as the value.
"""
for doc_dict in doc_dicts:
print("Building_index for doc_id: {0}".format(doc_dict['id']))
for word in word_tokenize(doc_dict['content']):
if word in trm_freq:
trm_freq[word] = trm_freq[word] + 1
else:
trm_freq[word] = 1
if word in invtd_idx:
pstg_lst = invtd_idx[word]
if doc_dict['id'] in pstg_lst:
pstg_lst[doc_dict['id']] = pstg_lst[doc_dict['id']] + 1
else:
pstg_lst[doc_dict['id']] = 1
else:
invtd_idx[word] = {doc_dict['id']:1}
for word in word_tokenize(doc_dict['title']):
if word in title_trm_freq:
title_trm_freq[word] = title_trm_freq[word] + 1
else:
title_trm_freq[word] = 1
if word in title_invtd_idx:
ttl_pstg_lst = title_invtd_idx[word]
if doc_dict['id'] in ttl_pstg_lst:
ttl_pstg_lst[doc_dict['id']] = ttl_pstg_lst[doc_dict['id']] + 1
else:
ttl_pstg_lst[doc_dict['id']] = 1
else:
title_invtd_idx[word] = {doc_dict['id']:1}
def get_term_document_weights(invtd_idx):
"""
This function uses lnc model for calculating term weights for documents.
‘l’ represents term frequency is considered using a logarithmic model
that is the value of 1 plus logarithm of frequency of occurrence of the
term in the document. ‘n’ represents document frequency is not considered.
‘c’ represents normalisation is done using cosine normalization. idf is
also created here which will be used for querylater in
get_term_weights_for_query().
"""
documents_count = len(doc_dicts)
# print(documents_count)
document_length = defaultdict(int) # Used as normalization factor (Cosine Similarity)
# Defaultdict is a container like dictionaries present in the module collections. Defaultdict is a sub-class of the dict class that returns a dictionary-like object. The functionality of both dictionaries and defualtdict are almost same except for the fact that defualtdict never raises a KeyError
trm_doc_wgt = defaultdict(dict)
for term, pstg_lst in invtd_idx.items():
idf[term] = math.log10(documents_count / len(pstg_lst))
for doc_id, tf in pstg_lst.items():
weight = 1 + math.log10(tf)
trm_doc_wgt[term][doc_id] = weight
document_length[doc_id] += weight ** 2
# Use sqrt of weighted square distance for cosine normalization
for doc_id in document_length:
document_length[doc_id] = math.sqrt(document_length[doc_id])
# normalization
for term in trm_doc_wgt:
for doc_id in trm_doc_wgt[term]:
trm_doc_wgt[term][doc_id] /= document_length[doc_id]
return trm_doc_wgt
def get_trm_ttl_wgt(title_invtd_idx):
"""
Similar to get_term_document_weights
"""
documents_count = len(doc_dicts)
title_length = defaultdict(int) # Used as normalization factor (Cosine Similarity)
# Defaultdict is a container like dictionaries present in the module collections. Defaultdict is a sub-class of the dict class that returns a dictionary-like object. The functionality of both dictionaries and defualtdict are almost same except for the fact that defualtdict never raises a KeyError
trm_ttl_wgt = defaultdict(dict)
for term, pstg_lst in title_invtd_idx.items():
title_idf[term] = math.log10(documents_count / len(pstg_lst))
for doc_id, tf in pstg_lst.items():
weight = 1 + math.log10(tf)
trm_ttl_wgt[term][doc_id] = weight
title_length[doc_id] += weight ** 2
# Use sqrt of weighted square distance for cosine normalization
for doc_id in title_length:
title_length[doc_id] = math.sqrt(title_length[doc_id])
# normalization
for term in trm_ttl_wgt:
for doc_id in trm_ttl_wgt[term]:
trm_ttl_wgt[term][doc_id] /= title_length[doc_id]
return trm_ttl_wgt
def create_chmp_lst():
"""
Creates champion list for each word in the corpus.
Gets the posting list corresponding to each word and get a minimum of
most common top 200 documents or length posting list after sorting them
in reverse order of number of occurrences of word in each doc.
"""
for word in trm_freq:
pstg_lst = invtd_idx[word]
c = Counter(pstg_lst)#Counter is a sub-class which is used to count hashable objects. It implicitly creates a hash table of an iterable when invoked
mc = c.most_common(min(CHMP_LST_COUNT, len(pstg_lst)))
most_common_docs = [i[0] for i in mc]
chmp_lst[word] = most_common_docs
def main():
global invtd_idx
global idf
global title_idf
global trm_freq
global doc_id_ttl_map
global title_invtd_idx
global chmp_lst
global title_trm_freq
idf = dict()
invtd_idx = {}
trm_freq = {}
title_trm_freq={}
doc_id_ttl_map = {}
chmp_lst = {}
title_invtd_idx={}
title_idf = dict()
qry_typ = 0
print("\nBuilding_index for normal search")
preprocess(qry_typ)#giving list of dictionaries
build_index(qry_typ)#term freq and std inverted index
trm_doc_wgt = get_term_document_weights(invtd_idx)
trm_ttl_wgt = get_trm_ttl_wgt(title_invtd_idx)
create_chmp_lst()
pkld_obj = {}
pkld_obj['trm_freq'] = trm_freq
pkld_obj['invtd_idx'] = invtd_idx
pkld_obj['trm_doc_wgt'] = trm_doc_wgt
pkld_obj['title_idf'] = title_idf
pkld_obj['doc_id_ttl_map'] = doc_id_ttl_map
pkld_obj['chmp_lst'] = chmp_lst
pkld_obj['idf'] = idf
pkld_obj['title_invtd_idx'] = title_invtd_idx
pkld_obj['title_trm_freq'] = title_trm_freq
pkld_obj['trm_ttl_wgt'] = trm_ttl_wgt
with open(PICKLE_FILE, 'wb') as f:
pickle.dump(pkld_obj, f)
idf = dict()
invtd_idx = {}
trm_freq = {}
chmp_lst = {}
doc_id_ttl_map = {}
qry_typ = 1
print("\nBuilding_index for improved search")
preprocess(qry_typ)
build_index(qry_typ)
trm_doc_wgt = get_term_document_weights(invtd_idx)
create_chmp_lst()
pkld_obj_modified = {}
pkld_obj_modified['trm_doc_wgt'] = trm_doc_wgt
pkld_obj_modified['idf'] = idf
pkld_obj_modified['chmp_lst'] = chmp_lst
pkld_obj_modified['doc_id_ttl_map'] = doc_id_ttl_map
pkld_obj_modified['title_idf'] = title_idf
pkld_obj_modified['title_invtd_idx'] = title_invtd_idx
pkld_obj_modified['title_trm_freq'] = title_trm_freq
pkld_obj_modified['trm_ttl_wgt'] = trm_ttl_wgt
with open(MODIFIED_PICKLE_FILE, 'wb') as f:
pickle.dump(pkld_obj_modified, f)
print('Index created')
if __name__ == '__main__':
main() | [
"rssingh1999david@gmail.com"
] | rssingh1999david@gmail.com |
c697740729c72361e89fa3f8b66eec1705d07e84 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R3/benchmark/startPyquil348.py | c41069924278a31fe96eac76877e55e4208814cf | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += CNOT(1,0) # number=10
prog += X(0) # number=11
prog += CNOT(1,0) # number=12
prog += X(0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil348.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
1af9746ec4cafd840ab09d82afe8460f8f91246c | 27ece9ab880a0bdba4b2c053eccda94602c716d5 | /.history/tf_regression_logistic_20181130085723.py | 63a5da989d271aad51d7c4b5ba99da863f646db0 | [] | no_license | Symfomany/keras | 85e3ad0530837c00f63e14cee044b6a7d85c37b2 | 6cdb6e93dee86014346515a2017652c615bf9804 | refs/heads/master | 2020-04-08T20:21:35.991753 | 2018-11-30T08:23:36 | 2018-11-30T08:23:36 | 159,695,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,658 | py | import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os, argparse
"""
Any interaction with your filesystem to save persistent data in TF needs a Saver object and a Session object.
The Saver constructor allows you to control many things among which 1 is important:
The var_list: Default to None, this is the list of variables you want to persist to your filesystem.
You can either choose to save all the variables, some variables or even a dictionary to give custom names to your variables.
The Session constructor allows you to control 3 things:
+ The var_list: This is used in case of a distributed architecture to handle computation. You can specify which TF server or ‘target’ you want to compute on.
+ The graph: the graph you want the Session to handle. The tricky thing for beginners is the fact that there is always a default Graph in TF where all operations are set by default, so you are always in a “default Graph scope”.
+ The config: You can use ConfigProto to configure TF. Check the linked source for more details.
The Saver can handle the saving and loading (called restoring) of your Graph metadata and your Variables data.
To do that, it adds operations inside the current Graph that will be evaluated within a session.
By default, the Saver will handle the default Graph and all its included Variables,
but you can create as much Savers as you want to control any graph or subgraph and their variables.
If you look at your folder, it actually creates 3 files per save call and a checkpoint file,
I’ll go into more details about this in the annexe.
You can go on just by understanding that weights are saved into .data files and your graph
and metadata are saved into the .meta file.
Note: You must be careful to use a Saver with a Session linked to the Graph containing all the variables the Saver is handling.😨
To restore a meta checkpoint, use the TF helper import_meta_graph:
import tensorflow as tf
# This function returns a Saver
saver = tf.train.import_meta_graph('results/model.ckpt-1000.meta')
graph = tf.get_default_graph()
# Finally we can retrieve tensors, operations, collections, etc.
global_step_tensor = graph.get_tensor_by_name('loss/global_step:0')
train_op = graph.get_operation_by_name('loss/train_op')
hyperparameters = tf.get_collection('hyperparameters')
Restoring the weights:
with tf.Session() as sess:
# To initialize values with saved data
saver.restore(sess, 'results/model.ckpt.data-1000-00000-of-00001')
print(sess.run(global_step_tensor)) # returns 1000
Using a pre-trained graph in a new graph:
Now that you know how to save and load, you can probably figure out how to do it. Yet, there might be some tricks that could help you go faster.
The good point is that this method simplifies everything: you can load a pre-trained VGG-16,
access any nodes in the graph, plug your own operations and train the whole thing!
If you only want to fine-tune your own nodes, you can stop the gradients anywhere you want,
to avoid training the whole graph.
Files architecture
Getting back to TF, when you save your data the usual way, you end up with 5 different type of files:
+ A “checkpoint” file
+ Some “data” files
+ A “meta” file
+ An “index” file
+ If you use Tensorboard, an “events” file
+ If you dump the human-friendly version: a“textual Protobufs” file
+ The checkckpoint file is just a bookkeeping file that you can use in combination of high-level helper for loading different time saved chkp files.
+ The .meta file holds the compressed Protobufs graph of your model and all the metadata associated (collections, learning rate, operations, etc.)
+ The .index file holds an immutable key-value table linking a serialised tensor name and where to find its data in the chkp.data files
+ The .data files hold the data (weights) itself (this one is usually quite big in size). There can be many data files because they can be sharded and/or created on multiple timesteps while training.
I provide a slightly different version which is simpler and that I found handy. The original freeze_graph function provided by TF is installed in your bin dir and can be called directly if you used PIP to install TF. If not you can call it directly from its folder (see the commented import in the gist).
https://www.tensorflow.org/guide/saved_model
How to use the frozen model
Naturally, after knowing how to freeze a model, one might wonder how to use it.
Wee need to:
+ Import a graph_def ProtoBuf first
+ Load this graph_def into an actual Graph
"""
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
all its variables into constant
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
print(absolute_model_dir)
output_graph = absolute_model_dir + "/models/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
return output_graph_def
def get_dataset():
"""
Method used to generate the dataset
"""
# Numbers of row per class
row_per_class = 100
# Generate rows
sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])
sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])
healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])
healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])
features = np.vstack([sick, sick_2, healthy, healthy_2])
targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))
targets = targets.reshape(-1, 1)
return features, targets
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="frozen_model", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
features, targets = get_dataset()
# Plot points
#plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)
#plt.show()
tf_features = tf.placeholder(tf.float32, shape=[None, 2])
tf_targets = tf.placeholder(tf.float32, shape=[None, 1])
# First
w1 = tf.Variable(tf.random_normal([2, 3]))
b1 = tf.Variable(tf.zeros([3]))
# Operations
z1 = tf.matmul(tf_features, w1) + b1
a1 = tf.nn.sigmoid(z1)
# Output neuron
w2 = tf.Variable(tf.random_normal([3, 1]))
b2 = tf.Variable(tf.zeros([1]))
# Operations
z2 = tf.matmul(a1, w2) + b2
py = tf.nn.sigmoid(z2)
cost = tf.reduce_mean(tf.square(py - tf_targets))
correct_prediction = tf.equal(tf.round(py), tf_targets)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for e in range(100):
sess.run(train, feed_dict={
tf_features: features,
tf_targets: targets
})
print("accuracy =", sess.run(accuracy, feed_dict={
tf_features: features,
tf_targets: targets
}))
# We can check easily that we are indeed in the default graph
print(z1.graph == tf.get_default_graph())
# By default, the Saver handles every Variables related to the default graph
all_saver = tf.train.Saver()
all_saver.save(sess, args.model_dir + '/models')
#save a checkpoint file, which will store the above assignment
tf.saved_model.simple_save(sess,"models/model.ckpt",
inputs={
"features_data": tf_features,
}, outputs={
"targets_data": tf_targets
})
#freeze_graph(args.model_dir, args.output_node_names)
| [
"julien@meetserious.com"
] | julien@meetserious.com |
dbc1bede0bc2172802ac379f9150af11a08b2ccd | 4d0ddcfdf6c790fa2ce3d0402e02fc771348c472 | /power recur.py | 47626be07569432e931009edb25980b81ee3cae1 | [] | no_license | shaikhjawad94/MITx-6.00.1x | 2106010595d0c4bde7fdf81d2c32a50d2f9d80fb | a14e8d864ea7c9decfef0391dd2e319aafc6b8f2 | refs/heads/main | 2023-06-15T14:19:45.550876 | 2021-07-02T22:32:26 | 2021-07-02T22:32:26 | 382,181,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | #Write a function recurPower(base, exp) which computes base^exp by recursively calling itself to solve a smaller version of the same problem,
#and then multiplying the result by base to solve the initial problem.
#This function should take in two values - base can be a float or an integer; exp will be an integer >= 0.
#It should return one numerical value. Your code must be recursive - use of the ** operator or looping constructs is not allowed.
def recurPower(base, exp):
    '''
    base: int or float.
    exp: int >= 0
    returns: int or float, base^exp
    '''
    # Recursive definition of the power: base^0 == 1 and, for exp > 0,
    # base^exp == base * base^(exp - 1). No ** operator, no loops.
    if exp == 0:
        return 1
    return base * recurPower(base, exp - 1)
| [
"noreply@github.com"
] | shaikhjawad94.noreply@github.com |
63c5242d62ac92687e767228024604acd3a0151f | 8d67510844545dfe97689941083c792e16c4f6dc | /src/preprocessing/00_7_dataScope.py | 179a3d3cf23ac967accc33c0405db458e5260b94 | [] | no_license | kyungeuuun/trafficPrediction | 40bd8be1f4d32ebf71e58b5c1f0b13469c6b50f9 | a7cae1dce098ac58336d07cdeeebd0cce1affd5d | refs/heads/master | 2020-04-08T22:01:41.025785 | 2018-11-30T04:47:55 | 2018-11-30T04:47:55 | 159,768,317 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the filtered link-id list and the interpolated speed data
# produced by the earlier preprocessing steps.
links = np.load('data/all_links_afterFiltering2.npy')
spd = np.load('data/spd_interpolated_ignoreErrTimes.npy').item()
spd_array = np.load('data/spdArray.npy')
# Keep only vertex coordinates (GRS80 TM projection, Seoul only) of the
# filtered links.
coords = pd.read_csv('rawdata/LINK_VERTEX_seoulonly.csv', index_col=0)
coords = coords[coords['LINK_ID'].isin(links)]
info = pd.read_csv('rawdata/link_information.csv')
info = info[info['LINK_ID'].isin(links)]
# dt = info['LINK_ID'].values
# for l in links:
#     if l not in dt:
#         print l
# all data is in info file
# First study area "cx1" (Gangnam, per the image filename): links inside a
# 7000 x 7000 TM-coordinate bounding box (presumably meters -- TODO confirm).
p = coords[(coords['GRS80TM_X'] > 203000) & (coords['GRS80TM_X'] < 210000) & (coords['GRS80TM_Y'] > 442500) & (coords['GRS80TM_Y'] < 449500)]
plt.figure(figsize=(10,10))
plt.scatter(p['GRS80TM_X'], p['GRS80TM_Y'], s=1)
plt.savefig('images/00_7_dataScope_gangnam_cx1.png', dpi=244)
# plt.show()
# NOTE: Python 2 print statement -- this script is not Python 3 compatible.
print len(np.unique(p['LINK_ID'])) #457
cx1_links = np.unique(p['LINK_ID'])
np.save('data/linkIds_cx1.npy', cx1_links)
# Second study area "cx2" (Guro, per the image filename): same-size box.
p = coords[(coords['GRS80TM_X'] > 187000) & (coords['GRS80TM_X'] < 194000) & (coords['GRS80TM_Y'] > 444000) & (coords['GRS80TM_Y'] < 451000)]
plt.figure(figsize=(10,10))
plt.scatter(p['GRS80TM_X'], p['GRS80TM_Y'], s=1)
plt.savefig('images/00_7_dataScope_guro_cx2.png', dpi=244)
# plt.show()
print len(np.unique(p['LINK_ID'])) #589
cx2_links = np.unique(p['LINK_ID'])
np.save('data/linkIds_cx2.npy', cx2_links) | [
"keun415@gmail.com"
] | keun415@gmail.com |
a65a59dbf1af8f08123c5f31484ad27f1cdb164c | b004a0d561ab1d7b3a20336c4dd561ea5d607bdd | /python snippets/py_json.py | 6bdb313ce37aa6dde4cffb7a02619e9db7fd3691 | [] | no_license | Md-Mudassir/PracticeSnippets | e878ba1861f155ff2cc2093935b5ddea6cfc4c9f | d60bd2d454aa5cedea6e071057e3984efad06434 | refs/heads/master | 2021-07-06T00:06:57.176732 | 2020-12-06T17:56:58 | 2020-12-06T17:56:58 | 206,281,660 | 0 | 1 | null | 2019-10-29T13:35:40 | 2019-09-04T09:24:45 | JavaScript | UTF-8 | Python | false | false | 247 | py | # JSON is commonly used with data APIS. Here how we can parse JSON into a Python dictionary
import json

# Sample payload, as it might come back from a data API.
userJSON = '{"first_name": "John", "last_name": "Doe", "age": 30}'

# json.loads parses the JSON text into a plain Python dictionary.
user = json.loads(userJSON)
for value in (user, user['last_name']):
    print(value)
| [
"mdmudassirpro@gmail.com"
] | mdmudassirpro@gmail.com |
66c35ef831aaa59121f0b9b48d719fee7b050b34 | 078686dd88ff399cb3f9f773d237a7b18adf513a | /fund_crawl.py | 2e11bb1c2315571f53e2f78a3e04f58a7555f55c | [] | no_license | kh7160/lotto | b1995bb9488a02f9c0656779cb6bb118aa1d66b0 | 9c6b764bcc7244729d8ad39637de3d029f8f4b26 | refs/heads/master | 2023-02-28T00:12:27.295284 | 2021-02-01T10:49:20 | 2021-02-01T10:49:20 | 334,917,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import requests
from bs4 import BeautifulSoup
import fund_parse
# Scrape the current draw result from the dhlottery main page.
url = 'https://dhlottery.co.kr/common.do?method=main'
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'html.parser')
# 7 num crawling
# Group number is the first span inside the ".group .num" widget; the six
# digits each live in their own colored span. The "al720" class names
# presumably belong to the 720+ annuity-lottery widget -- confirm on the page.
group = soup.select('.group .num span')
group = group[0].text
num = []
num.append(int(soup.find_all('span', {'class' : 'num al720_color1'})[0].text))
num.append(int(soup.find_all('span', {'class' : 'num al720_color2'})[0].text))
num.append(int(soup.find_all('span', {'class' : 'num al720_color3'})[0].text))
num.append(int(soup.find_all('span', {'class' : 'num al720_color4'})[0].text))
num.append(int(soup.find_all('span', {'class' : 'num al720_color5'})[0].text))
num.append(int(soup.find_all('span', {'class' : 'num al720_color6'})[0].text))
# mysql update
# Persist the scraped group and digits through the project's fund_parse helpers.
fund_parse.fund_update_group(group)
fund_parse.fund_update_number(num) | [
"kh7160@naver.com"
] | kh7160@naver.com |
87372704d860ee5b12915a2510e6eeb65b6a7399 | c18ffe72aca00d1a55d47cefa7f2f784c138896e | /faster-rcnn_vgg16/model/region_proposal_network.py | fe66bb27bd310d7527b7c75c16492afb62c4dfc5 | [
"MIT"
] | permissive | fengkaibit/faster-rcnn_vgg16 | 0438ab568dbcff31e5f86381f9bbf4b131e60684 | 8d1709bd170d69115041ca76bd38c8514cdb8fa7 | refs/heads/master | 2020-05-17T23:13:33.315214 | 2019-04-29T08:05:47 | 2019-04-29T08:05:47 | 184,023,750 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,559 | py | import numpy as np
from torch.nn import functional
import torch
from model.utils.bbox_tools import generate_anchor_base
from model.utils.creator_tool import ProposalCreator
class RegionProposalNetwork(torch.nn.Module):
    """Region Proposal Network (RPN) head of Faster R-CNN.

    A 3x3 conv slides over the backbone feature map; for each of the 9
    anchors per spatial position, `score` predicts object/background and
    `loc` predicts 4 box-regression values. ProposalCreator then converts
    these predictions into region proposals (RoIs).
    """
    def __init__(
            self, in_channels=512, mid_channels=512, ratios=[0.5,1,2],
            anchor_scales=[8,16,32], feat_stride=16,
            proposal_creator_params=dict()):
        super(RegionProposalNetwork, self).__init__()
        self.anchor_base = generate_anchor_base(anchor_scales=anchor_scales,ratios=ratios)
        self.feat_stride = feat_stride
        self.proposal_layer = ProposalCreator(self, **proposal_creator_params)
        num_anchor_base = self.anchor_base.shape[0] #9
        self.conv1 = torch.nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
        self.score = torch.nn.Conv2d(mid_channels, num_anchor_base*2, 1, 1, 0) # binary classification: object vs. no object
        self.loc = torch.nn.Conv2d(mid_channels, num_anchor_base*4, 1, 1, 0) # box-coordinate regression
        normal_init(self.conv1, 0, 0.01)
        normal_init(self.score, 0, 0.01)
        normal_init(self.loc, 0, 0.01)
    def forward(self, x, img_size, scale=1.):
        """Run the RPN over feature map `x` and produce RoIs for each image."""
        n, _, hh, ww = x.shape # x is the feature map; n is the batch size (1 in this code); _ is 512; hh, ww are the feature-map height and width
        anchor = _enumerate_shifted_anchor(
            np.array(self.anchor_base), self.feat_stride, hh, ww)
        num_anchor = anchor.shape[0] // (hh * ww) #
        h = functional.relu(self.conv1(x)) #(batch_size, 512, hh, ww)
        rpn_locs = self.loc(h) #(batch_size, 9*4, hh, ww)
        rpn_locs = rpn_locs.permute(0, 2, 3, 1).contiguous().view(n,-1, 4) # to (batch_size, hh, ww, 9*4), then to (batch_size, hh*ww*9, 4)
        rpn_scores = self.score(h)
        rpn_scores = rpn_scores.permute(0, 2, 3, 1).contiguous() # to (batch_size, hh, ww, 9*2)
        rpn_softmax_scores = functional.softmax(rpn_scores.view(n, hh, ww, num_anchor, 2), dim=4) #TODO check the softmax dimension
        rpn_fg_scores = rpn_softmax_scores[:, :, :, :, 1].contiguous() # foreground (object) probability
        rpn_fg_scores = rpn_fg_scores.view(n, -1) # foreground probability of every anchor
        rpn_scores = rpn_scores.view(n, -1, 2)
        rois = list()
        roi_indices = list()
        for i in range(n):
            roi = self.proposal_layer(
                rpn_locs[i].cpu().data.numpy(),
                rpn_fg_scores[i].cpu().data.numpy(),
                anchor, img_size, scale=scale)
            # Per image: rpn_locs has shape (hh*ww*9, 4), rpn_fg_scores (hh*ww*9,),
            # anchor (hh*ww*9, 4); img_size is (3, H, W) after preprocessing.
            # From the foreground probabilities of the (H/16)x(W/16)x9 (~20000)
            # anchors, the top 12000 are kept and NMS reduces them to ~2000
            # proposal boxes; roi has shape (2000, 4).
            batch_index = i * np.ones((len(roi),),dtype=np.int32) #(len(roi), )
            rois.append(roi)
            roi_indices.append(batch_index) # record which batch element each roi belongs to
        rois = np.concatenate(rois,axis=0) # stack all rois; shape (R, 4), R = total rois over the batch
        roi_indices = np.concatenate(roi_indices, axis=0) # batch index per roi, aligned with rois
        # Returned shapes: rpn_locs (n, hh*ww*9, 4); rpn_scores (n, hh*ww*9, 2);
        # rois (~2000, 4); roi_indices is effectively unused here (batch size 1);
        # anchor (hh*ww*9, 4).
        return rpn_locs, rpn_scores, rois, roi_indices, anchor
def normal_init(m, mean, stddev, truncated=False):
    """Fill m.weight with normal noise and zero m.bias.

    With truncated=True the samples are folded into (-2, 2) via fmod before
    being scaled by stddev and shifted by mean, i.e. a cheap truncated normal.
    """
    weight = m.weight.data
    if truncated:
        # Sample N(0, 1), fold into (-2, 2) with fmod, then scale and shift.
        weight.normal_().fmod_(2).mul_(stddev).add_(mean)
    else:
        weight.normal_(mean, stddev)
    m.bias.data.zero_()
def _enumerate_shifted_anchor(anchor_base, feat_stride, height, width):
shift_y = np.arange(0, height * feat_stride, feat_stride)
shift_x = np.arange(0, width * feat_stride, feat_stride)
shift_x, shift_y = np.meshgrid(shift_x, shift_y) #产生x,y坐标网格
shift = np.stack((shift_y.ravel(), shift_x.ravel(),
shift_y.ravel(), shift_x.ravel()), axis=1) #产生坐标偏移矩阵(w*h, 4)
A = anchor_base.shape[0] #特征图上每一个点产生anchor数目,9
K = shift.shape[0] #坐标偏移矩阵行数(即特征图的像素点个数, w*h)
#(1, A ,4) + (K, 1, 4) = (K, A, 4)
anchor = anchor_base.reshape(1, A, 4) + shift.reshape((1, K, 4)).transpose((1,0,2))
anchor = anchor.reshape((K * A, 4)).astype(np.float32) #修改尺寸为(K * A, 4)
return anchor
def _enumerate_shifted_anchor_torch(anchor_base, feat_stride, height, width):
    # NOTE(review): despite the _torch suffix, only the two arange calls use
    # torch; everything below goes through NumPy and a NumPy array is
    # returned, exactly like _enumerate_shifted_anchor. Presumably an
    # unfinished port -- confirm before relying on it for GPU tensors.
    shift_y = torch.arange(0, height * feat_stride, feat_stride)
    shift_x = torch.arange(0, width * feat_stride, feat_stride)
    shift_x, shift_y = np.meshgrid(shift_x, shift_y) # build the x/y coordinate grids
    shift = np.stack((shift_y.ravel(), shift_x.ravel(),
                      shift_y.ravel(), shift_x.ravel()), axis=1) # per-cell (y, x, y, x) offsets, shape (w*h, 4)
    A = anchor_base.shape[0] # number of anchors per feature-map cell (9)
    K = shift.shape[0] # number of feature-map cells (w*h)
    #(1, A ,4) + (K, 1, 4) = (K, A, 4)
    anchor = anchor_base.reshape(1, A, 4) + shift.reshape((1, K, 4)).transpose((1,0,2))
    anchor = anchor.reshape((K * A, 4)).astype(np.float32) # flatten to (K * A, 4)
    return anchor
| [
"noreply@github.com"
] | fengkaibit.noreply@github.com |
13a1f0df061c5cda379d81c09de77a221a6b7deb | 2a23bb3d1372dd69dedb1964ab23d2ac7e4588a0 | /src/CG_AMSREMOVALDATE.py | 09b5517cfa2f8bad2679601418dc9e0f22e9ba79 | [] | no_license | git786hub/Dynatrace_python | 6fa8e15963e54d8d89a4bb56e247f2e4c62f8823 | cffe77be8712947c873d4857970c16aee4a04711 | refs/heads/master | 2020-05-31T19:13:56.510436 | 2019-06-05T19:12:26 | 2019-06-05T19:12:26 | 190,453,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | # AUTOSCRIPT NAME: CG_AMSREMOVALDATE
# CREATEDDATE: 2015-12-18 22:29:45
# CREATEDBY: U03V
# CHANGEDATE: 2016-01-25 12:30:25
# CHANGEBY: UFAP
# SCRIPTLANGUAGE: jython
# STATUS: Draft
# Maximo automation script (Jython, Python 2 syntax): stamps the AMS removal
# date attribute (EQ23) on qualifying asset records.
from psdi.app.asset import AssetRemote
from psdi.mbo import MboConstants
from java.util import Date
from psdi.mbo import SqlFormat
# Only run for interactive (UI-driven) changes on asset MBOs.
if interactive:
 if isinstance(mbo,AssetRemote):
  assettype= mbo.getString("ASSETTYPE")
  assetStatus = mbo.getString("STATUS")
  # Status "AD" and one of the tracked asset types (C/O/T/U/V).
  if assetStatus == "AD" and (assettype== "C" or assettype== "O" or assettype == "T" or assettype== "U" or assettype == "V"):
   location = mbo.getString("LOCATION")
   print "@@@@@@@@@@@@@@@@@@"
   locMboSet = mbo.getMboSet("LOCATION")
   locType = locMboSet.getMbo(0).getString("TYPE")
   # Non-premise locations get today's date written to EQ23, skipping
   # access checks, field validation and attached actions.
   if locType !="PREMISE":
    mbo.setValue("EQ23", Date(), MboConstants.NOACCESSCHECK | MboConstants.NOVALIDATION_AND_NOACTION)
"Pulkit.Agarwal@oncor.com"
] | Pulkit.Agarwal@oncor.com |
a7bdc47dcee65f2f36d2fbd29a75e05ed9618340 | 02f03f54c4acd517e5c10757540eeca453815980 | /app.py | c3b576a85127550f779eece457ab8b9ae2807ca2 | [] | no_license | eddiexunyc/web_scraping_challenge | a33cdcdd0698148d4e92f44598950d06d483c7bc | b94dc1e7f03712012fda07b1737b2933529e5596 | refs/heads/main | 2023-01-19T16:50:59.791356 | 2020-11-23T14:44:59 | 2020-11-23T14:44:59 | 308,758,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
# Local MongoDB database "mars_app"; PyMongo reads the URI from app config.
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
    # Render the latest scraped Mars data (a single document) on the home page.
    mar_dicts = mongo.db.mar_dicts.find_one()
    return render_template("index.html", mars = mar_dicts)
@app.route("/scrape")
def scrape():
    # Re-scrape and upsert the single Mars-data document, then redirect home.
    mar_dicts = mongo.db.mar_dicts
    mars_info = scrape_mars.scrape()
    mar_dicts.update({}, mars_info, upsert = True)
    return redirect("/", code = 302)
if __name__ == "__main__":
app.run(debug=True) | [
"eddie.xu.nyc@gmail.com"
] | eddie.xu.nyc@gmail.com |
b93044e9ad964479e2545705a211c20474cf51be | b8c8fb24086151d8cc021020b1967cb4783358ba | /rprecorder/cli/track.py | 1c1bd6b5b85e9aaf1be46e620cde68a7facdc663 | [
"BSD-2-Clause"
] | permissive | sniner/rp-recorder | d78bd27ffe7a3b53982e98db42a8156c7125ca22 | 4266c6533c3076d7f903f5dea6b43d3797ef62cc | refs/heads/main | 2023-08-15T02:08:01.539439 | 2021-09-28T19:00:13 | 2021-09-28T19:00:13 | 411,406,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,396 | py | #!/usr/bin/python3
import json
import logging
import signal
import sqlite3
import sys
import time
import threading
from datetime import datetime
import requests
logger = logging.getLogger(__name__)
# Radio Paradise API:
#
# * https://github.com/marco79cgn/radio-paradise
# * http://moodeaudio.org/forum/showthread.php?tid=2172&page=2
class RPTrackDatabase:
    """Thread-safe SQLite store for Radio Paradise play history.

    Tables: tracks (unique track records), played (which track was heard on
    which channel), playlists (time-ordered play log) and channels (names of
    the four RP channels). All statement execution is serialized by a lock so
    one connection can be shared between recorder threads.
    """
    def __init__(self, path=None):
        self.connection = None
        self.path = path
        # Serializes all statement execution across recorder threads.
        self.lock = threading.Lock()
        if path:
            self._open()
            self._setup()
    def _open(self):
        # check_same_thread=False: the connection is shared between threads,
        # guarded by self.lock.
        self.connection = sqlite3.connect(self.path, check_same_thread=False)
        self.connection.row_factory = sqlite3.Row
    def _close(self):
        if self.connection:
            self.connection.close()
            self.connection = None
    def _setup(self):
        # Create tables and indexes if they do not exist yet, and seed the
        # four channel names.
        self.connection.execute("""
            CREATE TABLE IF NOT EXISTS tracks
            (track INTEGER PRIMARY KEY,
            artist TEXT NOT NULL,
            title TEXT NOT NULL,
            album TEXT NOT NULL,
            year INTEGER NOT NULL,
            cover TEXT,
            UNIQUE (artist, title, album, year) ON CONFLICT IGNORE)
        """)
        self.connection.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_tracks_1 ON tracks(track)")
        # self.connection.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_tracks_2 ON tracks(artist, title, album, year)")
        self.connection.execute("""
            CREATE TABLE IF NOT EXISTS playlists
            (time TEXT NOT NULL, channel INTEGER NOT NULL, track INTEGER NOT NULL)
        """)
        self.connection.execute("CREATE INDEX IF NOT EXISTS idx_playlists_1 ON playlists(time, channel)")
        self.connection.execute("""
            CREATE TABLE IF NOT EXISTS played
            (channel INTEGER NOT NULL,
            track INTEGER NOT NULL,
            PRIMARY KEY (channel, track) ON CONFLICT IGNORE)
            WITHOUT ROWID
        """)
        # self.connection.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_played_1 ON played(channel, track)")
        self.connection.execute("""
            CREATE TABLE IF NOT EXISTS channels
            (channel INTEGER PRIMARY KEY,
            name TEXT NOT NULL,
            UNIQUE (channel, name) ON CONFLICT IGNORE)
        """)
        self.connection.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_channels_1 ON channels(channel)")
        with self.connection:
            self.connection.execute("""
                INSERT OR IGNORE INTO channels(channel, name) VALUES
                (0, 'Main'),
                (1, 'Mellow'),
                (2, 'Rock'),
                (3, 'World/Etc')
            """)
    def row_to_dict(self, row:sqlite3.Row) -> dict:
        # Convert a sqlite3.Row into a plain dict.
        return {k:row[k] for k in row.keys()}
    def open(self, path):
        # Close any current connection, then open and initialize `path`.
        self._close()
        self.path = path
        self._open()
        self._setup()
        return self
    def close(self):
        self._close()
        return self
    def query(self, statement, *args):
        # Execute a SELECT under the lock; returns the cursor.
        # NOTE(review): rows are fetched by the caller after the lock is
        # released -- presumably fine for this access pattern, confirm.
        with self.lock:
            return self.connection.execute(statement, *args)
    def commit(self, statement, *args):
        # Execute a write statement under the lock inside a transaction
        # (`with self.connection` commits on success, rolls back on error).
        with self.lock:
            with self.connection:
                self.connection.execute(statement, *args)
    def get_track(self, artist, title, album, year):
        # Return the matching tracks row, or None.
        return self.query(
            "SELECT * FROM tracks WHERE artist=? AND title=? AND album=? AND year=?",
            (artist, title, album, year)
        ).fetchone()
    def get_tracks(self, channel:int=None):
        # Return all tracks, or only those played on `channel`.
        # NOTE(review): `if channel:` means channel 0 (Main) falls through to
        # the "all tracks" branch -- confirm whether that is intended.
        if channel:
            return self.query(
                "SELECT t.* FROM tracks AS t INNER JOIN played AS p ON p.track=t.track WHERE p.channel=?",
                (channel,)
            ).fetchall()
        else:
            return self.query("SELECT * FROM tracks").fetchall()
    def add_track(self, artist:str, title:str, album:str, year:int, cover:str=""):
        # Insert a track; duplicates are ignored via the UNIQUE constraint.
        self.commit(
            "INSERT OR IGNORE INTO tracks(artist, title, album, year, cover) VALUES (?, ?, ?, ?, ?)",
            (artist, title, album, year, cover),
        )
    def add_played(self, channel:int, track:int):
        # Record that `track` was heard on `channel` (idempotent).
        self.commit(
            "INSERT OR IGNORE INTO played(channel, track) VALUES (?, ?)",
            (channel, track)
        )
    def set_cover(self, track:int, cover:str):
        self.commit("UPDATE tracks SET cover=? WHERE track=?", (cover, track))
    def add_to_playlist(self, channel:int, track:int):
        # Append to the play log, but only if it differs from the most
        # recent entry for this channel (avoids duplicates on re-polls).
        row = self.query(
            "SELECT track FROM playlists WHERE channel=? ORDER BY time DESC LIMIT 1",
            (channel,)
        ).fetchone()
        if row is None or row["track"]!=track:
            self.commit(
                "INSERT OR IGNORE INTO playlists(time, channel, track) VALUES (?, ?, ?)",
                (datetime.now().isoformat(), channel, track)
            )
class RPTrackRecorder:
    """Polls the Radio Paradise now_playing API for one channel and stores
    every newly playing track in an RPTrackDatabase."""

    def __init__(self, db, channel, lock=None):
        # `lock` is accepted for backward compatibility but unused: the
        # database serializes access internally.
        self.db = db
        self.channel = channel or 0
        self.active = True

    def stop(self):
        """Ask the polling loop to terminate after the current sleep."""
        self.active = False

    def _api_url(self, n):
        return f"https://api.radioparadise.com/api/now_playing?chan={n}"

    def _track_playing(self, wait=30):
        """Yield each newly playing track dict until stop() is called.

        Adds a "channel" key, coerces "year" to int (0 on failure) and uses
        the API's "time" field (seconds, at least 5) as the next poll delay.
        """
        url = self._api_url(self.channel)
        current = {}
        while self.active:
            # Default poll delay. Fix: the original only assigned `duration`
            # on HTTP 200, so a failed request hit an UnboundLocalError in
            # the logging call below.
            duration = wait
            r = requests.get(url)
            if r.status_code == 200:
                playing = json.loads(r.text)
                if "time" in playing:
                    try:
                        duration = max(5, int(playing["time"]) + 1)
                    except (TypeError, ValueError):
                        duration = wait
                    del playing["time"]
                if "year" in playing:
                    try:
                        playing["year"] = int(playing["year"])
                    except (TypeError, ValueError):
                        playing["year"] = 0
                playing["channel"] = self.channel
                if playing != current:
                    current = playing
                    yield current
            logger.info(f"Channel {self.channel}: Waiting for {duration} seconds")
            time.sleep(duration)

    def _record(self):
        """Consume _track_playing() and persist each track to the database."""
        for track in self._track_playing():
            row = self.db.get_track(track['artist'], track['title'], track['album'], track['year'])
            if row is None:
                self.db.add_track(track["artist"], track["title"], track["album"], track["year"], track["cover"])
                row = self.db.get_track(track['artist'], track['title'], track['album'], track['year'])
            if row:
                track_id, cover_url = row["track"], row["cover"]
                logger.info(f"Channel {self.channel}: Now playing: #{track_id}: {dict(row)}")
                self.db.add_played(self.channel, track_id)
                if cover_url != track["cover"]:
                    logger.info(f"Channel {self.channel}: Updating cover url on {track_id}")
                    # Fix: store the fresh cover URL from the API. The
                    # original wrote the stale DB value (cover_url) back,
                    # which made the update a no-op.
                    self.db.set_cover(track_id, track["cover"])
                self.db.add_to_playlist(self.channel, track_id)
            else:
                logger.warning(f"Channel {self.channel}: Unable to retrieve/insert title record")

    def record(self):
        """Run the polling loop until stop() is called, surviving exceptions
        with a short back-off."""
        logger.info(f"Channel {self.channel}: tracking started")
        self.active = True
        while self.active:
            try:
                self._record()
            except Exception as exc:
                logger.error(f"Channel {self.channel}: Exception occured: {exc}", exc_info=True)
                time.sleep(10)
        logger.info(f"Channel {self.channel}: tracking stopped")
def record(path:str, daemon:bool=True):
    """Open the database at `path` and spawn one recorder thread per Radio
    Paradise channel (0-3).

    Returns (db, recorders, threads) so the caller can stop and join them.
    """
    db = RPTrackDatabase(path)
    tracker = []
    threads = []
    for channel in range(4):
        recorder = RPTrackRecorder(db, channel)
        tracker.append(recorder)
        threads.append(threading.Thread(target=recorder.record, daemon=daemon))
    for worker in threads:
        worker.start()
    return db, tracker, threads
def main():
    """Entry point: start all four channel recorders and wait for Ctrl-C."""
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s -- %(message)s")
    db, _tracker, _threads = record("rp_tracks.db")
    def signal_handler(sig, frame):
        # On SIGINT: close the database and exit; the recorder threads are
        # daemons, so they die with the process.
        logger.warning("Signal received, stopping now!")
        db.close()
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Block the main thread until a signal arrives (POSIX-only call).
    signal.pause()
if __name__=="__main__":
main()
# vim: set et sw=4 ts=4 ft=python:
| [
"mail@sniner.dev"
] | mail@sniner.dev |
ddff08d9864dfe1076ecf400d73e63b3b20a37df | 1a663b69c47ac56c38aed5704fc403df82b48491 | /teafacto/scripts/theanowrap.py | 331e4da792e0cef33af941df2b7e907443d1db42 | [
"MIT"
] | permissive | lukovnikov/teafacto | 9c0dda1dbb1abbcff795097a3522178ad5395852 | 5e863df8d061106ad705c0837f2d2ca4e08db0e4 | refs/heads/master | 2020-04-04T05:53:56.616520 | 2017-02-08T21:03:17 | 2017-02-08T21:03:17 | 46,288,607 | 2 | 5 | null | 2016-04-13T12:25:47 | 2015-11-16T16:52:23 | Python | UTF-8 | Python | false | false | 710 | py | from teafacto.core.base import tensorops as T, Val, param
import numpy as np
import sys
# Scratch script exercising the teafacto tensor-wrapper API
# (Val/param wrappers and the tensorops namespace).
x = Val(np.random.random((10,10)))
#y = Val(np.random.random((10,10)))
y = param((10, 10), name="y").uniform()
w = param((10, 10), name="w").uniform()
#z = T.dot(x, y)
z = (x + y)
u = z * w
s = T.nnet.sigmoid
s2 = T.nnet.sigmoid
# NOTE: Python 2 print statements throughout. The script exits right after
# the identity check below; everything past sys.exit() is unreachable dead
# code kept for reference.
print s == s2
sys.exit()
print z.allparams
print T.dot
print z.ndim
print z.dimswap
zd = z.dimswap(1,0)
print z.dimswap(0, 1).allparams
print y.dimswap(0, 1).allparams
print T.nnet.conv.conv2d
print u.norm(2).allparams
print u.dimswap(0, 1).allparams
print T.nnet.softmax(z).allparams
zs = T.nnet.sigmoid(z)
zs = zs + x
zs.autobuild()
zs.autobuild()
us = T.nnet.sigmoid(u)
print us.allparams
"lukovnikov@outlook.com"
] | lukovnikov@outlook.com |
037565994254c8953f954ce3a3106794d5570f56 | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /hardware/graphics/intel-media-sdk/actions.py | db40590dc372fba8b36c3ecadcf4cca3946cc0a2 | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file https://www.gnu.org/licenses/gpl-3.0.txt
from pisi.actionsapi import cmaketools, mesontools
# CMake configure flags for intel-media-sdk joined into one string;
# "-B_build -G Ninja" puts a Ninja build tree under _build.
j = ''.join([
    ' -DCMAKE_BUILD_TYPE=Release',
    ' -DBUILD_TOOLS=ON',
    ' -DENABLE_X11_DRI3=ON',
    ' -DENABLE_OPENCL=ON',
    ' -DENABLE_WAYLAND=ON',
    ' -B_build -G Ninja -L '
])
def setup():
    # Configure with CMake using the flags above.
    cmaketools.configure(j)
def build():
    # "-C _build" targets the CMake/Ninja tree -- presumably mesontools
    # drives ninja directly here; confirm against pisi.actionsapi.
    mesontools.build("-C _build")
def install():
    mesontools.install("-C _build")
| [
"uglyside@yandex.ru"
] | uglyside@yandex.ru |
bec8e911e2a334b2cc574ae87877c2355faa1f17 | 5270649aba7f75bfc3ea759db33e095f7e29b191 | /lab2/q2/ball.py | f873e92690eac795aab52db77548e3c829e134b0 | [] | no_license | iamnidheesh/Artificial-Life-Simulation-Assignment | 4a028231ebd108d4f4b91ca156adfce0a35f50d2 | e787aefe8b6b77fcc581f93f39eec980363fb9cf | refs/heads/master | 2020-04-30T06:40:01.054549 | 2019-03-20T05:14:44 | 2019-03-20T05:14:44 | 176,658,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py |
from agents import agentList
import math
class ball :
    """A colored canvas agent that is attracted to agents of its own type
    and repelled by agents of other types."""

    def assignColor(self, t):
        """Map the agent type code to its canvas fill color."""
        if t == 1:
            return 'black'
        elif t == 2:
            return 'red'
        else:
            return 'grey'

    def attraction(self, other, constant):
        """(fx, fy) force pulling this agent toward `other`.

        Magnitude grows with distance (constant * dis * component), like a
        spring.
        """
        pos = self.canvas.coords(self.shape)
        posH = self.canvas.coords(other.shape)
        xdis = posH[0] - pos[0]
        ydis = posH[1] - pos[1]
        dis = math.sqrt(xdis ** 2 + ydis ** 2)
        return (constant * dis * xdis, constant * dis * ydis)

    def repulsion(self, other, constant):
        """(fx, fy) force pushing this agent away from `other`.

        Decays roughly as 1/dis^3; the +1 in the denominator avoids a
        division by zero when the agents overlap.
        """
        pos = self.canvas.coords(self.shape)
        posH = self.canvas.coords(other.shape)
        xdis = posH[0] - pos[0]
        ydis = posH[1] - pos[1]
        dis = math.sqrt(xdis ** 2 + ydis ** 2)
        return (-constant * xdis / (1 + dis * dis * dis),
                -constant * ydis / (1 + dis * dis * dis))

    def __init__(self, startx, starty, size, canvas, velocity, t):
        self.startx = startx
        self.starty = starty
        self.canvas = canvas
        self.velocity = velocity
        self.angle = 0
        self.size = size
        self.t = t
        self.shape = canvas.create_oval(startx, starty, startx + size, starty + size,
                                        fill=self.assignColor(self.t))

    def agentFun(self):
        """One simulation step: sum pairwise forces and move the shape.

        Fixes over the original:
        * forces are accumulated over every other agent (the original kept
          only the last same-type / different-type pair seen);
        * attraction and repulsion tuples are added component-wise (the
          original `+` concatenated the two 2-tuples into a 4-tuple);
        * no crash when all other agents share this agent's type (the
          original then indexed into the integer 0).
        """
        fx = 0.0
        fy = 0.0
        for other in agentList:
            if other == self:
                continue
            if other.t == self.t:
                # Same type: strong attraction, weak short-range repulsion.
                ax, ay = self.attraction(other, 10 ** -4)
                rx, ry = self.repulsion(other, 10 ** -7)
            else:
                # Different type: strong repulsion, weak attraction.
                ax, ay = self.repulsion(other, 10 ** -4)
                rx, ry = self.attraction(other, 10 ** -7)
            fx += ax + rx
            fy += ay + ry
        self.angle = math.atan2(fx, fy)
        self.canvas.move(self.shape, fx, fy)
| [
"nidheeshpandey@gmail.com"
] | nidheeshpandey@gmail.com |
8d68ed2fc5394b6fea01b046cd15e38fca6c21f2 | 8745739cba807efe822814f8ffbf454e98f81000 | /Auth_App/migrations/0001_initial.py | efcfeed595945165d63f296b094321e20050b515 | [] | no_license | MehediHasan96/Account | 01e336a5c46fb96fc2fc09c8426868c0101c0a6d | 359fe5b1642123b4ac11f13541019e988a026bdc | refs/heads/main | 2023-07-09T16:30:35.741082 | 2021-08-23T05:43:56 | 2021-08-23T05:43:56 | 398,984,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | # Generated by Django 3.2.6 on 2021-08-22 17:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for Auth_App: creates the custom
    # "User" auth model (keyed on a unique email) and its one-to-one
    # "Profile". Edit with care -- Django tracks applied migrations by name.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        # Custom user model: email login, standard permission/group wiring.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log in this site', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts', verbose_name='active')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Per-user profile with contact/address details, one-to-one with User.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(blank=True, max_length=264)),
                ('full_name', models.CharField(blank=True, max_length=264)),
                ('address_1', models.TextField(blank=True, max_length=300)),
                ('city', models.CharField(blank=True, max_length=40)),
                ('zipcode', models.CharField(blank=True, max_length=10)),
                ('country', models.CharField(blank=True, max_length=50)),
                ('phone', models.CharField(blank=True, max_length=20)),
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mehedicse96@gmail.com"
] | mehedicse96@gmail.com |
59c6f29c6c88c672ad008ad803c796881d0de0c6 | 938a089e9b5e876a3b48932274171da7a4e7aa42 | /bench/genesys2.py | 2332f797a6bd9cebe7f8ad88338e320f41377567 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | rprinz08/liteeth | aa94e0eb790ba571ea59e98697d11300a57b3d03 | dc10f82753efd236e1811a72c4be2c27cefd2c68 | refs/heads/master | 2023-07-18T17:17:06.441779 | 2021-09-10T08:06:47 | 2021-09-10T08:06:47 | 260,763,015 | 0 | 0 | NOASSERTION | 2020-05-02T19:47:32 | 2020-05-02T19:47:32 | null | UTF-8 | Python | false | false | 2,680 | py | #!/usr/bin/env python3
#
# This file is part of LiteEth.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import genesys2
from litex_boards.targets.genesys2 import _CRG
from litex.soc.cores.clock import *
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from liteeth.phy.s7rgmii import LiteEthPHYRGMII
# Bench SoC ----------------------------------------------------------------------------------------
class BenchSoC(SoCCore):
    """Minimal Genesys2 SoC for benching LiteEth: Etherbone over the board's
    RGMII PHY, a small test SRAM and a LED chaser."""
    def __init__(self, sys_clk_freq=int(50e6)):
        platform = genesys2.Platform()

        # SoCMini ----------------------------------------------------------------------------------
        SoCMini.__init__(self, platform, clk_freq=sys_clk_freq,
            ident          = "LiteEth bench on Genesys2",
            ident_version  = True
        )

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)

        # Etherbone --------------------------------------------------------------------------------
        self.submodules.ethphy = LiteEthPHYRGMII(
            clock_pads = self.platform.request("eth_clocks"),
            pads       = self.platform.request("eth"),
            with_hw_init_reset = False)
        self.add_etherbone(phy=self.ethphy, buffer_depth=255)

        # SRAM -------------------------------------------------------------------------------------
        # 4 KiB scratch RAM mapped at 0x20000000, used as the bench target.
        self.add_ram("sram", 0x20000000, 0x1000)

        # Leds -------------------------------------------------------------------------------------
        from litex.soc.cores.led import LedChaser
        self.submodules.leds = LedChaser(
            pads         = platform.request_all("user_led"),
            sys_clk_freq = sys_clk_freq)
# Main ---------------------------------------------------------------------------------------------
def main():
    """Parse CLI flags, build the bench SoC and optionally load the bitstream."""
    parser = argparse.ArgumentParser(description="LiteEth Bench on Genesys2")
    parser.add_argument("--build", action="store_true", help="Build bitstream")
    parser.add_argument("--load", action="store_true", help="Load bitstream")
    args = parser.parse_args()

    bench = BenchSoC()
    builder = Builder(bench, csr_csv="csr.csv")
    builder.build(run=args.build)

    if args.load:
        programmer = bench.platform.create_programmer()
        bitstream = os.path.join(builder.gateware_dir, bench.build_name + ".bit")
        programmer.load_bitstream(bitstream)
if __name__ == "__main__":
main()
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
6387f24c6cee7a4d44c898fadc2886bc1358fc85 | cb3d1b072391b07ef0e9596df7f223f37683e970 | /[0333]_Largest_BST_Subtree/Largest_BST_Subtree.py | 20ac486fdae272035ca2cdb53f05e32e45ab550b | [] | no_license | kotori233/LeetCode | 99620255a64c898457901602de5db150bc35aabb | 996f9fcd26326db9b8f49078d9454fffb908cafe | refs/heads/master | 2021-09-10T18:00:56.968949 | 2018-03-30T14:38:27 | 2018-03-30T14:38:27 | 103,036,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def largestBSTSubtree(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Post-order DFS. For every node return (size, max, min) of its
        subtree when that subtree is a valid BST; a parent is a BST iff
        parent.val > max(left) and parent.val < min(right).

        Bug fix: an *invalid* subtree now reports (0, +inf, -inf) so both
        parent checks fail. The original returned (0, -inf, +inf) -- the
        same sentinel as an *empty* subtree -- which let a parent sitting
        on an invalid child pass the check and be over-counted.
        """
        self.res = 0

        def dfs(node):
            if node is None:
                # Empty subtree: size 0, neutral bounds for the parent check.
                return (0, float('-inf'), float('inf'))
            left = dfs(node.left)
            right = dfs(node.right)
            if left[1] < node.val < right[2]:
                size = left[0] + right[0] + 1
                self.res = max(size, self.res)
                return (size, max(node.val, right[1]), min(node.val, left[2]))
            # Invalid subtree: poisoned bounds so no ancestor can qualify.
            return (0, float('inf'), float('-inf'))

        dfs(root)
        return self.res
| [
"cycycy3333@163.com"
] | cycycy3333@163.com |
ead9ad64b8365540807e891bd0144812da5bfef0 | c856b321e7f2cb7da64df1a86c5f254dfd968548 | /tests/testCallSocketio.py | 48805e46c37eeb8ad89af148bafbde2ad0c7c951 | [] | no_license | tunchunairarko/hwu_animus_robot | e8213d0d02061b58c1f8a4afb3f2afb8746a19c6 | 29cbcd8a9960787854f3489e492e2715db88b72c | refs/heads/main | 2023-08-14T00:48:53.152991 | 2021-09-15T08:02:46 | 2021-09-15T08:02:46 | 340,909,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | import socketio
print(socketio.__version__)
sio = socketio.Client()
# Connects at import time to the hosted telepresence server.
sio.connect('https://hwu-telepresence-room.herokuapp.com')
# if(sio.connected):
#     print("*****************YES*****************")
# else:
#     print("*****************NO*******************")
@sio.event
def connect():
    print('connected to server')
    # NOTE(review): `rooms` is a server-side concept; socketio.Client
    # presumably has no `sockets` attribute, so this line would raise
    # AttributeError when the connect event fires -- confirm before use.
    sr = sio.sockets.adapter.rooms["9dd0ee98-d035-4e20-a95b-65c117b95a59"]
@sio.event
def disconnect():
    print('disconnected from server')
def main():
    print("destroy")
if __name__=='__main__':
    main()
| [
"tunchunairarko@gmail.com"
] | tunchunairarko@gmail.com |
e37d054c46e1a06d4dda2b789a93aff3e2ccb1a1 | 4a0765fb810f64030cd3a6a356bdb34fbeae20aa | /models/store.py | 172c43fe0bd9e669f243875e0a267d9ab8c7f34d | [] | no_license | NynkeVollema/stores-rest-api | 28ae30738914e83918fdaac633cdf2723253ac76 | 88e91f75f43c3e3f130de909fc29cb69c934fc45 | refs/heads/master | 2021-01-03T01:39:46.748412 | 2020-02-16T15:40:29 | 2020-02-16T15:40:29 | 239,862,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from db import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store; each store owns a list of ItemModel rows."""
    __tablename__ = "stores"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # lazy="dynamic" means that the item list is not (yet) retrieved from the database if the store
    # is created. Otherwise, the script would make an object for each item in the database, which
    # can be quite expensive when a lot of items are present.
    items = db.relationship("ItemModel", lazy="dynamic")
    def __init__(self, name):
        self.name = name
    def json(self):
        """Return the store as a JSON-serializable dict, items included."""
        # self.items = query builder because lazy is set to "dynamic"
        # .all() is needed to look into the table and retrieve a list of items
        # this means that the json method is slower (but creating a store is faster)
        return {"name": self.name, "items": [item.json() for item in self.items.all()]}
    @classmethod
    def find_by_name(cls, name):
        """Return the first store with this name, or None."""
        return cls.query.filter_by(name=name).first() # SQL: SELECT * FROM items WHERE name=name LIMIT 1
    def save_to_db(self):
        """Insert this store, or update it if it already exists."""
        db.session.add(self) # inserts or, if it already exists, updates the object "self"
        db.session.commit()
    def delete_from_db(self):
        """Remove this store from the database."""
        db.session.delete(self)
        db.session.commit()
| [
"nynke.vollema@rhdhv.com"
] | nynke.vollema@rhdhv.com |
dbc0c27afd8670f2879a4311628eb29b3134a236 | b0ba1585b2f65b2ba0ce98d4186d1d33b91f27cb | /src/containerapp/azext_containerapp/containerapp_decorator.py | 243d3be45515bfeec28eb8c29d18137123008b91 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | anagg929/azure-cli-extensions | dce128c9a5105a2c5f510081ec0f521cf5720b55 | ec02d4c83bd8d5ece829abd75b3030142c67aa3a | refs/heads/main | 2023-09-03T19:43:24.099198 | 2023-08-29T12:57:13 | 2023-08-29T12:57:13 | 299,980,394 | 0 | 0 | MIT | 2020-09-30T16:21:59 | 2020-09-30T16:21:58 | null | UTF-8 | Python | false | false | 77,574 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, broad-except, pointless-statement, bare-except
from typing import Dict, Any
from urllib.parse import urlparse
from azure.cli.core.commands import AzCliCommand
import time
from azure.cli.core.azclierror import (
RequiredArgumentMissingError,
ValidationError,
ArgumentUsageError,
ResourceNotFoundError)
from azure.cli.core.commands.client_factory import get_subscription_id
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.tools import parse_resource_id, is_valid_resource_id
from msrest.exceptions import DeserializationError
from .base_resource import BaseResource
from ._clients import ManagedEnvironmentClient, ConnectedEnvironmentClient, ManagedEnvironmentPreviewClient
from ._client_factory import handle_raw_exception, handle_non_404_status_code_exception
from ._models import (
Ingress as IngressModel,
Configuration as ConfigurationModel,
Template as TemplateModel,
RegistryCredentials as RegistryCredentialsModel,
ContainerApp as ContainerAppModel,
Dapr as DaprModel,
ContainerResources as ContainerResourcesModel,
Scale as ScaleModel,
Service as ServiceModel,
Container as ContainerModel,
ManagedServiceIdentity as ManagedServiceIdentityModel,
ScaleRule as ScaleRuleModel,
Volume as VolumeModel,
VolumeMount as VolumeMountModel)
from ._decorator_utils import (create_deserializer,
process_loaded_yaml,
load_yaml_file)
from ._utils import (_ensure_location_allowed,
parse_secret_flags, store_as_secret_and_return_secret_ref, parse_env_var_flags,
_convert_object_from_snake_to_camel_case,
_object_to_dict, _remove_additional_attributes,
_remove_readonly_attributes,
_infer_acr_credentials,
_ensure_identity_resource_id,
validate_container_app_name,
set_managed_identity,
create_acrpull_role_assignment, is_registry_msi_system,
safe_set, parse_metadata_flags, parse_auth_flags,
get_default_workload_profile_name_from_env,
ensure_workload_profile_supported, _generate_secret_volume_name,
parse_service_bindings, check_unique_bindings, AppType, get_linker_client,
safe_get, _update_revision_env_secretrefs, _add_or_update_tags, _populate_secret_values,
clean_null_values, _add_or_update_env_vars, _remove_env_vars, _get_existing_secrets, _get_acr_cred)
from ._validators import validate_create, validate_revision_suffix
from ._constants import (CONTAINER_APPS_RP,
HELLO_WORLD_IMAGE,
CONNECTED_ENVIRONMENT_TYPE,
CONNECTED_ENVIRONMENT_RESOURCE_TYPE,
MANAGED_ENVIRONMENT_TYPE,
MANAGED_ENVIRONMENT_RESOURCE_TYPE, ACR_IMAGE_SUFFIX)
logger = get_logger(__name__)
class BaseContainerAppDecorator(BaseResource):
    """Base decorator for `az containerapp` commands.

    Wraps the raw CLI parameter dict behind typed ``get_argument_*`` /
    ``set_argument_*`` accessors and implements the read operations
    (list/show/list_secrets) shared by the create/update decorators.
    """

    def __init__(self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str):
        super().__init__(cmd, client, raw_parameters, models)

    def list(self):
        """List container apps, optionally filtered by --environment.

        When --environment is a full resource id the environment is resolved
        first (raising if it does not exist) and apps are matched on the whole
        environmentId; when it is a bare name, apps are matched on the name
        segment of their environmentId only.
        """
        containerapps = super().list()
        managed_env = self.get_argument_managed_env()
        if managed_env:
            env_name = parse_resource_id(managed_env)["name"].lower()
            if "resource_group" in parse_resource_id(managed_env):
                # Full resource id supplied: verify the environment exists,
                # then compare the complete id case-insensitively.
                self.get_environment_client().show(self.cmd, parse_resource_id(managed_env)["resource_group"],
                                                   parse_resource_id(managed_env)["name"])
                containerapps = [c for c in containerapps if
                                 c["properties"]["environmentId"].lower() == managed_env.lower()]
            else:
                # Bare name supplied: match on the name segment only.
                containerapps = [c for c in containerapps if
                                 parse_resource_id(c["properties"]["environmentId"])["name"].lower() == env_name]

        return containerapps

    def show(self):
        """Show a container app; optionally resolve its secret values in place."""
        try:
            r = super().show()
            if self.get_param("show_secrets"):
                self.set_up_get_existing_secrets(r)
            return r
        except CLIError as e:
            handle_raw_exception(e)

    def list_secrets(self):
        """Return the app's secrets.

        Without --show-values only the secret entries stored on the resource
        (names, no values) are returned; with it, values are fetched via the
        dedicated listSecrets API.
        """
        containerapp_def = None
        try:
            containerapp_def = self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
        except Exception as e:
            handle_non_404_status_code_exception(e)

        if not containerapp_def:
            raise ResourceNotFoundError("The containerapp '{}' does not exist".format(self.get_argument_name()))

        if not self.get_argument_show_values():
            return safe_get(containerapp_def, "properties", "configuration", "secrets", default=[])

        try:
            return self.client.list_secrets(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())["value"]
        except Exception as e:
            handle_non_404_status_code_exception(e)

    def get_environment_client(self):
        # Subclasses override this to target connected/preview environments.
        return ManagedEnvironmentClient

    def set_up_get_existing_secrets(self, containerapp_def):
        """Populate ``containerapp_def``'s configuration.secrets in place.

        The GET payload only carries secret names; the values must be fetched
        through the listSecrets API.
        """
        if "secrets" not in containerapp_def["properties"]["configuration"]:
            containerapp_def["properties"]["configuration"]["secrets"] = []
        else:
            secrets = None
            try:
                secrets = self.client.list_secrets(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
            except Exception as e:  # pylint: disable=broad-except
                handle_non_404_status_code_exception(e)

            # NOTE(review): if the listSecrets call 404s, `secrets` stays None
            # and the subscript below raises TypeError — confirm a 404 cannot
            # happen for an app that was just fetched.
            # (Previously this value was written twice: once by direct
            # assignment and once by safe_set; the duplicate was removed.)
            safe_set(containerapp_def, "properties", "configuration", "secrets", value=secrets["value"])

    # ------------------------------------------------------------------
    # Raw-parameter accessors. Each getter/setter maps one CLI argument to
    # its key in the raw parameter dict.
    # ------------------------------------------------------------------
    def get_param(self, key) -> Any:
        return self.raw_param.get(key)

    def set_param(self, key, value):
        self.raw_param[key] = value

    def get_argument_name(self):
        return self.get_param("name")

    def get_argument_resource_group_name(self):
        return self.get_param("resource_group_name")

    def get_argument_no_wait(self):
        return self.get_param("no_wait")

    def get_argument_yaml(self):
        return self.get_param("yaml")

    def get_argument_image(self):
        return self.get_param("image")

    def set_argument_image(self, image):
        self.set_param("image", image)

    def get_argument_container_name(self):
        return self.get_param("container_name")

    def set_argument_container_name(self, container_name):
        self.set_param("container_name", container_name)

    def get_argument_managed_env(self):
        return self.get_param("managed_env")

    def set_argument_managed_env(self, managed_env):
        self.set_param("managed_env", managed_env)

    def get_argument_min_replicas(self):
        return self.get_param("min_replicas")

    def get_argument_max_replicas(self):
        return self.get_param("max_replicas")

    def get_argument_scale_rule_name(self):
        return self.get_param("scale_rule_name")

    def get_argument_scale_rule_type(self):
        return self.get_param("scale_rule_type")

    def set_argument_scale_rule_type(self, scale_rule_type):
        self.set_param("scale_rule_type", scale_rule_type)

    def get_argument_scale_rule_http_concurrency(self):
        return self.get_param("scale_rule_http_concurrency")

    def get_argument_scale_rule_metadata(self):
        return self.get_param("scale_rule_metadata")

    def get_argument_scale_rule_auth(self):
        return self.get_param("scale_rule_auth")

    def get_argument_target_port(self):
        return self.get_param("target_port")

    def get_argument_exposed_port(self):
        return self.get_param("exposed_port")

    def get_argument_transport(self):
        return self.get_param("transport")

    def get_argument_ingress(self):
        return self.get_param("ingress")

    def get_argument_allow_insecure(self):
        return self.get_param("allow_insecure")

    def get_argument_revisions_mode(self):
        return self.get_param("revisions_mode")

    def get_argument_secrets(self):
        return self.get_param("secrets")

    def get_argument_env_vars(self):
        return self.get_param("env_vars")

    def get_argument_cpu(self):
        return self.get_param("cpu")

    def get_argument_memory(self):
        return self.get_param("memory")

    def get_argument_registry_server(self):
        return self.get_param("registry_server")

    def get_argument_registry_user(self):
        return self.get_param("registry_user")

    def set_argument_registry_user(self, registry_user):
        self.set_param("registry_user", registry_user)

    def get_argument_registry_pass(self):
        return self.get_param("registry_pass")

    def set_argument_registry_pass(self, registry_pass):
        self.set_param("registry_pass", registry_pass)

    def get_argument_dapr_enabled(self):
        return self.get_param("dapr_enabled")

    def get_argument_dapr_app_port(self):
        return self.get_param("dapr_app_port")

    def get_argument_dapr_app_id(self):
        return self.get_param("dapr_app_id")

    def get_argument_dapr_app_protocol(self):
        return self.get_param("dapr_app_protocol")

    def get_argument_dapr_http_read_buffer_size(self):
        return self.get_param("dapr_http_read_buffer_size")

    def get_argument_dapr_http_max_request_size(self):
        return self.get_param("dapr_http_max_request_size")

    def get_argument_dapr_log_level(self):
        return self.get_param("dapr_log_level")

    def get_argument_dapr_enable_api_logging(self):
        return self.get_param("dapr_enable_api_logging")

    def get_argument_service_type(self):
        return self.get_param("service_type")

    def get_argument_service_bindings(self):
        return self.get_param("service_bindings")

    def get_argument_revision_suffix(self):
        return self.get_param("revision_suffix")

    def get_argument_startup_command(self):
        return self.get_param("startup_command")

    def get_argument_args(self):
        return self.get_param("args")

    def get_argument_tags(self):
        return self.get_param("tags")

    def get_argument_system_assigned(self):
        return self.get_param("system_assigned")

    def get_argument_disable_warnings(self):
        return self.get_param("disable_warnings")

    def get_argument_user_assigned(self):
        return self.get_param("user_assigned")

    def get_argument_registry_identity(self):
        return self.get_param("registry_identity")

    def get_argument_workload_profile_name(self):
        return self.get_param("workload_profile_name")

    def set_argument_workload_profile_name(self, workload_profile_name):
        self.set_param("workload_profile_name", workload_profile_name)

    def get_argument_secret_volume_mount(self):
        return self.get_param("secret_volume_mount")

    def get_argument_service_connectors_def_list(self):
        return self.get_param("service_connectors_def_list")

    def set_argument_service_connectors_def_list(self, service_connectors_def_list):
        self.set_param("service_connectors_def_list", service_connectors_def_list)

    def get_argument_termination_grace_period(self):
        return self.get_param("termination_grace_period")

    def get_argument_show_values(self):
        return self.get_param("show_values")

    def get_argument_set_env_vars(self):
        return self.get_param("set_env_vars")

    def get_argument_remove_env_vars(self):
        return self.get_param("remove_env_vars")

    def get_argument_replace_env_vars(self):
        return self.get_param("replace_env_vars")

    def get_argument_remove_all_env_vars(self):
        return self.get_param("remove_all_env_vars")

    def get_argument_from_revision(self):
        return self.get_param("from_revision")

    def get_argument_unbind_service_bindings(self):
        return self.get_param("unbind_service_bindings")
class ContainerAppCreateDecorator(BaseContainerAppDecorator):
    """Decorator implementing `az containerapp create`.

    Builds ``self.containerapp_def`` (the full ARM envelope) from CLI
    arguments or a --yaml file, submits it, and handles the post-creation
    steps needed for system-assigned registry identities.
    """

    def __init__(
        self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
    ):
        super().__init__(cmd, client, raw_parameters, models)
        # Envelope sent to the ARM API; starts from the model template and is
        # filled in by construct_payload.
        self.containerapp_def = ContainerAppModel

    def validate_arguments(self):
        """Validate name, registry and revision-suffix arguments up front."""
        validate_container_app_name(self.get_argument_name(), AppType.ContainerApp.name)
        validate_create(self.get_argument_registry_identity(), self.get_argument_registry_pass(), self.get_argument_registry_user(), self.get_argument_registry_server(), self.get_argument_no_wait())
        validate_revision_suffix(self.get_argument_revision_suffix())

    def construct_payload(self):
        """Assemble self.containerapp_def from the CLI arguments.

        Resolves and validates the managed environment, then builds the
        ingress/secrets/registries/dapr/identity/template sections in order.
        NOTE(review): the *Model objects (IngressModel, ConfigurationModel,
        TemplateModel, ...) appear to be module-level dicts mutated in place
        — presumably each CLI invocation builds at most one payload; confirm
        before reusing a decorator instance.
        """
        if self.get_argument_registry_identity() and not is_registry_msi_system(self.get_argument_registry_identity()):
            logger.info("Creating an acrpull role assignment for the registry identity")
            create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(), self.get_argument_registry_identity(), skip_error=True)

        if self.get_argument_yaml():
            # --yaml wins: the entire payload comes from the file.
            return self.set_up_create_containerapp_yaml(name=self.get_argument_name(), file_name=self.get_argument_yaml())

        if not self.get_argument_image():
            self.set_argument_image(HELLO_WORLD_IMAGE)

        if self.get_argument_managed_env() is None:
            raise RequiredArgumentMissingError('Usage error: --environment is required if not using --yaml')

        # Validate managed environment
        parsed_managed_env = parse_resource_id(self.get_argument_managed_env())
        managed_env_name = parsed_managed_env['name']
        managed_env_rg = parsed_managed_env['resource_group']
        managed_env_info = None
        try:
            managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)
        except Exception as e:
            handle_non_404_status_code_exception(e)

        if not managed_env_info:
            raise ValidationError("The environment '{}' does not exist. Specify a valid environment".format(self.get_argument_managed_env()))

        # Poll (5s interval) until the environment finishes provisioning,
        # unless --no-wait was requested.
        while not self.get_argument_no_wait() and safe_get(managed_env_info, "properties", "provisioningState", default="").lower() in ["inprogress", "updating"]:
            logger.info("Waiting for environment provisioning to finish before creating container app")
            time.sleep(5)
            managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)

        location = managed_env_info["location"]
        _ensure_location_allowed(self.cmd, location, CONTAINER_APPS_RP, "containerApps")

        if not self.get_argument_workload_profile_name() and "workloadProfiles" in managed_env_info:
            workload_profile_name = get_default_workload_profile_name_from_env(self.cmd, managed_env_info, managed_env_rg)
            self.set_argument_workload_profile_name(workload_profile_name)

        external_ingress = None
        if self.get_argument_ingress() is not None:
            if self.get_argument_ingress().lower() == "internal":
                external_ingress = False
            elif self.get_argument_ingress().lower() == "external":
                external_ingress = True

        # Ingress is only configured when both --ingress and --target-port
        # are supplied.
        ingress_def = None
        if self.get_argument_target_port() is not None and self.get_argument_ingress() is not None:
            ingress_def = IngressModel
            ingress_def["external"] = external_ingress
            ingress_def["targetPort"] = self.get_argument_target_port()
            ingress_def["transport"] = self.get_argument_transport()
            # exposedPort only applies to TCP ingress.
            ingress_def["exposedPort"] = self.get_argument_exposed_port() if self.get_argument_transport() == "tcp" else None
            ingress_def["allowInsecure"] = self.get_argument_allow_insecure()

        secrets_def = None
        if self.get_argument_secrets() is not None:
            secrets_def = parse_secret_flags(self.get_argument_secrets())

        registries_def = None
        if self.get_argument_registry_server() is not None and not is_registry_msi_system(self.get_argument_registry_identity()):
            registries_def = RegistryCredentialsModel
            registries_def["server"] = self.get_argument_registry_server()

            # Infer credentials if not supplied and its azurecr
            if (self.get_argument_registry_user() is None or self.get_argument_registry_pass() is None) and self.get_argument_registry_identity() is None:
                registry_user, registry_pass = _infer_acr_credentials(self.cmd, self.get_argument_registry_server(), self.get_argument_disable_warnings())
                self.set_argument_registry_user(registry_user)
                self.set_argument_registry_pass(registry_pass)

            if not self.get_argument_registry_identity():
                registries_def["username"] = self.get_argument_registry_user()

                # The registry password is stored as an app secret and
                # referenced rather than embedded in the registry config.
                if secrets_def is None:
                    secrets_def = []
                registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, self.get_argument_registry_user(),
                                                                                            self.get_argument_registry_server(),
                                                                                            self.get_argument_registry_pass(),
                                                                                            disable_warnings=self.get_argument_disable_warnings())
            else:
                registries_def["identity"] = self.get_argument_registry_identity()

        dapr_def = None
        if self.get_argument_dapr_enabled():
            dapr_def = DaprModel
            dapr_def["enabled"] = True
            dapr_def["appId"] = self.get_argument_dapr_app_id()
            dapr_def["appPort"] = self.get_argument_dapr_app_port()
            dapr_def["appProtocol"] = self.get_argument_dapr_app_protocol()
            dapr_def["httpReadBufferSize"] = self.get_argument_dapr_http_read_buffer_size()
            dapr_def["httpMaxRequestSize"] = self.get_argument_dapr_http_max_request_size()
            dapr_def["logLevel"] = self.get_argument_dapr_log_level()
            dapr_def["enableApiLogging"] = self.get_argument_dapr_enable_api_logging()

        service_def = None
        if self.get_argument_service_type():
            service_def = ServiceModel
            service_def["type"] = self.get_argument_service_type()

        config_def = ConfigurationModel
        config_def["secrets"] = secrets_def
        config_def["activeRevisionsMode"] = self.get_argument_revisions_mode()
        config_def["ingress"] = ingress_def
        config_def["registries"] = [registries_def] if registries_def is not None else None
        config_def["dapr"] = dapr_def
        config_def["service"] = service_def if service_def is not None else None

        # Identity actions
        identity_def = ManagedServiceIdentityModel
        identity_def["type"] = "None"

        assign_system_identity = self.get_argument_system_assigned()
        if self.get_argument_user_assigned():
            assign_user_identities = [x.lower() for x in self.get_argument_user_assigned()]
        else:
            assign_user_identities = []

        if assign_system_identity and assign_user_identities:
            identity_def["type"] = "SystemAssigned, UserAssigned"
        elif assign_system_identity:
            identity_def["type"] = "SystemAssigned"
        elif assign_user_identities:
            identity_def["type"] = "UserAssigned"

        if assign_user_identities:
            identity_def["userAssignedIdentities"] = {}
            subscription_id = get_subscription_id(self.cmd.cli_ctx)

            for r in assign_user_identities:
                # Expand bare identity names to full resource ids.
                r = _ensure_identity_resource_id(subscription_id, self.get_argument_resource_group_name(), r)
                identity_def["userAssignedIdentities"][r] = {}  # pylint: disable=unsupported-assignment-operation

        scale_def = self.set_up_scale_rule()

        resources_def = None
        if self.get_argument_cpu() is not None or self.get_argument_memory() is not None:
            resources_def = ContainerResourcesModel
            resources_def["cpu"] = self.get_argument_cpu()
            resources_def["memory"] = self.get_argument_memory()

        container_def = ContainerModel
        container_def["name"] = self.get_argument_container_name() if self.get_argument_container_name() else self.get_argument_name()
        # With a system-assigned registry identity the app is first created
        # with the hello-world image; the real image is set in post_process
        # once the acrpull role assignment exists.
        container_def["image"] = self.get_argument_image() if not is_registry_msi_system(self.get_argument_registry_identity()) else HELLO_WORLD_IMAGE
        if self.get_argument_env_vars() is not None:
            container_def["env"] = parse_env_var_flags(self.get_argument_env_vars())
        if self.get_argument_startup_command() is not None:
            container_def["command"] = self.get_argument_startup_command()
        if self.get_argument_args() is not None:
            container_def["args"] = self.get_argument_args()
        if resources_def is not None:
            container_def["resources"] = resources_def

        template_def = TemplateModel

        template_def["containers"] = [container_def]
        template_def["scale"] = scale_def

        if self.get_argument_secret_volume_mount() is not None:
            volume_def = VolumeModel
            volume_mount_def = VolumeMountModel
            # generate a volume name
            volume_def["name"] = _generate_secret_volume_name()
            volume_def["storageType"] = "Secret"

            # mount the volume to the container
            volume_mount_def["volumeName"] = volume_def["name"]
            volume_mount_def["mountPath"] = self.get_argument_secret_volume_mount()
            container_def["volumeMounts"] = [volume_mount_def]
            template_def["volumes"] = [volume_def]

        if self.get_argument_revision_suffix() is not None and not is_registry_msi_system(self.get_argument_registry_identity()):
            template_def["revisionSuffix"] = self.get_argument_revision_suffix()

        if self.get_argument_termination_grace_period() is not None:
            template_def["terminationGracePeriodSeconds"] = self.get_argument_termination_grace_period()

        self.containerapp_def["location"] = location
        self.containerapp_def["identity"] = identity_def
        self.containerapp_def["properties"]["environmentId"] = self.get_argument_managed_env()
        self.containerapp_def["properties"]["configuration"] = config_def
        self.containerapp_def["properties"]["template"] = template_def
        self.containerapp_def["tags"] = self.get_argument_tags()

        if self.get_argument_workload_profile_name():
            self.containerapp_def["properties"]["workloadProfileName"] = self.get_argument_workload_profile_name()
            ensure_workload_profile_supported(self.cmd, managed_env_name, managed_env_rg, self.get_argument_workload_profile_name(),
                                              managed_env_info)

        if self.get_argument_registry_identity():
            if is_registry_msi_system(self.get_argument_registry_identity()):
                set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerapp_def, system_assigned=True)
            else:
                set_managed_identity(self.cmd, self.get_argument_resource_group_name(), self.containerapp_def, user_assigned=[self.get_argument_registry_identity()])

    def create(self):
        """Submit the assembled envelope via create_or_update and return the
        service response."""
        try:
            r = self.client.create_or_update(
                cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name(), container_app_envelope=self.containerapp_def,
                no_wait=self.get_argument_no_wait())

            return r
        except Exception as e:
            handle_raw_exception(e)

    def construct_for_post_process(self, r):
        """For system-assigned registry identities: wait for the initial
        (hello-world) app to finish provisioning, grant acrpull to the system
        identity, then rewrite the envelope with the real image, revision
        suffix and registry config for the second create in post_process."""
        if is_registry_msi_system(self.get_argument_registry_identity()):
            while r["properties"]["provisioningState"] == "InProgress":
                r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
                time.sleep(10)
            logger.info("Creating an acrpull role assignment for the system identity")
            system_sp = r["identity"]["principalId"]
            create_acrpull_role_assignment(self.cmd, self.get_argument_registry_server(), registry_identity=None, service_principal=system_sp)
            containers_def = safe_get(self.containerapp_def, "properties", "template", "containers")
            containers_def[0]["image"] = self.get_argument_image()

            safe_set(self.containerapp_def, "properties", "template", "revisionSuffix", value=self.get_argument_revision_suffix())

            registries_def = RegistryCredentialsModel
            registries_def["server"] = self.get_argument_registry_server()
            registries_def["identity"] = self.get_argument_registry_identity()
            safe_set(self.containerapp_def, "properties", "configuration", "registries", value=[registries_def])

    def post_process(self, r):
        """Finish creation: re-create with the real image when needed and emit
        the user-facing hints about monitoring and ingress. Returns the final
        service response."""
        if is_registry_msi_system(self.get_argument_registry_identity()):
            r = self.create()

        if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not self.get_argument_no_wait():
            not self.get_argument_disable_warnings() and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name()))

        if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and r["properties"]["configuration"]["ingress"] and "fqdn" in r["properties"]["configuration"]["ingress"]:
            not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"]))
        else:
            target_port = self.get_argument_target_port() or "<port>"
            not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: "
                                                                        "az containerapp ingress enable -n %s -g %s --type external --target-port %s"
                                                                        " --transport auto\n", self.get_argument_name(), self.get_argument_resource_group_name(), target_port)

        return r

    def set_up_create_containerapp_yaml(self, name, file_name):
        """Build self.containerapp_def entirely from a --yaml file.

        Other CLI flags (except --environment, used as a fallback for a
        missing environmentId) are ignored with a warning.
        """
        if self.get_argument_image() or self.get_argument_min_replicas() or self.get_argument_max_replicas() or self.get_argument_target_port() or self.get_argument_ingress() or \
                self.get_argument_revisions_mode() or self.get_argument_secrets() or self.get_argument_env_vars() or self.get_argument_cpu() or self.get_argument_memory() or self.get_argument_registry_server() or \
                self.get_argument_registry_user() or self.get_argument_registry_pass() or self.get_argument_dapr_enabled() or self.get_argument_dapr_app_port() or self.get_argument_dapr_app_id() or \
                self.get_argument_startup_command() or self.get_argument_args() or self.get_argument_tags():
            not self.get_argument_disable_warnings() and logger.warning(
                'Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead')

        yaml_containerapp = process_loaded_yaml(load_yaml_file(file_name))
        if type(yaml_containerapp) != dict:  # pylint: disable=unidiomatic-typecheck
            raise ValidationError(
                'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')

        if not yaml_containerapp.get('name'):
            yaml_containerapp['name'] = name
        elif yaml_containerapp.get('name').lower() != name.lower():
            logger.warning(
                'The app name provided in the --yaml file "{}" does not match the one provided in the --name flag "{}". The one provided in the --yaml file will be used.'.format(
                    yaml_containerapp.get('name'), name))
        name = yaml_containerapp.get('name')

        if not yaml_containerapp.get('type'):
            yaml_containerapp['type'] = 'Microsoft.App/containerApps'
        elif yaml_containerapp.get('type').lower() != "microsoft.app/containerapps":
            raise ValidationError('Containerapp type must be \"Microsoft.App/ContainerApps\"')

        # Deserialize the yaml into a ContainerApp object. Need this since we're not using SDK
        try:
            deserializer = create_deserializer(self.models)

            self.containerapp_def = deserializer('ContainerApp', yaml_containerapp)
        except DeserializationError as ex:
            raise ValidationError(
                'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.') from ex

        # Remove tags before converting from snake case to camel case, then re-add tags. We don't want to change the case of the tags. Need this since we're not using SDK
        tags = None
        if yaml_containerapp.get('tags'):
            tags = yaml_containerapp.get('tags')
            del yaml_containerapp['tags']

        self.containerapp_def = _convert_object_from_snake_to_camel_case(_object_to_dict(self.containerapp_def))
        self.containerapp_def['tags'] = tags

        # After deserializing, some properties may need to be moved under the "properties" attribute. Need this since we're not using SDK
        self.containerapp_def = process_loaded_yaml(self.containerapp_def)

        # Remove "additionalProperties" and read-only attributes that are introduced in the deserialization. Need this since we're not using SDK
        _remove_additional_attributes(self.containerapp_def)
        _remove_readonly_attributes(self.containerapp_def)

        # Remove extra workloadProfileName introduced in deserialization
        if "workloadProfileName" in self.containerapp_def:
            del self.containerapp_def["workloadProfileName"]

        # Validate managed environment
        env_id = self.containerapp_def["properties"]['environmentId']
        env_info = None
        if self.get_argument_managed_env():
            if not self.get_argument_disable_warnings() and env_id is not None and env_id != self.get_argument_managed_env():
                logger.warning('The environmentId was passed along with --yaml. The value entered with --environment will be ignored, and the configuration defined in the yaml will be used instead')
            if env_id is None:
                env_id = self.get_argument_managed_env()
                safe_set(self.containerapp_def, "properties", "environmentId", value=env_id)

        if not self.containerapp_def["properties"].get('environmentId'):
            raise RequiredArgumentMissingError(
                'environmentId is required. This can be retrieved using the `az containerapp env show -g MyResourceGroup -n MyContainerappEnvironment --query id` command. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')

        if is_valid_resource_id(env_id):
            parsed_managed_env = parse_resource_id(env_id)
            env_name = parsed_managed_env['name']
            env_rg = parsed_managed_env['resource_group']
        else:
            raise ValidationError('Invalid environmentId specified. Environment not found')

        try:
            env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=env_rg, name=env_name)
        except Exception as e:
            handle_non_404_status_code_exception(e)

        if not env_info:
            raise ValidationError("The environment '{}' in resource group '{}' was not found".format(env_name, env_rg))

        # Validate location
        if not self.containerapp_def.get('location'):
            self.containerapp_def['location'] = env_info['location']

    def set_up_scale_rule(self):
        """Build the template's scale section from --min/max-replicas and the
        --scale-rule-* flags; returns None when no scale settings were given.

        A rule with no explicit type defaults to "http"; http/tcp rules also
        fold --scale-rule-http-concurrency into their metadata.
        """
        scale_def = None
        if self.get_argument_min_replicas() is not None or self.get_argument_max_replicas() is not None:
            scale_def = ScaleModel
            scale_def["minReplicas"] = self.get_argument_min_replicas()
            scale_def["maxReplicas"] = self.get_argument_max_replicas()

        scale_rule_type = self.get_argument_scale_rule_type()
        scale_rule_name = self.get_argument_scale_rule_name()
        scale_rule_auth = self.get_argument_scale_rule_auth()
        scale_rule_metadata = self.get_argument_scale_rule_metadata()
        scale_rule_http_concurrency = self.get_argument_scale_rule_http_concurrency()
        if self.get_argument_scale_rule_name():
            if not scale_rule_type:
                scale_rule_type = "http"
            scale_rule_type = scale_rule_type.lower()
            scale_rule_def = ScaleRuleModel
            curr_metadata = {}
            if self.get_argument_scale_rule_http_concurrency():
                if scale_rule_type in ('http', 'tcp'):
                    curr_metadata["concurrentRequests"] = str(scale_rule_http_concurrency)
            metadata_def = parse_metadata_flags(scale_rule_metadata, curr_metadata)
            auth_def = parse_auth_flags(scale_rule_auth)
            if scale_rule_type == "http":
                scale_rule_def["name"] = scale_rule_name
                scale_rule_def["custom"] = None
                scale_rule_def["http"] = {}
                scale_rule_def["http"]["metadata"] = metadata_def
                scale_rule_def["http"]["auth"] = auth_def
            else:
                scale_rule_def["name"] = scale_rule_name
                scale_rule_def["http"] = None
                scale_rule_def["custom"] = {}
                scale_rule_def["custom"]["type"] = scale_rule_type
                scale_rule_def["custom"]["metadata"] = metadata_def
                scale_rule_def["custom"]["auth"] = auth_def
            if not scale_def:
                scale_def = ScaleModel
            scale_def["rules"] = [scale_rule_def]

        return scale_def
class ContainerAppUpdateDecorator(BaseContainerAppDecorator):
    def __init__(
            self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
    ):
        super().__init__(cmd, client, raw_parameters, models)
        # Current resource as fetched from the service (filled in
        # construct_payload).
        self.containerapp_def = {}
        # Patch payload to send; only the fields being changed are set.
        self.new_containerapp = {}
    def validate_arguments(self):
        """Validate update arguments before the payload is constructed."""
        validate_revision_suffix(self.get_argument_revision_suffix())
        # Validate that max_replicas, when supplied, is within [1,1000]
        if self.get_argument_max_replicas() is not None:
            if self.get_argument_max_replicas() < 1 or self.get_argument_max_replicas() > 1000:
                raise ArgumentUsageError('--max-replicas must be in the range [1,1000]')
    def update(self):
        """Send self.new_containerapp as a PATCH and return the response.

        Without --no-wait, warns the user how to monitor an update that is
        still in the "Waiting" provisioning state.
        """
        try:
            r = self.client.update(
                cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name(), container_app_envelope=self.new_containerapp,
                no_wait=self.get_argument_no_wait())
            if not self.get_argument_no_wait() and "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting":
                logger.warning('Containerapp update in progress. Please monitor the update using `az containerapp show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name()))
            return r
        except Exception as e:
            handle_raw_exception(e)
def set_up_from_revision(self):
    """Seed the update template from an existing revision (--from-revision)."""
    if self.get_argument_from_revision():
        r = None
        try:
            r = self.client.show_revision(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), container_app_name=self.get_argument_name(), name=self.get_argument_from_revision())
        except CLIError as e:
            # NOTE(review): helper presumably re-raises non-404 errors; a 404
            # would leave r as None and the next line would fail — confirm.
            handle_non_404_status_code_exception(e)
        # Re-point env-var secretRefs at this app's own secrets before reuse.
        _update_revision_env_secretrefs(r["properties"]["template"]["containers"], self.get_argument_name())
        safe_set(self.new_containerapp, "properties", "template", value=r["properties"]["template"])
def _need_update_container(self):
    """Return a truthy value when any container-level flag was supplied.

    Note: the `or` chain deliberately yields the first truthy argument
    value (not a bool); callers only use it in boolean context.
    """
    return (
        self.get_argument_image()
        or self.get_argument_container_name()
        or self.get_argument_set_env_vars() is not None
        or self.get_argument_remove_env_vars() is not None
        or self.get_argument_replace_env_vars() is not None
        or self.get_argument_remove_all_env_vars()
        or self.get_argument_cpu()
        or self.get_argument_memory()
        or self.get_argument_startup_command() is not None
        or self.get_argument_args() is not None
        or self.get_argument_secret_volume_mount() is not None
    )
def construct_payload(self):
    """Assemble the PATCH payload in ``self.new_containerapp`` from CLI args.

    Fetches the current app into ``self.containerapp_def`` and copies only
    the sections the invocation touches (containers, scale, ingress,
    registry). Returns early with the yaml-driven payload when --yaml is
    used.

    Fixes vs. previous revision:
      * ``scale.pop("rules")`` — the old ``pop(["rules"])`` passed an
        unhashable list as a dict key and raised TypeError whenever the
        seeded template already carried scale rules.
      * ``c["volumeMounts"] = [volume_mount_def]`` — volumeMounts is a list
        in the ARM schema; assigning the bare dict broke the ``len()`` /
        ``[0]`` accesses on a subsequent --secret-volume-mount update.
    """
    # construct from yaml
    if self.get_argument_yaml():
        return self.set_up_update_containerapp_yaml(name=self.get_argument_name(), file_name=self.get_argument_yaml())
    self.containerapp_def = None
    try:
        self.containerapp_def = self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
    except Exception as e:
        handle_non_404_status_code_exception(e)
    if not self.containerapp_def:
        raise ResourceNotFoundError("The containerapp '{}' does not exist".format(self.get_argument_name()))
    self.new_containerapp["properties"] = {}
    self.set_up_from_revision()
    # Doing this while API has bug. If env var is an empty string, API doesn't return "value" even though the "value" should be an empty string
    for container in safe_get(self.containerapp_def, "properties", "template", "containers", default=[]):
        if "env" in container:
            for e in container["env"]:
                if "value" not in e:
                    e["value"] = ""
    # Which top-level sections does this invocation touch?
    update_map = {}
    update_map['scale'] = self.get_argument_min_replicas() or self.get_argument_max_replicas() or self.get_argument_scale_rule_name()
    update_map['container'] = self._need_update_container()
    update_map['ingress'] = self.get_argument_ingress() or self.get_argument_target_port()
    update_map['registry'] = self.get_argument_registry_server() or self.get_argument_registry_user() or self.get_argument_registry_pass()
    if self.get_argument_tags():
        _add_or_update_tags(self.new_containerapp, self.get_argument_tags())
    if self.get_argument_revision_suffix() is not None:
        self.new_containerapp["properties"]["template"] = {} if "template" not in self.new_containerapp["properties"] else self.new_containerapp["properties"]["template"]
        self.new_containerapp["properties"]["template"]["revisionSuffix"] = self.get_argument_revision_suffix()
    if self.get_argument_termination_grace_period() is not None:
        safe_set(self.new_containerapp, "properties", "template", "terminationGracePeriodSeconds",
                 value=self.get_argument_termination_grace_period())
    if self.get_argument_workload_profile_name():
        self.new_containerapp["properties"]["workloadProfileName"] = self.get_argument_workload_profile_name()
        # Validate that the requested profile exists on the app's environment.
        parsed_managed_env = parse_resource_id(self.containerapp_def["properties"]["environmentId"])
        managed_env_name = parsed_managed_env['name']
        managed_env_rg = parsed_managed_env['resource_group']
        managed_env_info = None
        try:
            managed_env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=managed_env_rg, name=managed_env_name)
        except Exception as e:
            handle_non_404_status_code_exception(e)
        if not managed_env_info:
            raise ValidationError(
                "Error parsing the managed environment '{}' from the specified containerapp".format(
                    managed_env_name))
        ensure_workload_profile_supported(self.cmd, managed_env_name, managed_env_rg, self.get_argument_workload_profile_name(),
                                          managed_env_info)
    # Containers
    if update_map["container"]:
        self.new_containerapp["properties"]["template"] = {} if "template" not in self.new_containerapp["properties"] else self.new_containerapp["properties"]["template"]
        self.new_containerapp["properties"]["template"]["containers"] = self.containerapp_def["properties"]["template"]["containers"]
        if not self.get_argument_container_name():
            if len(self.new_containerapp["properties"]["template"]["containers"]) == 1:
                container_name = self.new_containerapp["properties"]["template"]["containers"][0]["name"]
                self.set_argument_container_name(container_name)
            else:
                raise ValidationError(
                    "Usage error: --container-name is required when adding or updating a container")
        # Check if updating existing container
        updating_existing_container = False
        for c in self.new_containerapp["properties"]["template"]["containers"]:
            if c["name"].lower() == self.get_argument_container_name().lower():
                updating_existing_container = True
                if self.get_argument_image() is not None:
                    c["image"] = self.get_argument_image()
                if self.get_argument_set_env_vars() is not None:
                    if "env" not in c or not c["env"]:
                        c["env"] = []
                    # env vars
                    _add_or_update_env_vars(c["env"], parse_env_var_flags(self.get_argument_set_env_vars()))
                if self.get_argument_replace_env_vars() is not None:
                    # Remove other existing env_vars, then add them
                    c["env"] = []
                    _add_or_update_env_vars(c["env"], parse_env_var_flags(self.get_argument_replace_env_vars()))
                if self.get_argument_remove_env_vars() is not None:
                    if "env" not in c or not c["env"]:
                        c["env"] = []
                    # env vars
                    _remove_env_vars(c["env"], self.get_argument_remove_env_vars())
                if self.get_argument_remove_all_env_vars():
                    c["env"] = []
                if self.get_argument_startup_command() is not None:
                    # An explicit empty list clears the command.
                    if isinstance(self.get_argument_startup_command(), list) and not self.get_argument_startup_command():
                        c["command"] = None
                    else:
                        c["command"] = self.get_argument_startup_command()
                if self.get_argument_args() is not None:
                    if isinstance(self.get_argument_args(), list) and not self.get_argument_args():
                        c["args"] = None
                    else:
                        c["args"] = self.get_argument_args()
                if self.get_argument_cpu() is not None or self.get_argument_memory() is not None:
                    if "resources" in c and c["resources"]:
                        if self.get_argument_cpu() is not None:
                            c["resources"]["cpu"] = self.get_argument_cpu()
                        if self.get_argument_memory() is not None:
                            c["resources"]["memory"] = self.get_argument_memory()
                    else:
                        c["resources"] = {
                            "cpu": self.get_argument_cpu(),
                            "memory": self.get_argument_memory()
                        }
                if self.get_argument_secret_volume_mount() is not None:
                    self.new_containerapp["properties"]["template"]["volumes"] = self.containerapp_def["properties"]["template"]["volumes"]
                    if "volumeMounts" not in c or not c["volumeMounts"]:
                        # if no volume mount exists, create a new volume and then mount
                        # NOTE: VolumeModel/VolumeMountModel are shared module-level
                        # dict templates that get mutated in place here.
                        volume_def = VolumeModel
                        volume_mount_def = VolumeMountModel
                        volume_def["name"] = _generate_secret_volume_name()
                        volume_def["storageType"] = "Secret"
                        volume_mount_def["volumeName"] = volume_def["name"]
                        volume_mount_def["mountPath"] = self.get_argument_secret_volume_mount()
                        if "volumes" not in self.new_containerapp["properties"]["template"]:
                            self.new_containerapp["properties"]["template"]["volumes"] = [volume_def]
                        else:
                            self.new_containerapp["properties"]["template"]["volumes"].append(volume_def)
                        # BUGFIX: volumeMounts must be a list (see len()/[0] below
                        # and the new-container branch); was the bare dict.
                        c["volumeMounts"] = [volume_mount_def]
                    else:
                        if len(c["volumeMounts"]) > 1:
                            raise ValidationError(
                                "Usage error: --secret-volume-mount can only be used with a container that has a single volume mount, to define multiple volumes and mounts please use --yaml")
                        else:
                            # check that the only volume is of type secret
                            volume_name = c["volumeMounts"][0]["volumeName"]
                            for v in self.new_containerapp["properties"]["template"]["volumes"]:
                                if v["name"].lower() == volume_name.lower():
                                    if v["storageType"] != "Secret":
                                        raise ValidationError(
                                            "Usage error: --secret-volume-mount can only be used to update volume mounts with volumes of type secret. To update other types of volumes please use --yaml")
                                    break
                            c["volumeMounts"][0]["mountPath"] = self.get_argument_secret_volume_mount()
        # If not updating existing container, add as new container
        if not updating_existing_container:
            if self.get_argument_image() is None:
                raise ValidationError("Usage error: --image is required when adding a new container")
            resources_def = None
            if self.get_argument_cpu() is not None or self.get_argument_memory() is not None:
                resources_def = ContainerResourcesModel
                resources_def["cpu"] = self.get_argument_cpu()
                resources_def["memory"] = self.get_argument_memory()
            container_def = ContainerModel
            container_def["name"] = self.get_argument_container_name()
            container_def["image"] = self.get_argument_image()
            container_def["env"] = []
            if self.get_argument_set_env_vars() is not None:
                # env vars
                _add_or_update_env_vars(container_def["env"], parse_env_var_flags(self.get_argument_set_env_vars()))
            if self.get_argument_replace_env_vars() is not None:
                # env vars
                _add_or_update_env_vars(container_def["env"], parse_env_var_flags(self.get_argument_replace_env_vars()))
            if self.get_argument_remove_env_vars() is not None:
                # env vars
                _remove_env_vars(container_def["env"], self.get_argument_remove_env_vars())
            if self.get_argument_remove_all_env_vars():
                container_def["env"] = []
            if self.get_argument_startup_command() is not None:
                if isinstance(self.get_argument_startup_command(), list) and not self.get_argument_startup_command():
                    container_def["command"] = None
                else:
                    container_def["command"] = self.get_argument_startup_command()
            if self.get_argument_args() is not None:
                if isinstance(self.get_argument_args(), list) and not self.get_argument_args():
                    container_def["args"] = None
                else:
                    container_def["args"] = self.get_argument_args()
            if resources_def is not None:
                container_def["resources"] = resources_def
            if self.get_argument_secret_volume_mount() is not None:
                self.new_containerapp["properties"]["template"]["volumes"] = self.containerapp_def["properties"]["template"]["volumes"]
                # generate a new volume name
                volume_def = VolumeModel
                volume_mount_def = VolumeMountModel
                volume_def["name"] = _generate_secret_volume_name()
                volume_def["storageType"] = "Secret"
                # mount the volume to the container
                volume_mount_def["volumeName"] = volume_def["name"]
                volume_mount_def["mountPath"] = self.get_argument_secret_volume_mount()
                container_def["volumeMounts"] = [volume_mount_def]
                if "volumes" not in self.new_containerapp["properties"]["template"]:
                    self.new_containerapp["properties"]["template"]["volumes"] = [volume_def]
                else:
                    self.new_containerapp["properties"]["template"]["volumes"].append(volume_def)
            self.new_containerapp["properties"]["template"]["containers"].append(container_def)
    # Scale
    if update_map["scale"]:
        self.new_containerapp["properties"]["template"] = {} if "template" not in self.new_containerapp["properties"] else self.new_containerapp["properties"]["template"]
        if "scale" not in self.new_containerapp["properties"]["template"]:
            self.new_containerapp["properties"]["template"]["scale"] = {}
        if self.get_argument_min_replicas() is not None:
            self.new_containerapp["properties"]["template"]["scale"]["minReplicas"] = self.get_argument_min_replicas()
        if self.get_argument_max_replicas() is not None:
            self.new_containerapp["properties"]["template"]["scale"]["maxReplicas"] = self.get_argument_max_replicas()
        scale_def = None
        if self.get_argument_min_replicas() is not None or self.get_argument_max_replicas() is not None:
            scale_def = ScaleModel
            scale_def["minReplicas"] = self.get_argument_min_replicas()
            scale_def["maxReplicas"] = self.get_argument_max_replicas()
        # so we don't overwrite rules
        if safe_get(self.new_containerapp, "properties", "template", "scale", "rules"):
            # BUGFIX: was pop(["rules"]) — a list is unhashable as a dict key
            # and raised TypeError; the intent is to remove the "rules" key.
            self.new_containerapp["properties"]["template"]["scale"].pop("rules")
        scale_rule_type = self.get_argument_scale_rule_type()
        if self.get_argument_scale_rule_name():
            if not scale_rule_type:
                scale_rule_type = "http"
            scale_rule_type = scale_rule_type.lower()
            scale_rule_def = ScaleRuleModel
            curr_metadata = {}
            if self.get_argument_scale_rule_http_concurrency():
                if scale_rule_type in ('http', 'tcp'):
                    curr_metadata["concurrentRequests"] = str(self.get_argument_scale_rule_http_concurrency())
            metadata_def = parse_metadata_flags(self.get_argument_scale_rule_metadata(), curr_metadata)
            auth_def = parse_auth_flags(self.get_argument_scale_rule_auth())
            if scale_rule_type == "http":
                scale_rule_def["name"] = self.get_argument_scale_rule_name()
                scale_rule_def["custom"] = None
                scale_rule_def["http"] = {}
                scale_rule_def["http"]["metadata"] = metadata_def
                scale_rule_def["http"]["auth"] = auth_def
            else:
                scale_rule_def["name"] = self.get_argument_scale_rule_name()
                scale_rule_def["http"] = None
                scale_rule_def["custom"] = {}
                scale_rule_def["custom"]["type"] = scale_rule_type
                scale_rule_def["custom"]["metadata"] = metadata_def
                scale_rule_def["custom"]["auth"] = auth_def
            if not scale_def:
                scale_def = ScaleModel
            scale_def["rules"] = [scale_rule_def]
            self.new_containerapp["properties"]["template"]["scale"]["rules"] = scale_def["rules"]
    # Ingress
    if update_map["ingress"]:
        self.new_containerapp["properties"]["configuration"] = {} if "configuration" not in self.new_containerapp[
            "properties"] else self.new_containerapp["properties"]["configuration"]
        if self.get_argument_target_port() is not None or self.get_argument_ingress() is not None:
            self.new_containerapp["properties"]["configuration"]["ingress"] = {}
            if self.get_argument_ingress():
                self.new_containerapp["properties"]["configuration"]["ingress"][
                    "external"] = self.get_argument_ingress().lower() == "external"
            if self.get_argument_target_port():
                self.new_containerapp["properties"]["configuration"]["ingress"]["targetPort"] = self.get_argument_target_port()
    # Registry
    if update_map["registry"]:
        self.new_containerapp["properties"]["configuration"] = {} if "configuration" not in self.new_containerapp[
            "properties"] else self.new_containerapp["properties"]["configuration"]
        if "registries" in self.containerapp_def["properties"]["configuration"]:
            self.new_containerapp["properties"]["configuration"]["registries"] = self.containerapp_def["properties"]["configuration"]["registries"]
        if "registries" not in self.containerapp_def["properties"]["configuration"] or \
                self.containerapp_def["properties"]["configuration"]["registries"] is None:
            self.new_containerapp["properties"]["configuration"]["registries"] = []
        registries_def = self.new_containerapp["properties"]["configuration"]["registries"]
        self.set_up_get_existing_secrets(self.containerapp_def)
        if "secrets" in self.containerapp_def["properties"]["configuration"] and self.containerapp_def["properties"]["configuration"]["secrets"]:
            self.new_containerapp["properties"]["configuration"]["secrets"] = self.containerapp_def["properties"]["configuration"]["secrets"]
        else:
            self.new_containerapp["properties"]["configuration"]["secrets"] = []
        if self.get_argument_registry_server():
            if not self.get_argument_registry_pass() or not self.get_argument_registry_user():
                if ACR_IMAGE_SUFFIX not in self.get_argument_registry_server():
                    raise RequiredArgumentMissingError(
                        'Registry url is required if using Azure Container Registry, otherwise Registry username and password are required if using Dockerhub')
                logger.warning(
                    'No credential was provided to access Azure Container Registry. Trying to look up...')
                parsed = urlparse(self.get_argument_registry_server())
                registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
                registry_user, registry_pass, _ = _get_acr_cred(self.cmd.cli_ctx, registry_name)
                self.set_argument_registry_user(registry_user)
                self.set_argument_registry_pass(registry_pass)
            # Check if updating existing registry
            updating_existing_registry = False
            for r in registries_def:
                if r['server'].lower() == self.get_argument_registry_server().lower():
                    updating_existing_registry = True
                    if self.get_argument_registry_user():
                        r["username"] = self.get_argument_registry_user()
                    if self.get_argument_registry_pass():
                        r["passwordSecretRef"] = store_as_secret_and_return_secret_ref(
                            self.new_containerapp["properties"]["configuration"]["secrets"],
                            r["username"],
                            r["server"],
                            self.get_argument_registry_pass(),
                            update_existing_secret=True,
                            disable_warnings=True)
            # If not updating existing registry, add as new registry
            if not updating_existing_registry:
                registry = RegistryCredentialsModel
                registry["server"] = self.get_argument_registry_server()
                registry["username"] = self.get_argument_registry_user()
                registry["passwordSecretRef"] = store_as_secret_and_return_secret_ref(
                    self.new_containerapp["properties"]["configuration"]["secrets"],
                    self.get_argument_registry_user(),
                    self.get_argument_registry_server(),
                    self.get_argument_registry_pass(),
                    update_existing_secret=True,
                    disable_warnings=True)
                registries_def.append(registry)
    if not self.get_argument_revision_suffix():
        self.new_containerapp["properties"]["template"] = {} if "template" not in self.new_containerapp["properties"] else self.new_containerapp["properties"]["template"]
        self.new_containerapp["properties"]["template"]["revisionSuffix"] = None
def set_up_update_containerapp_yaml(self, name, file_name):
    """Build the full update payload from a --yaml file.

    All other CLI flags are ignored (a warning is printed). The yaml is
    deserialized through the SDK models, re-shaped into the REST payload,
    and stored in ``self.new_containerapp``.
    """
    # --yaml wins over every other flag; warn if any were combined with it.
    if self.get_argument_image() or self.get_argument_min_replicas() or self.get_argument_max_replicas() or \
            self.get_argument_set_env_vars() or self.get_argument_remove_env_vars() or self.get_argument_replace_env_vars() or self.get_argument_remove_all_env_vars() or self.get_argument_cpu() or self.get_argument_memory() or \
            self.get_argument_startup_command() or self.get_argument_args() or self.get_argument_tags():
        logger.warning(
            'Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead')
    yaml_containerapp = process_loaded_yaml(load_yaml_file(file_name))
    if type(yaml_containerapp) != dict:  # pylint: disable=unidiomatic-typecheck
        raise ValidationError(
            'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.')
    # The name in the yaml takes precedence over --name when they differ.
    if not yaml_containerapp.get('name'):
        yaml_containerapp['name'] = name
    elif yaml_containerapp.get('name').lower() != name.lower():
        logger.warning(
            'The app name provided in the --yaml file "{}" does not match the one provided in the --name flag "{}". The one provided in the --yaml file will be used.'.format(
                yaml_containerapp.get('name'), name))
        name = yaml_containerapp.get('name')
    if not yaml_containerapp.get('type'):
        yaml_containerapp['type'] = 'Microsoft.App/containerApps'
    elif yaml_containerapp.get('type').lower() != "microsoft.app/containerapps":
        raise ValidationError('Containerapp type must be \"Microsoft.App/ContainerApps\"')
    # Check if containerapp exists
    try:
        self.new_containerapp = self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name())
    except Exception as e:
        handle_non_404_status_code_exception(e)
    if not self.new_containerapp:
        raise ValidationError("The containerapp '{}' does not exist".format(name))
    # Remember the deployed environment id so we can drop it from the PATCH
    # payload later when the yaml does not change it.
    existed_environment_id = self.new_containerapp['properties']['environmentId']
    self.new_containerapp = None
    # Deserialize the yaml into a ContainerApp object. Need this since we're not using SDK
    try:
        deserializer = create_deserializer(self.models)
        self.new_containerapp = deserializer('ContainerApp', yaml_containerapp)
    except DeserializationError as ex:
        raise ValidationError(
            'Invalid YAML provided. Please see https://aka.ms/azure-container-apps-yaml for a valid containerapps YAML spec.') from ex
    # Remove tags before converting from snake case to camel case, then re-add tags. We don't want to change the case of the tags. Need this since we're not using SDK
    tags = None
    if yaml_containerapp.get('tags'):
        tags = yaml_containerapp.get('tags')
        del yaml_containerapp['tags']
    self.new_containerapp = _convert_object_from_snake_to_camel_case(_object_to_dict(self.new_containerapp))
    self.new_containerapp['tags'] = tags
    # After deserializing, some properties may need to be moved under the "properties" attribute. Need this since we're not using SDK
    self.new_containerapp = process_loaded_yaml(self.new_containerapp)
    # Change which revision we update from
    if self.get_argument_from_revision():
        r = self.client.show_revision(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), container_app_name=name, name=self.get_argument_from_revision())
        _update_revision_env_secretrefs(r["properties"]["template"]["containers"], name)
        self.new_containerapp["properties"]["template"] = r["properties"]["template"]
    # Remove "additionalProperties" and read-only attributes that are introduced in the deserialization. Need this since we're not using SDK
    _remove_additional_attributes(self.new_containerapp)
    _remove_readonly_attributes(self.new_containerapp)
    secret_values = self.list_secrets()
    _populate_secret_values(self.new_containerapp, secret_values)
    # Clean null values since this is an update
    self.new_containerapp = clean_null_values(self.new_containerapp)
    # Fix bug with revisionSuffix when containers are added
    if not safe_get(self.new_containerapp, "properties", "template", "revisionSuffix"):
        if "properties" not in self.new_containerapp:
            self.new_containerapp["properties"] = {}
        if "template" not in self.new_containerapp["properties"]:
            self.new_containerapp["properties"]["template"] = {}
        self.new_containerapp["properties"]["template"]["revisionSuffix"] = None
    # Remove the environmentId in the PATCH payload if it has not been changed
    if safe_get(self.new_containerapp, "properties", "environmentId") and safe_get(self.new_containerapp, "properties", "environmentId").lower() == existed_environment_id.lower():
        del self.new_containerapp["properties"]['environmentId']
# decorator for preview create
class ContainerAppPreviewCreateDecorator(ContainerAppCreateDecorator):
def __init__(
        self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
):
    # Pure delegation; preview-only behaviour is layered on in construct_payload().
    super().__init__(cmd, client, raw_parameters, models)
def construct_payload(self):
    """Build the base create payload, then apply preview-only features
    (dev-service bindings and connected-environment extended location)."""
    super().construct_payload()
    self.set_up_service_binds()
    self.set_up_extended_location()
def post_process(self, r):
    """Finish creation: handle system-MSI registries, print access hints,
    and create service-connector linkers. Returns the (possibly refreshed)
    app resource ``r``."""
    # NOTE(review): with a system-assigned registry identity the create is
    # presumably re-run here so registry auth can use the new identity — confirm.
    if is_registry_msi_system(self.get_argument_registry_identity()):
        r = self.create()
    if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not self.get_argument_no_wait():
        not self.get_argument_disable_warnings() and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name()))
    if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and \
            r["properties"]["configuration"]["ingress"] and "fqdn" in r["properties"]["configuration"]["ingress"]:
        not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"]))
    else:
        target_port = self.get_argument_target_port() or "<port>"
        not self.get_argument_disable_warnings() and logger.warning(
            "\nContainer app created. To access it over HTTPS, enable ingress: "
            "az containerapp ingress enable -n %s -g %s --type external --target-port %s"
            " --transport auto\n", self.get_argument_name(), self.get_argument_resource_group_name(), target_port)
    if self.get_argument_service_connectors_def_list() is not None:
        linker_client = get_linker_client(self.cmd)
        for item in self.get_argument_service_connectors_def_list():
            # Poll until provisioning settles before creating each linker.
            while r is not None and r["properties"]["provisioningState"].lower() == "inprogress":
                r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
                time.sleep(1)
            linker_client.linker.begin_create_or_update(resource_uri=r["id"],
                                                        parameters=item["parameters"],
                                                        linker_name=item["linker_name"]).result()
    return r
def set_up_extended_location(self):
    """For connected environments, copy the environment's extendedLocation
    onto the app payload (required for Arc-connected clusters)."""
    if self.get_argument_environment_type() == CONNECTED_ENVIRONMENT_TYPE:
        if not self.containerapp_def.get('extendedLocation'):
            # The env id may come from the yaml payload or from --environment.
            env_id = safe_get(self.containerapp_def, "properties", 'environmentId') or self.get_argument_managed_env()
            parsed_env = parse_resource_id(env_id)
            env_name = parsed_env['name']
            env_rg = parsed_env['resource_group']
            env_info = self.get_environment_client().show(cmd=self.cmd, resource_group_name=env_rg, name=env_name)
            self.containerapp_def["extendedLocation"] = env_info["extendedLocation"]
def set_up_service_binds(self):
    """Parse --bind values into the template's serviceBinds; reject
    binding names that collide across managed and dev services."""
    if self.get_argument_service_bindings() is not None:
        service_connectors_def_list, service_bindings_def_list = parse_service_bindings(self.cmd,
                                                                                       self.get_argument_service_bindings(),
                                                                                       self.get_argument_resource_group_name(),
                                                                                       self.get_argument_name())
        # Stash the connector defs for post_process() to create linkers with.
        self.set_argument_service_connectors_def_list(service_connectors_def_list)
        unique_bindings = check_unique_bindings(self.cmd, service_connectors_def_list, service_bindings_def_list,
                                                self.get_argument_resource_group_name(), self.get_argument_name())
        if not unique_bindings:
            raise ValidationError("Binding names across managed and dev services should be unique.")
        safe_set(self.containerapp_def, "properties", "template", "serviceBinds", value=service_bindings_def_list)
def get_environment_client(self):
    """Pick the environment client matching the target environment's type.

    The type embedded in the environment resource id wins over the
    --environment-type flag; a warning is printed when they disagree.
    """
    env = (safe_get(self.containerapp_def, "properties", "environmentId")
           if self.get_argument_yaml()
           else self.get_argument_managed_env())
    environment_type = self.get_argument_environment_type()
    if not env and not environment_type:
        return ManagedEnvironmentClient
    parsed_env = parse_resource_id(env)
    # Validate environment type
    resource_is_connected = (
        parsed_env.get('resource_type').lower() == CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower())
    if resource_is_connected:
        if environment_type == MANAGED_ENVIRONMENT_TYPE:
            logger.warning("User passed a connectedEnvironment resource id but did not specify --environment-type connected. Using environment type connected.")
        environment_type = CONNECTED_ENVIRONMENT_TYPE
    else:
        if environment_type == CONNECTED_ENVIRONMENT_TYPE:
            logger.warning("User passed a managedEnvironment resource id but specified --environment-type connected. Using environment type managed.")
        environment_type = MANAGED_ENVIRONMENT_TYPE
    # Persist the resolved values back into the raw parameters.
    self.set_argument_environment_type(environment_type)
    self.set_argument_managed_env(env)
    return (ConnectedEnvironmentClient
            if environment_type == CONNECTED_ENVIRONMENT_TYPE
            else ManagedEnvironmentPreviewClient)
def get_argument_environment_type(self):
    """Return the --environment-type value from the raw parameter bag."""
    return self.get_param("environment_type")

def set_argument_environment_type(self, environment_type):
    """Store an --environment-type value back into the raw parameter bag."""
    self.set_param("environment_type", environment_type)
# decorator for preview update
class ContainerAppPreviewUpdateDecorator(ContainerAppUpdateDecorator):
def construct_payload(self):
    """Build the base update payload, then apply preview service-binding
    additions (--bind) and removals (--unbind)."""
    super().construct_payload()
    self.set_up_service_bindings()
    self.set_up_unbind_service_bindings()
def post_process(self, r):
    """After the PATCH: delete linkers for unbound services, then
    create/update linkers for newly bound ones. Returns ``r``."""
    # Delete managed bindings
    linker_client = None
    if self.get_argument_unbind_service_bindings():
        linker_client = get_linker_client(self.cmd)
        for item in self.get_argument_unbind_service_bindings():
            # Poll until the app leaves InProgress before touching linkers.
            while r["properties"]["provisioningState"].lower() == "inprogress":
                r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
                time.sleep(1)
            linker_client.linker.begin_delete(resource_uri=r["id"], linker_name=item).result()
    # Update managed bindings
    if self.get_argument_service_connectors_def_list() is not None:
        linker_client = get_linker_client(self.cmd) if linker_client is None else linker_client
        for item in self.get_argument_service_connectors_def_list():
            while r["properties"]["provisioningState"].lower() == "inprogress":
                r = self.client.show(self.cmd, self.get_argument_resource_group_name(), self.get_argument_name())
                time.sleep(1)
            linker_client.linker.begin_create_or_update(resource_uri=r["id"],
                                                        parameters=item["parameters"],
                                                        linker_name=item["linker_name"]).result()
    return r
def set_up_service_bindings(self):
    """Merge --bind values into the template's serviceBinds list.

    Existing binds with a matching name get their serviceId refreshed;
    unmatched binds are appended after checking they don't collide with
    an existing service-linker (managed binding) name.
    """
    if self.get_argument_service_bindings() is not None:
        linker_client = get_linker_client(self.cmd)
        service_connectors_def_list, service_bindings_def_list = parse_service_bindings(self.cmd, self.get_argument_service_bindings(), self.get_argument_resource_group_name(), self.get_argument_name())
        self.set_argument_service_connectors_def_list(service_connectors_def_list)
        # Track which requested binds matched an existing entry.
        service_bindings_used_map = {update_item["name"]: False for update_item in service_bindings_def_list}
        safe_set(self.new_containerapp, "properties", "template", "serviceBinds", value=self.containerapp_def["properties"]["template"]["serviceBinds"])
        if self.new_containerapp["properties"]["template"]["serviceBinds"] is None:
            self.new_containerapp["properties"]["template"]["serviceBinds"] = []
        for item in self.new_containerapp["properties"]["template"]["serviceBinds"]:
            for update_item in service_bindings_def_list:
                # NOTE(review): membership is tested against item.values()
                # rather than item["name"] — confirm this is intentional.
                if update_item["name"] in item.values():
                    item["serviceId"] = update_item["serviceId"]
                    service_bindings_used_map[update_item["name"]] = True
        for update_item in service_bindings_def_list:
            if service_bindings_used_map[update_item["name"]] is False:
                # Check if it doesn't exist in existing service linkers
                managed_bindings = linker_client.linker.list(resource_uri=self.containerapp_def["id"])
                if managed_bindings:
                    managed_bindings_list = [item.name for item in managed_bindings]
                    if update_item["name"] in managed_bindings_list:
                        raise ValidationError("Binding names across managed and dev services should be unique.")
                self.new_containerapp["properties"]["template"]["serviceBinds"].append(update_item)
        if service_connectors_def_list is not None:
            for item in service_connectors_def_list:
                # Check if it doesn't exist in existing service bindings
                service_bindings_list = []
                for binds in self.new_containerapp["properties"]["template"]["serviceBinds"]:
                    service_bindings_list.append(binds["name"])
                if item["linker_name"] in service_bindings_list:
                    raise ValidationError("Binding names across managed and dev services should be unique.")
def set_up_unbind_service_bindings(self):
    """Remove --unbind names from the template's serviceBinds list."""
    if self.get_argument_unbind_service_bindings():
        new_template = self.new_containerapp.setdefault("properties", {}).setdefault("template", {})
        existing_template = self.containerapp_def["properties"]["template"]
        # Start from the deployed binds unless --bind already populated them.
        # NOTE(review): if --bind was passed, "serviceBinds" is presumably set
        # by set_up_service_bindings() before this runs; otherwise the access
        # below would raise KeyError — confirm against construct_payload order.
        if not self.get_argument_service_bindings():
            new_template["serviceBinds"] = existing_template.get("serviceBinds", [])
        service_bindings_dict = {}
        if new_template["serviceBinds"]:
            service_bindings_dict = {service_binding["name"]: index for index, service_binding in
                                     enumerate(new_template.get("serviceBinds", []))}
        for item in self.get_argument_unbind_service_bindings():
            if item in service_bindings_dict:
                new_template["serviceBinds"] = [binding for binding in new_template["serviceBinds"] if
                                                binding["name"] != item]
# decorator for preview list
class ContainerAppPreviewListDecorator(BaseContainerAppDecorator):
def __init__(
        self, cmd: AzCliCommand, client: Any, raw_parameters: Dict, models: str
):
    # Pure delegation; preview behaviour lives in list()/get_environment_client().
    super().__init__(cmd, client, raw_parameters, models)
def list(self):
    """List container apps, filtered by --environment-type when given."""
    containerapps = super().list()
    env_type = self.get_argument_environment_type()
    # Keep only apps whose environmentId matches the requested env flavour.
    if env_type == CONNECTED_ENVIRONMENT_TYPE:
        wanted = CONNECTED_ENVIRONMENT_RESOURCE_TYPE
    elif env_type == MANAGED_ENVIRONMENT_TYPE:
        wanted = MANAGED_ENVIRONMENT_RESOURCE_TYPE
    else:
        wanted = None
    if wanted is not None:
        containerapps = [c for c in containerapps if wanted in c["properties"]["environmentId"]]
    return containerapps
def get_environment_client(self):
    """Resolve the environment client from the --environment resource id,
    falling back to the --environment-type flag."""
    env = self.get_argument_managed_env()
    if is_valid_resource_id(env):
        # A full resource id tells us the environment flavour directly.
        parsed_env = parse_resource_id(env)
        if parsed_env.get('resource_type').lower() == CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower():
            return ConnectedEnvironmentClient
        return ManagedEnvironmentPreviewClient
    if self.get_argument_environment_type() == CONNECTED_ENVIRONMENT_TYPE:
        return ConnectedEnvironmentClient
    return ManagedEnvironmentPreviewClient
def get_argument_environment_type(self):
    """Return the --environment-type value from the raw parameter bag."""
    return self.get_param("environment_type")
| [
"noreply@github.com"
] | anagg929.noreply@github.com |
dc14cd97a015a86562025969e29bd2a6e3e6aa12 | fb05a8ef1b03448768f113ce760116090441b9c5 | /alembic/versions/6e71c29e6611_delete_created_for_user.py | 7d7a1d7a343a74a6bfadc35bb3e0c937781ed2d1 | [] | no_license | daimingfeng/picture-sharing-project | 54bec8ebda6ca6099f83db53d56fdcf67074e03e | 868c20c8a64578764f3aa043831f97bd0102fb87 | refs/heads/master | 2020-03-26T15:42:46.951891 | 2018-07-03T08:51:37 | 2018-07-03T08:51:37 | 138,396,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | """delete created for user
Revision ID: 6e71c29e6611
Revises: 6126ad9514aa
Create Date: 2018-06-09 10:41:35.911735
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6e71c29e6611'        # this migration's id
down_revision = '6126ad9514aa'   # the migration this one follows
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the unused ``created`` column from ``users``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'created')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore ``users.created`` (dropped data is not recovered)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('created', mysql.DATETIME(), nullable=True))
    # ### end Alembic commands ###
| [
"1115683853@qq.com"
] | 1115683853@qq.com |
3d3f6135cd76c85d43c599a3085696737af60bb0 | e14fa184ec3cad0de83615011728176eaaa06626 | /blockchain.py | c89ade4316711cdad1f20040172222513087a751 | [] | no_license | sa2shun/Blockchain | 945e96760e6fa18100069c4029a3a9fd504cf904 | fbf030e8c13c9ce28598f38fc0e2f8e9ced2a8bc | refs/heads/master | 2020-11-29T23:46:05.984539 | 2020-07-23T04:17:01 | 2020-07-23T04:17:01 | 230,241,960 | 0 | 0 | null | 2019-12-26T10:28:05 | 2019-12-26T10:11:57 | Python | UTF-8 | Python | false | false | 8,363 | py | import contextlib
import hashlib
import json
import logging
import sys
import time
import threading
from ecdsa import NIST256p
from ecdsa import VerifyingKey
import requests
import utils
MINING_DIFFICULTY = 1              # leading zeros required in a valid block hash
MINING_SENDER = 'THE BLOCKCHAIN'   # sentinel sender address for mining-reward transactions
MINING_REWARD = 1.0                # amount credited to the miner per mined block
MINING_TIMER_SEC = 20              # seconds between automatic mining attempts
time_data = []                     # per-block mining durations (seconds), for profiling output
BLOCKCHAIN_PORT_RANGE = (5000, 5003)        # ports scanned when discovering neighbour nodes
NEIGHBOURS_IP_RANGE_NUM = (0, 1)            # host-address offsets scanned for neighbours
BLOCKCHAIN_NEIGHBOURS_SYNC_TIME_SEC = 20    # seconds between neighbour re-discovery runs
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
class BlockChain(object):
    """A minimal proof-of-work blockchain node.

    Maintains a transaction pool and the block chain, mines blocks,
    propagates transactions and consensus requests to neighbour nodes over
    HTTP, and resolves forks by adopting the longest valid chain.
    """

    def __init__(self, blockchain_address=None, port=None):
        # blockchain_address: wallet address credited with mining rewards.
        # port: HTTP port of this node, used for neighbour discovery.
        self.transaction_pool = []
        self.chain = []
        self.neighbours = []
        # Genesis block: nonce 0, previous_hash = hash of an empty block.
        self.create_block(0, self.hash({}))
        self.blockchain_address = blockchain_address
        self.port = port
        # Guard against overlapping mining / neighbour-sync cycles.
        self.mining_semaphore = threading.Semaphore(1)
        self.sync_neighbours_semaphore = threading.Semaphore(1)

    def run(self):
        """Start the node's background activity."""
        self.sync_neighbours()
        self.resolve_conflicts()
        #self.start_mining()  # uncommenting this enables periodic automatic mining

    def set_neighbours(self):
        """Scan nearby hosts/ports and remember the reachable neighbour nodes."""
        self.neighbours = utils.find_neighbours(
            utils.get_host(), self.port,
            NEIGHBOURS_IP_RANGE_NUM[0], NEIGHBOURS_IP_RANGE_NUM[1],
            BLOCKCHAIN_PORT_RANGE[0], BLOCKCHAIN_PORT_RANGE[1])
        logger.info({
            'action': 'set_neighbours', 'neighbours': self.neighbours
        })

    def sync_neighbours(self):
        """Refresh the neighbour list periodically; only one refresh runs at a time."""
        is_acquire = self.sync_neighbours_semaphore.acquire(blocking=False)
        if is_acquire:
            with contextlib.ExitStack() as stack:
                # Release the semaphore even if set_neighbours raises.
                stack.callback(self.sync_neighbours_semaphore.release)
                self.set_neighbours()
                # Re-schedule this method on a timer thread.
                loop = threading.Timer(
                    BLOCKCHAIN_NEIGHBOURS_SYNC_TIME_SEC, self.sync_neighbours)
                loop.start()

    def create_block(self, nonce, previous_hash):
        """Seal the current transaction pool into a new block and append it.

        Also clears the transaction pool on every neighbour so the same
        transactions are not mined twice.
        """
        block = utils.sorted_dict_by_key({
            'timestamp': time.time(),
            'transactions': self.transaction_pool,
            'nonce': nonce,
            'previous_hash': previous_hash
        })
        self.chain.append(block)
        self.transaction_pool = []
        for node in self.neighbours:
            requests.delete(f'http://{node}/transactions')
        return block

    def hash(self, block):
        """Return the sha256 hex digest of the block's key-sorted JSON form."""
        sorted_block = json.dumps(block, sort_keys=True)
        return hashlib.sha256(sorted_block.encode()).hexdigest()

    def add_transaction(self, sender_blockchain_address,
                        recipient_blockchain_address, value,
                        sender_public_key=None, signature=None):
        """Validate a transaction and append it to the pool.

        Mining-reward transactions (sender == MINING_SENDER) are accepted
        without a signature; all others must carry a valid ECDSA signature.
        Returns True when the transaction was accepted.
        """
        transaction = utils.sorted_dict_by_key({
            'sender_blockchain_address': sender_blockchain_address,
            'recipient_blockchain_address': recipient_blockchain_address,
            'value': int(value)
        })
        if sender_blockchain_address == MINING_SENDER:
            self.transaction_pool.append(transaction)
            return True
        if self.verify_transaction_signature(
                sender_public_key, signature, transaction):
            self.transaction_pool.append(transaction)
            return True
        return False

    def create_transaction(self, sender_blockchain_address,
                           recipient_blockchain_address, value,
                           sender_public_key, signature):
        """Add a transaction locally and broadcast it to all neighbours."""
        is_transacted = self.add_transaction(
            sender_blockchain_address, recipient_blockchain_address,
            value, sender_public_key, signature)
        if is_transacted:
            for node in self.neighbours:
                requests.put(
                    f'http://{node}/transactions',
                    json={
                        'sender_blockchain_address': sender_blockchain_address,
                        'recipient_blockchain_address':
                            recipient_blockchain_address,
                        'value': value,
                        'sender_public_key': sender_public_key,
                        'signature': signature,
                    }
                )
        return is_transacted

    def verify_transaction_signature(
            self, sender_public_key, signature, transaction):
        """Verify the ECDSA (NIST256p) signature over the transaction dict.

        NOTE(review): ``VerifyingKey.verify`` raises on an invalid signature
        rather than returning False, so callers see an exception — not a
        False result — for a bad signature.  Confirm against the ecdsa docs.
        """
        sha256 = hashlib.sha256()
        sha256.update(str(transaction).encode('utf-8'))
        message = sha256.digest()
        signature_bytes = bytes().fromhex(signature)
        verifying_key = VerifyingKey.from_string(
            bytes().fromhex(sender_public_key), curve=NIST256p)
        verified_key = verifying_key.verify(signature_bytes, message)
        return verified_key

    def valid_proof(self, transactions, previous_hash, nonce,
                    difficulty=MINING_DIFFICULTY):
        """Return True when hashing (transactions, previous_hash, nonce)
        yields the required number of leading zeros."""
        guess_block = utils.sorted_dict_by_key({
            'transactions': transactions,
            'nonce': nonce,
            'previous_hash': previous_hash
        })
        guess_hash = self.hash(guess_block)
        return guess_hash[:difficulty] == '0'*difficulty

    def proof_of_work(self):
        """Brute-force the smallest nonce satisfying valid_proof."""
        transactions = self.transaction_pool.copy()
        previous_hash = self.hash(self.chain[-1])
        nonce = 0
        while self.valid_proof(transactions, previous_hash, nonce) is False:
            nonce += 1
        return nonce

    def mining(self):
        """Mine one block and ask the neighbours to run consensus.

        Adds the reward transaction, searches for a valid nonce, seals the
        block and records how long mining took.  Returns False when the
        transaction pool is empty (nothing to mine).
        """
        if not self.transaction_pool:
            return False
        start_time = time.time()
        self.add_transaction(
            sender_blockchain_address=MINING_SENDER,
            recipient_blockchain_address=self.blockchain_address,
            value=MINING_REWARD)
        nonce = self.proof_of_work()
        previous_hash = self.hash(self.chain[-1])
        self.create_block(nonce, previous_hash)
        logger.info({'action': 'mining', 'status': 'success'})
        # Profiling output: duration of each mined block so far.
        action_time = time.time() - start_time
        time_data.append(action_time)
        print(time_data)
        print(len(time_data))
        print(action_time)
        for node in self.neighbours:
            requests.put(f'http://{node}/consensus')
        return True

    def start_mining(self):
        """Mine periodically; the semaphore keeps a single mining loop alive."""
        is_acquire = self.mining_semaphore.acquire(blocking=False)
        if is_acquire:
            with contextlib.ExitStack() as stack:
                stack.callback(self.mining_semaphore.release)
                self.mining()
                loop = threading.Timer(MINING_TIMER_SEC, self.start_mining)
                loop.start()

    def calculate_total_amount(self, blockchain_address):
        """Sum the value *received* by blockchain_address across the chain.

        NOTE(review): only credits are counted — the sender debit is
        commented out below — so this is "total received", not a balance.
        """
        total_amount = 0
        for block in reversed(self.chain):
            for transaction in block['transactions']:
                value = int(transaction['value'])
                if blockchain_address == transaction['recipient_blockchain_address']:
                    total_amount += value
                # if blockchain_address == transaction['sender_blockchain_address']:
                #     total_amount -= value
        return total_amount

    def valid_chain(self, chain):
        """Verify hash links and proof-of-work for every block of *chain*."""
        pre_block = chain[0]
        current_index = 1
        while current_index < len(chain):
            block = chain[current_index]
            if block['previous_hash'] != self.hash(pre_block):
                return False
            if not self.valid_proof(
                    block['transactions'], block['previous_hash'],
                    block['nonce'], MINING_DIFFICULTY):
                return False
            pre_block = block
            current_index += 1
        return True

    def resolve_conflicts(self):
        """Longest-valid-chain consensus.

        Adopts the longest valid chain offered by any neighbour; returns
        True when the local chain was replaced.
        """
        longest_chain = None
        max_length = len(self.chain)
        for node in self.neighbours:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                response_json = response.json()
                chain = response_json['chain']
                chain_length = len(chain)
                if chain_length > max_length and self.valid_chain(chain):
                    max_length = chain_length
                    longest_chain = chain
        if longest_chain:
            self.chain = longest_chain
            logger.info({'action': 'resolve_conflicts', 'status': 'replaced'})
            return True
        logger.info({'action': 'resolve_conflicts', 'status': 'not_replaced'})
        return False
| [
"shunlovessoccer1107@gmail.com"
] | shunlovessoccer1107@gmail.com |
2853da436c35c7c035dae92d2f60618f3869f870 | 09a940295274c4d9f668b39ef3d002ff2b26ea5b | /Graphs/primsMST.py | f31ebc0e9c6bc95aae7de06c6638fa58f4d201b3 | [] | no_license | sandeep-skb/Algorithms | 39d50f7e92211a69cac5a7e5e5358f99d623717d | 2859429cf6ec10a7c0b1f7e10e306a40b9a80a7c | refs/heads/master | 2020-05-26T19:17:24.972562 | 2019-11-30T06:30:26 | 2019-11-30T06:30:26 | 188,346,496 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | import sys
class PriorityQueue:
    """Minimal priority queue mapping vertex -> priority value.

    Backed by a plain dict, so ``deleteMin`` is O(n).  Pushing an existing
    vertex simply overwrites (updates) its priority.
    """

    def __init__(self):
        self.pq = {}

    # To insert values into the PQ
    def push(self, vtx, val):
        """Insert *vtx* with priority *val*, or update its priority."""
        self.pq[vtx] = val

    # To delete and return the vertex with the minimum value from the PQ
    def deleteMin(self):
        """Remove and return the vertex with the smallest priority.

        Raises ValueError when the queue is empty.
        Fixed: a single O(n) scan with ``min(key=...)`` replaces the previous
        two passes (min over the values followed by a key search).  Ties
        resolve to the first minimal key in insertion order, as before.
        """
        vtx = min(self.pq, key=self.pq.get)
        del self.pq[vtx]
        return vtx

    # To check if the PQ is empty
    def IsEmpty(self):
        """Return True when the queue holds no entries."""
        return (len(self.pq) == 0)
class Graph:
    """Dense adjacency-matrix graph with Prim's MST (lazy-deletion queue).

    ``distance[v]`` holds the weight of the cheapest edge connecting v to
    the growing tree and ``path[v]`` the tree vertex on the other end.
    A weight of 0 in the matrix means "no edge".
    """

    def __init__(self, vertices):
        self.num_vtx = vertices
        self.graph = [[0] * vertices for _ in range(vertices)]
        self.pqueue = PriorityQueue()
        self.distance = [sys.maxsize] * self.num_vtx
        self.path = [0] * self.num_vtx

    def find_shortest_dist(self, idx):
        """Relax every edge leaving *idx*, recording cheaper tree connections."""
        for neighbour in range(self.num_vtx):
            weight = self.graph[idx][neighbour]
            if weight == 0:
                continue  # no edge between idx and neighbour
            if weight < self.distance[neighbour]:
                self.distance[neighbour] = weight
                self.pqueue.push(neighbour, weight)
                self.path[neighbour] = idx

    def primMST(self, src):
        """Grow a minimum spanning tree from *src* and print the chosen edges."""
        remaining = [v for v in range(self.num_vtx)]
        in_tree = []
        self.distance[src] = 0
        self.pqueue.push(src, self.distance[src])
        while not self.pqueue.IsEmpty():
            vtx = self.pqueue.deleteMin()
            if vtx in in_tree:
                continue  # stale queue entry (lazy deletion)
            in_tree.append(vtx)
            remaining.remove(vtx)
            self.find_shortest_dist(vtx)
        for i in range(len(self.distance)):
            print("distance from {} to {} is {}".format(self.path[i], i, self.distance[i]))
def main():
    """Demo: build a 5-vertex weighted graph and print its MST edges."""
    demo = Graph(5)
    demo.graph = [
        [0, 2, 0, 6, 0],
        [2, 0, 3, 8, 5],
        [0, 3, 0, 0, 7],
        [6, 8, 0, 0, 9],
        [0, 5, 7, 9, 0],
    ]
    demo.primMST(0)


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | sandeep-skb.noreply@github.com |
8bad661b1a6ffb4facb6ee6322eb219f7857cfcd | 9defb97d343de65133f776caef4a53d4d4841a7b | /server/src/settings.py | 2403dda025aadd2e9073f6e2e0ea22655b3f7188 | [] | no_license | victorgevaerd/app-prime-numbers-bridge-2021.1 | ff635066c5de5ddcfe73e19144f9dc30b4b74567 | daf31ae56aa10f894acd349a9a4938359400b487 | refs/heads/main | 2023-03-22T23:31:01.353356 | 2021-03-16T23:03:34 | 2021-03-16T23:03:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import sys
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# Validate the required environment variables (loaded from servidor/.env).
with_error = False

if getenv('PORT') is None:
    print('Variável "PORT" não definida!. Defina no arquivo servidor/.env')
    print('Exemplo: PORT=3000')
    with_error = True

if getenv('DEBUG') is None:
    print('Variável "DEBUG" não definida!. Defina no arquivo servidor/.env')
    print('Exemplo: DEBUG=True')
    with_error = True

if with_error:
    raise SystemExit('Variáveis de ambiente não definidas!')

try:
    # PORT must be a natural number, as the error message states.
    PORT = int(getenv('PORT'))
    if PORT <= 0:
        raise ValueError(PORT)
except ValueError:
    raise SystemExit('Variável PORTA deve ser um número natural!')

# BUG FIX: ``bool(getenv('DEBUG'))`` was True for ANY non-empty string —
# including "False" and "0" — and ``bool()`` never raises ValueError, so the
# old except-branch was dead code.  Parse the usual boolean spellings instead.
_debug_raw = getenv('DEBUG').strip().lower()
if _debug_raw in ('true', '1', 'yes', 'on'):
    DEBUG = True
elif _debug_raw in ('false', '0', 'no', 'off'):
    DEBUG = False
else:
    raise SystemExit('Variável DEBUG deve ser do tipo boolean!')
| [
"victorgevaerd@gmail.com"
] | victorgevaerd@gmail.com |
4fccc4958d08996a263601b37e9b8b1a85416c19 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /stackoverflow_site__parsing/print__datetime_utc_and_reputation_change__with_missing_dates.py | 00e9926585256ee0385b09771cb3e87d0bf1e62c | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
def generate_range_dates(start_date, end_date) -> list:
    """Return every date from the earlier to the later argument, inclusive.

    The arguments may be given in either order; the result is ascending.
    """
    current, stop = min(start_date, end_date), max(start_date, end_date)

    # Start with the first date, then append one day at a time.
    dates = [current]
    while current < stop:
        current += DT.timedelta(days=1)
        dates.append(current)
    return dates
if __name__ == '__main__':
    url = 'https://ru.stackoverflow.com/users/201445/gil9red?tab=reputation'

    from print__datetime_utc_and_reputation_change import get_day_by_rep

    # Reputation change per day, keyed by date.
    day_by_rep = get_day_by_rep(url)

    start_date, end_date = min(day_by_rep), max(day_by_rep)
    print(f'Start: {start_date}, end: {end_date}')
    print()

    # Walk the whole date range (newest first) so days without any
    # reputation change still show up as 0.
    for day in reversed(generate_range_dates(start_date, end_date)):
        print(f'{day:%d/%m/%Y} : {day_by_rep.get(day, 0)}')
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
4bdba1ed302a07e95891189723cb8e02be46a173 | 8806a17d66d7abb8434c879215dc09cbfc3b5a25 | /bin/log.py | 02e6764c09facc7e70ec062e7792b50d468208ef | [] | no_license | chenrun666/JW_purchase | f23d1719f447be669134c8fc02b1b8fd9d82cba8 | 9552920259f4014a08b38db88d0d48f0864822d3 | refs/heads/master | 2020-04-25T20:44:06.403805 | 2019-03-09T10:05:19 | 2019-03-09T10:05:19 | 173,057,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,963 | py | # coding:utf-8
import logging
from logging.handlers import RotatingFileHandler # 按文件大小滚动备份
import colorlog # 控制台日志输入颜色
import time
import datetime
import os
cur_path = os.path.dirname(os.path.realpath(__file__))
# log_path is the directory where log files are stored (../logs relative to this file)
log_path = os.path.join(os.path.dirname(cur_path), 'logs')
if not os.path.exists(log_path): os.mkdir(log_path)  # create the logs folder automatically if it is missing
logName = os.path.join(log_path, '%s.log' % time.strftime('%Y-%m-%d'))  # daily log file name
# Console colour per log level (used by colorlog).
log_colors_config = {
    'DEBUG': 'cyan',
    'INFO': 'green',
    'WARNING': 'yellow',
    'ERROR': 'red',
    'CRITICAL': 'red',
}
class Log:
    """Coloured console + size-rotated file logger with log-dir housekeeping."""

    def __init__(self, logName=logName):
        # NOTE(review): the default binds the module-level logName at import
        # time, so the file name is fixed to the date the module was loaded.
        self.logName = logName
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.formatter = colorlog.ColoredFormatter(
            '%(log_color)s[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s',
            log_colors=log_colors_config)  # log output format
        self.handle_logs()

    def get_file_sorted(self, file_path):
        """Return the directory entries sorted ascending by last-modified
        time (os.path.getmtime), or None when the directory is empty."""
        dir_list = os.listdir(file_path)
        if not dir_list:
            return
        else:
            dir_list = sorted(dir_list, key=lambda x: os.path.getmtime(os.path.join(file_path, x)))
            return dir_list

    def TimeStampToTime(self, timestamp):
        """Format a unix timestamp as 'YYYY-MM-DD' (local time)."""
        timeStruct = time.localtime(timestamp)
        return str(time.strftime('%Y-%m-%d', timeStruct))

    def handle_logs(self):
        """Purge expired and excess files from the report directories.

        Deletes files created more than 7 days ago and, when a directory
        holds more than 10 files, deletes all but the 4 newest.
        """
        dir_list = ['report']  # names of the directories to clean
        for dir in dir_list:
            dirPath = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + '/' + dir  # full path of the directory to clean
            file_list = self.get_file_sorted(dirPath)  # files sorted by modification time
            if file_list:  # skip when the directory has no log files
                for i in file_list:
                    file_path = os.path.join(dirPath, i)  # full path of the file
                    t_list = self.TimeStampToTime(os.path.getctime(file_path)).split('-')
                    now_list = self.TimeStampToTime(time.time()).split('-')
                    t = datetime.datetime(int(t_list[0]), int(t_list[1]),
                                          int(t_list[2]))  # creation date as datetime.datetime
                    now = datetime.datetime(int(now_list[0]), int(now_list[1]), int(now_list[2]))
                    # NOTE(review): original comment said "older than 6 days"
                    # but the code deletes files strictly older than 7 days.
                    if (now - t).days > 7:
                        self.delete_logs(file_path)
                if len(file_list) > 10:  # cap the number of files kept
                    file_list = file_list[0:-4]  # everything except the 4 newest is deleted
                    for i in file_list:
                        file_path = os.path.join(dirPath, i)
                        print(file_path)
                        self.delete_logs(file_path)

    def delete_logs(self, file_path):
        """Remove one file; permission failures are logged, never raised."""
        try:
            os.remove(file_path)
        except PermissionError as e:
            Log().warning('删除日志文件失败:{}'.format(e))

    def __console(self, level, message):
        """Emit *message* at *level* to both the daily file and the console.

        Handlers are added and removed on every call so repeated calls on the
        shared root logger do not produce duplicate output.
        """
        # FileHandler writing to the local log file; RotatingFileHandler
        # rolls the file over at 10 MB, keeping up to 10 backups.
        fh = RotatingFileHandler(filename=self.logName, mode='a', maxBytes=1024 * 1024 * 10, backupCount=10,
                                 encoding='utf-8')
        fh.suffix = "%Y%m%d.log"
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(self.formatter)
        self.logger.addHandler(fh)
        # StreamHandler for coloured console output.
        ch = colorlog.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(self.formatter)
        self.logger.addHandler(ch)
        if level == 'info':
            self.logger.info(message)
        elif level == 'debug':
            self.logger.debug(message)
        elif level == 'warning':
            self.logger.warning(message)
        elif level == 'error':
            self.logger.error(message)
        # Remove the handlers again to avoid duplicated log output.
        self.logger.removeHandler(ch)
        self.logger.removeHandler(fh)
        fh.close()  # close the opened log file

    def debug(self, message):
        self.__console('debug', message)

    def info(self, message):
        self.__console('info', message)

    def warning(self, message):
        self.__console('warning', message)

    def error(self, message):
        self.__console('error', message)
# Module-level singleton used by importers: ``from log import logger``.
logger = Log()

if __name__ == "__main__":
    # Smoke test: emit one message per level.
    log = Log()
    log.debug("---测试开始----")
    log.info("操作步骤")
    log.warning("----测试结束----")
    log.error("----测试错误----")
| [
"17610780919@163.com"
] | 17610780919@163.com |
859ca82edb05bc4fcce9dff212cb636c6f6b3e55 | e0568ef3af54278af2eff579c2c7d0920f8019e9 | /PDF Analytics/convert_pdf_to_text.py | 2cfaa14b92c1c9aee531998702a771f0f4396025 | [] | no_license | https-seyhan/TextMining | 57bde1b15a7ff31baf7e6d627efec3ee5185e962 | d2fd9fc3e3f01ede8c0e2289efb3028020c47609 | refs/heads/master | 2022-08-25T22:50:34.251152 | 2022-08-11T04:28:07 | 2022-08-11T04:28:07 | 178,507,673 | 3 | 0 | null | 2020-12-27T22:33:03 | 2019-03-30T03:42:09 | Python | UTF-8 | Python | false | false | 3,392 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: saul
"""
import os,time,PythonMagick,subprocess,shutil
import os
from PIL import Image, ImageChops
from optparse import OptionParser
def get_image_list_from_pdf(pdf_file):
    """Convert *pdf_file* to per-page JPEGs and return their file names.

    Runs ImageMagick ``convert`` on the PDF, then scans the PDF's directory
    for the JPEG pages that were produced (convert names them after the PDF).
    """
    pdf_name = pdf_file.split(os.sep)[-1].split('.pdf')[0]
    print("PDF Name :", pdf_name)
    # BUG FIX: pdf_dir was referenced below but its assignment was commented
    # out, so this function always raised NameError.  Recover the directory
    # that holds the PDF (and therefore the generated JPEGs).
    pdf_dir = os.path.dirname(os.path.abspath(pdf_file))
    jpg = pdf_file.split('.pdf')[0] + '.jpg'
    # Convert the pdf file to jpg file(s) — one jpg per page.
    call_convert(pdf_file, jpg)
    # Get all the jpg files after calling convert and store them in a list.
    image_list = []
    file_list = os.listdir(pdf_dir)
    for f in file_list:
        if f[-4:] == '.jpg' and pdf_name in f:
            # Make sure the file names of both pdf are not similar
            image_list.append(f)
    print('Total of %d jpgs produced after converting the pdf file: %s' % (len(image_list), pdf_file))
    return image_list
def call_convert(src, dest):
    """Invoke ImageMagick ``convert`` to turn *src* (pdf) into *dest* (jpg).

    Failures are logged and swallowed, matching the original best-effort
    behaviour.
    """
    print('About to call convert on %s' % src)
    try:
        # BUG FIX: the argument list was combined with shell=True, which on
        # POSIX runs plain "convert" through the shell and DROPS src/dest.
        # With a list of arguments, shell must stay False (the default) —
        # which also avoids shell injection via file names.
        subprocess.check_call(["convert", src, dest])
    except Exception as e:
        print('Convert exception ... could be an ImageMagick bug')
        print(e)
    print('Finished calling convert on %s' % src)
def get_pdf_diff(pdf1, cleanup=True):
    """Convert *pdf1* to page images — the first step of a pdf visual diff.

    NOTE(review): the actual two-pdf overlay/diff below is commented out, so
    this currently only runs the pdf -> jpg conversion and returns None; the
    *cleanup* flag is unused.  The commented code appears to be a former
    instance-method implementation (it references ``self``).
    """
    # Get the list of images using get_image_list_from_pdf which inturn calls convert on a given pdf
    pdf1_list = get_image_list_from_pdf(pdf1)
    #pdf2_list = self.get_image_list_from_pdf(self.pdf2)
    #If diff directory already does exist - delete it
    #Easier to simply nuke the folder and create it again than to check if its empty
    # diff_image_dir = self.download_dir + os.sep+'diff_images
    # if os.path.exists(diff_image_dir):
    #     print('diff_images directory exists ... about to nuke it')
    #     shutil.rmtree(diff_image_dir)
    # #Create a new and empty diff directory
    # os.mkdir(diff_image_dir)
    # print('diff_images directory created')
    # print('Total pages in pdf2: %d'%len(pdf2_list))
    # print('Total pages in pdf1 : %d'%len(pdf1_list))
    # #Verify that there are equal number pages in pdf1 and pdf2
    # if len(pdf2_list)==len(pdf1_list) and len(pdf2_list) !=0:
    #     print('Check SUCCEEDED: There are an equal number of jpgs created from the pdf generated from pdf2 and pdf1')
    #     print('Total pages in images: %d'%len(pdf2_list))
    #     pdf1_list.sort()
    #     pdf2_list.sort()
    #     #Create the diffed images
    #     result_flag = self.create_diff_image(pdf1_list,pdf2_list,diff_image_dir)
    # else:
    #     print('Check FAILED: There are an unequal number of jpgs created from the pdf generated from pdf2 and pdf1')
    #     print('Total pages in image2 : %d'%len(pdf2_list))
    #     print('Total pages in image1: %d'%len(pdf1_list))
    #     print('ERROR: Skipping image comparison between %s and %s'%(self.pdf1,self.pdf2))
    # if cleanup:
    #     #Delete all the image files created
    #     self.cleanup(diff_image_dir,pdf1_list,pdf2_list)
    #
    # return result_flag
if __name__== '__main__':
    # Smoke test on a sample document.  NOTE(review): get_pdf_diff currently
    # only performs the pdf -> jpg conversion step (the diffing is disabled).
    get_pdf_diff('RCD.0001.0075.0002.pdf')
    #get_image_list_from_pdf('RCD.0001.0075.0002.pdf')
| [
"noreply@github.com"
] | https-seyhan.noreply@github.com |
67f0f407c02ed6a600d887c4e946413b9dfb52b5 | dacc544088406d5c8f2b77e2c51a797094dd5b82 | /datasets/datafeeder_npy.py | 8afdc225185f13f8eb2600dfd4f7d8b26b677380 | [
"MIT",
"CC-BY-3.0",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | Jim-Song/tacotron_multispeaker | 4edab891b56708696694df9a79736dee627e9e90 | fb159e162c3fcfaf4156bd8579655c590d0b6617 | refs/heads/master | 2020-03-25T23:26:49.052698 | 2018-08-27T10:54:06 | 2018-08-27T10:54:06 | 144,275,196 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,323 | py | import numpy as np
import os, json
import random
import tensorflow as tf
import threading
import time
import traceback
from text import cmudict, text_to_sequence, text_to_sequence2
from util.infolog import log
_batches_per_group = 20  # number of batches read and bucketed per feeder iteration
_pad = 0                 # padding value used for both text ids and spectrogram frames
class DataFeeder(threading.Thread):
    '''Feeds batches of data into a queue on a background thread.'''

    def __init__(self, hparams, file_list, coordinator):
        # hparams: training hyper-parameters (batch_size, num_mels, num_freq,
        #          cleaners, per_cen_phone_input, outputs_per_step, ...).
        # file_list: metadata files; each line evaluates to
        #            [spec_npy, mel_npy, wav_npy, text, speaker_id].
        # coordinator: tf.train.Coordinator used to stop this thread.
        super(DataFeeder, self).__init__()
        self._hparams = hparams
        self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
        self._offset = 0
        self._metadata = []
        self._coord = coordinator
        self._p_phone_sub = 0.5  # probability of substituting a word with its phones

        # Load metadata: speaker ids from each file are shifted by id_num so
        # that speaker ids remain unique across all metadata files.
        id_num = 0
        for file in file_list:
            with open(file, encoding='utf-8') as f:
                id_num_crrt = 0
                crrt_metadata = []
                for line in f:
                    # NOTE(review): eval on data files — trusted input only.
                    line = eval(line)
                    if line[4] > id_num_crrt:
                        id_num_crrt = line[4]
                    line[4] = line[4] + id_num
                    crrt_metadata.append(line)
                id_num += id_num_crrt + 1
                self._metadata = self._metadata + crrt_metadata
                log('No. %d of samples from %s' % (len(crrt_metadata), file))
        random.shuffle(self._metadata)

        # Create placeholders for inputs and targets. Don't specify batch size because we want to
        # be able to feed different sized batches at eval time.
        self._placeholders = [
            tf.placeholder(tf.int32, [None, None], 'inputs'),
            tf.placeholder(tf.int32, [None], 'input_lengths'),
            tf.placeholder(tf.float32, [None, None, hparams.num_mels], 'mel_targets'),
            tf.placeholder(tf.float32, [None, None, hparams.num_freq], 'linear_targets'),
            tf.placeholder(tf.float32, [None, None], 'wavs'),
            tf.placeholder(tf.int32, [None], 'identities'),
        ]

        # Create queue for buffering data (capacity: 8 batches):
        queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32, tf.float32, tf.int32], name='input_queue')
        self._enqueue_op = queue.enqueue(self._placeholders)
        self.inputs, self.input_lengths, self.mel_targets, self.linear_targets, self.wavs, self.identities = queue.dequeue()
        self.inputs.set_shape(self._placeholders[0].shape)
        self.input_lengths.set_shape(self._placeholders[1].shape)
        self.mel_targets.set_shape(self._placeholders[2].shape)
        self.linear_targets.set_shape(self._placeholders[3].shape)
        self.wavs.set_shape(self._placeholders[4].shape)
        self.identities.set_shape(self._placeholders[5].shape)

        # Load phone dict: If enabled, this will randomly substitute some words in the training data with
        # their ARPABet equivalents, which will allow you to also pass ARPABet to the model for
        # synthesis (useful for proper nouns, etc.)
        if hparams.per_cen_phone_input:
            char_2_phone_dict_path = './datasets/char_2_phone_dict.json'
            if not os.path.isfile(char_2_phone_dict_path):
                raise Exception('no char_2_phone dict found')
            with open(char_2_phone_dict_path, 'r') as f:
                self._phone_dict = json.load(f)
            log('Loaded characters to phones dict from %s' % char_2_phone_dict_path)
        else:
            self._phone_dict = None

    def start_in_session(self, session):
        """Attach a tf.Session and start the background feeding thread."""
        self._session = session
        self.start()

    def run(self):
        """Thread main loop: enqueue groups until the coordinator stops."""
        try:
            while not self._coord.should_stop():
                self._enqueue_next_group()
        except Exception as e:
            traceback.print_exc()
            self._coord.request_stop(e)

    def _enqueue_next_group(self):
        """Read a group of examples, bucket them by text length, and enqueue."""
        start = time.time()

        # Read a group of examples:
        n = self._hparams.batch_size
        r = self._hparams.outputs_per_step
        examples = [self._get_next_example() for i in range(n * _batches_per_group)]

        # Bucket examples based on similar output sequence length for efficiency:
        # x[-3] is len(input_data) — the text length of the example tuple.
        examples.sort(key=lambda x: x[-3])
        batches = [examples[i:i+n] for i in range(0, len(examples), n)]
        random.shuffle(batches)

        log('Generated %d batches of size %d in %.03f sec' % (len(batches), n, time.time() - start))
        for batch in batches:
            feed_dict = dict(zip(self._placeholders, _prepare_batch(batch, r)))
            self._session.run(self._enqueue_op, feed_dict=feed_dict)

    def _get_next_example(self):
        '''Loads a single example (input, mel_target, linear_target, cost) from disk'''
        if self._offset >= len(self._metadata):
            # One full pass over the metadata done: restart and reshuffle.
            self._offset = 0
            random.shuffle(self._metadata)
        meta = self._metadata[self._offset]
        # meta = ['/ssd1/npy_tacotron_THCHS_id_num_60/THCHS-spec-00151.npy',
        #         '/ssd1/npy_tacotron_THCHS_id_num_60/THCHS-mel-00151.npy',
        #         '/ssd1/npy_tacotron_THCHS_id_num_60/THCHS-wav-00151.npy',
        #         '职工 们 爱 厂 爱岗 爱 产品 心 往 一处 想 劲儿 往 一处 使',
        #         25]
        self._offset += 1

        # Preprocess text: optionally replace words with their phone strings.
        text = meta[3]
        if self._phone_dict :
            # Re-draw the substitution probability, biased by per_cen_phone_input.
            self._p_phone_sub = random.random() - 0.5 + (self._hparams.per_cen_phone_input * 2 -0.5)
            text2 = ''
            for word in text.split(' '):
                exist_alpha = False
                for item in word:
                    if is_alphabet(item):
                        exist_alpha = True
                        break
                phone = self._maybe_get_arpabet(word)
                # NOTE(review): this only prepends a space when text2 is still
                # empty; words are otherwise concatenated without separators —
                # confirm this matches what text_to_sequence2 expects.
                if not text2 and exist_alpha:
                    text2 = text2 + ' '
                text2 += phone
            text = text2
        input_data = np.asarray(text_to_sequence2(text, self._cleaner_names), dtype=np.int32)

        # Preprocess other inputs: load precomputed arrays from disk.
        linear_target = np.load(meta[0])
        mel_target = np.load(meta[1])
        wav = np.load(meta[2])
        identity = meta[4]
        return (input_data, mel_target, linear_target, len(input_data), wav, identity)

    def _maybe_get_arpabet(self, word):
        """Return '{phones}' for *word* with probability _p_phone_sub, else the word itself."""
        try:
            phone = self._phone_dict[word]
            phone = ' '.join(phone)
        except:
            # Word not present in the char-to-phone dict: keep it as text.
            phone = None
            #log('%s is not found in the char 2 phone dict' % word)
        return '{%s}' % phone if phone is not None and random.random() < self._p_phone_sub else word
def _prepare_batch(batch, outputs_per_step):
    """Shuffle one batch of examples and pad/stack each field into arrays.

    Each example is (text_ids, mel, linear, text_len, wav, speaker_id);
    returns the tuple matching the feeder's queue placeholders.
    """
    random.shuffle(batch)
    texts = [example[0] for example in batch]
    return (
        _prepare_inputs(texts),
        np.asarray([len(t) for t in texts], dtype=np.int32),
        _prepare_targets([example[1] for example in batch], outputs_per_step),
        _prepare_targets([example[2] for example in batch], outputs_per_step),
        _prepare_inputs([example[4] for example in batch]),
        np.asarray([example[5] for example in batch], dtype=np.int32),
    )
def _prepare_inputs(inputs):
    """Right-pad every 1-D sequence to the batch maximum length and stack."""
    longest = max(len(seq) for seq in inputs)
    return np.stack([_pad_input(seq, longest) for seq in inputs])
def _prepare_targets(targets, alignment):
    """Pad 2-D targets along time to a shared length that is a multiple of *alignment*."""
    longest = max(len(t) for t in targets) + 1
    padded_len = _round_up(longest, alignment)  # same for every target; hoisted
    return np.stack([_pad_target(t, padded_len) for t in targets])
def _pad_input(x, length):
    """Pad 1-D array *x* on the right with the pad symbol up to *length* entries."""
    trailing = length - x.shape[0]
    return np.pad(x, (0, trailing), mode='constant', constant_values=_pad)
def _pad_target(t, length):
    """Pad 2-D target *t* with pad frames along time (axis 0) up to *length*."""
    trailing = length - t.shape[0]
    return np.pad(t, [(0, trailing), (0, 0)], mode='constant', constant_values=_pad)
def _round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def is_chinese(uchar):
    """Return True if *uchar* is a CJK unified ideograph (U+4E00..U+9FA5)."""
    return u'\u4e00' <= uchar <= u'\u9fa5'
def is_number(uchar):
    """Return True if *uchar* is an ASCII digit (U+0030..U+0039, i.e. 0-9)."""
    return u'\u0030' <= uchar <= u'\u0039'
def is_alphabet(uchar):
    """Return True if *uchar* is an ASCII letter (A-Z or a-z)."""
    return (u'\u0041' <= uchar <= u'\u005a') or (u'\u0061' <= uchar <= u'\u007a')
| [
"sjmdhr@163.com"
] | sjmdhr@163.com |
06ea440e25ecbce5cdc3953966dabeb2f5c5c509 | 31f29b89ceeb60e4d377d2409b2a562798dfd931 | /permits/migrations/0020_auto_20180722_2339.py | 5ab7701d6491925fbf71c050453ae927befc37f7 | [] | no_license | skreimeyer/publicworkspermits | 1a35cfd9480e04c6c2797398d22d6e680a034559 | ce630d74b8541f462a826893f6844b5549252b8e | refs/heads/master | 2020-03-28T15:02:34.071263 | 2018-09-13T18:11:56 | 2018-09-13T18:11:56 | 148,549,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | # Generated by Django 2.0.5 on 2018-07-22 23:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Rebuilds the zoning-district choice list on ProjectInformation.zoning and
    adds help text to ReviewComment.acknowledge.
    """

    dependencies = [
        ('permits', '0019_auto_20180722_2328'),
    ]

    operations = [
        migrations.AlterField(
            model_name='projectinformation',
            name='zoning',
            field=models.CharField(choices=[('O-3', 'O-3 - general office district'), ('DOD', 'DOD - design overlay district'), ('MF-6', 'MF-6 - multifamily district'), ('R-4', 'R-4 - two-family district'), ('I-1', 'I-1 - industrial park district'), ('FP', 'FP - floodplain district'), ('I-3', 'I-3 - heavy industrial district'), ('R-1', 'R-1 - single-family district'), ('MF-12', 'MF-12 - multifamily district'), ('C-1', 'C-1 - neighborhood commercial district'), ('I-2', 'I-2 - light industrial district'), ('R-7A', 'R-7A - manufactured home district'), ('PR', 'PR - Park and recreation district'), ('UU', 'UU - urban use district.'), ('OS', 'OS - open space district'), ('PID', 'PID - planned industrial district'), ('PCD', 'PCD - planned commercial district'), ('O-1', 'O-1 - quiet office district'), ('MF-24', 'MF-24 - multifamily district'), ('C-3', 'C-3 - general commercial district'), ('R-4A', 'R-4A - low density residential district'), ('O-2', 'O-2 - office and institutional district'), ('PRD', 'PRD - planned residential district'), ('M', 'M - mining district'), ('AF', 'AF - agriculture and forestry district'), ('POD', 'POD - planned office district'), ('R-2', 'R-2 - single-family district'), ('MF-18', 'MF-18 - multifamily district'), ('C-2', 'C-2 - shopping center district'), ('R-7', 'R-7 - manufactured home park district'), ('R-6', 'R-6 - high-rise apartment district'), ('PD', 'PD - industrial district'), ('R-5', 'R-5 - urban residence district'), ('C-4', 'C-4 - open display commercial district'), ('R-3', 'R-3 - single-family district')], max_length=20),
        ),
        migrations.AlterField(
            model_name='reviewcomment',
            name='acknowledge',
            field=models.BooleanField(default=False, help_text='Applicant must acknowledge comment'),
        ),
    ]
| [
"samuel.kreimeyer@gmail.com"
] | samuel.kreimeyer@gmail.com |
76f0db3ff3eb3950c75953ea5619bbcd4e1ee88c | 113bfeda578324908963307670718c5545f30e8b | /booksite/booksite/book/migrations/0011_auto_20171205_1611.py | c4e55389b90cee0a19f1960233318db14ed070c8 | [
"Apache-2.0"
] | permissive | tkliuxing/bookspider | f0989814716e38fa081cc300f92fc975ff8ac67d | bc7ba487f0ab6ea7782f5093bb1d074eac662bdf | refs/heads/master | 2021-01-18T23:31:26.566892 | 2020-03-14T04:04:48 | 2020-03-14T04:04:48 | 21,845,464 | 40 | 36 | null | 2015-11-06T03:58:04 | 2014-07-15T03:51:01 | CSS | UTF-8 | Python | false | false | 426 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-12-05 08:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('book', '0010_auto_20171205_1436'),
]
operations = [
migrations.AlterUniqueTogether(
name='bookpage',
unique_together=set([('page_number', 'site')]),
),
]
| [
"ouyanghongyu@gmail.com"
] | ouyanghongyu@gmail.com |
a856babe3469acfa7981edd09b94fb9d1eb0b028 | bb932c6ed17c14272a3b287acf1d6547aa2db09c | /clients/tcp_server_gui.py | dbe9c0ccca477ebdb6134733a5888320689e2d52 | [
"BSD-3-Clause"
] | permissive | bacetech/zen_controller | 44603935981497a748266ffe0660e208b6b6a98c | c463db72e592edc3dc0bc1146a6b73b4238bc9a1 | refs/heads/master | 2021-01-13T05:16:55.215333 | 2017-03-29T01:41:05 | 2017-03-29T01:41:05 | 86,523,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,074 | py | #!/usr/bin/env python
"""
Copyright (c) 2012, Bruce A. Corliss
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the BACETech Consulting nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Bruce A. Corliss BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Standard library imports (deduplicated: `socket` was previously imported
# twice), grouped stdlib-first per convention.
import atexit
import os
import shutil
import socket
import sys
import threading
import time

import wx

# Global Variables -- shared between the GUI thread and the listener thread.
tcp_socket = None         # listening server socket (None when stopped)
tcp_conn = None           # accepted client connection (None when no client)
listen_thread = None      # daemon thread running TcpAcceptConnection
listen_thread_pid = None  # PID recorded by the listener (process-wide PID)

# Parameters
ID_START = wx.NewId()
ID_STOP = wx.NewId()

# Define notification event for thread completion
EVT_RESULT_ID = wx.NewId()

CMD_DELIM = ';'     # single-command delimiter
MSG_DELIM = ';;'    # end-of-message-group delimiter (wire protocol)
BUFFER_SIZE = 1024  # bytes read per recv() call
APP_W = 700         # window width in pixels
APP_H = 350         # window height in pixels
PAD = 10            # inner padding in pixels
class TextFrame(wx.Frame):
    """Main window of the Zen Controller debug TCP server.

    Presents host/port fields and a Start/Stop toggle.  When started, a
    daemon thread accepts a single client and, for every ';;'-terminated
    message, replies with a fixed "RECIEVED;;" then "DONE;;" handshake.
    """

    def __init__(self):
        """Initialize tcp server gui."""
        wx.Frame.__init__(self, None, -1, 'Zen Controller Debug Server', size=(APP_W , APP_H))

        # Add panel
        self.panel = wx.Panel(self, wx.ID_ANY)

        # TCP Connection Objects
        self.hostAddress_text = wx.StaticText(self.panel, -1, "IP Address", pos=(10,10))
        self.hostAddress_edit = wx.TextCtrl(self.panel, -1, "127.0.0.1", pos=(100, 10), size=(75, 15))
        self.hostPort_text = wx.StaticText(self.panel, -1, "Port", pos=(10, 25), size=(20,20))
        self.hostPort_edit = wx.TextCtrl(self.panel, -1, "22500", pos=(100, 25), size=(75, 15))
        self.startserver_toggle = wx.ToggleButton(self.panel, -1, "Start Server", pos=(200, 8), size=(100,35))

        # Command output log (read-only)
        self.output_text = wx.StaticText(self.panel, -1, "Output", pos=(10,50))
        self.output_edit = wx.TextCtrl(self.panel, -1,"",size=(APP_W - 3*PAD, 200),
                                       style=wx.TE_MULTILINE, pos=(PAD,70))
        self.output_edit.SetEditable(False)

        # Callbacks
        self.startserver_toggle.Bind(wx.EVT_TOGGLEBUTTON, self.StartServer_Callback, self.startserver_toggle)

        # Recieve timer.  NOTE(review): the timer is never Start()ed anywhere
        # in this file, and TcpAcceptConnection takes no event argument, so
        # firing it would raise -- confirm intent before removing.
        self.recieve_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.TcpAcceptConnection, self.recieve_timer)

    def StartServer_Callback(self, event):
        """Start or stop acting as the TCP server (toggle-button handler)."""
        global listen_thread
        global listen_thread_pid
        global tcp_socket
        global tcp_conn
        if self.startserver_toggle.GetValue():
            self.LogThis("Starting server...")
            self.startserver_toggle.SetLabel("Stop Server")
            self.TcpServerConnect()
            # Start new daemon thread for listening; a daemon thread cannot
            # keep the process alive at exit.
            listen_thread = threading.Thread(target=self.TcpAcceptConnection)
            listen_thread.setDaemon(True)
            listen_thread.start()
        else:
            self.LogThis("Stopping server...")
            self.startserver_toggle.SetLabel("Start Server")
            # Close tcp connection if it exists.  Closing the sockets makes a
            # blocking accept()/recv() in the listener thread raise and exit.
            if tcp_conn is not None: tcp_conn.close()
            if tcp_socket is not None: tcp_socket.close()
            # Terminate listen thread if it is still running.
            # BUG FIX: the previous code executed "kill -9 <listen_thread_pid>",
            # but listen_thread_pid holds os.getpid() -- the PID of this very
            # process -- so stopping the server killed the whole application.
            # The loop also checks the toggle state and returns on its own.
            if listen_thread is not None and listen_thread.isAlive():
                # _Thread__stop is a CPython-2 internal and may be unavailable.
                try: listen_thread._Thread__stop()
                except: self.LogThis('Listen thread could not be terminated')
            tcp_conn = None
            tcp_socket = None

    def TcpServerConnect(self):
        """Create the listening socket bound to the address/port fields."""
        global tcp_socket
        global tcp_conn
        # Initialize tcp socket.
        # BUG FIX: read the text controls with GetValue() -- the documented
        # accessor for wx.TextCtrl contents -- instead of GetLabel().
        tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.LogThis("Binding and listening: " + self.hostAddress_edit.GetValue() +
                     ": " + self.hostPort_edit.GetValue())
        tcp_socket.bind((self.hostAddress_edit.GetValue(), int(self.hostPort_edit.GetValue())))
        tcp_socket.listen(1)

    def TcpAcceptConnection(self):
        """Accept one client, then answer each ';;'-terminated message.

        Runs on the listener thread: buffers received bytes until MSG_DELIM
        appears, replies "RECIEVED;;" then "DONE;;", and repeats.
        NOTE(review): this thread touches wx widgets (GetValue, LogThis) and
        calls wx.Yield(); wx is generally not thread-safe, so these calls
        should be marshalled to the GUI thread (e.g. wx.CallAfter) -- confirm.
        """
        global tcp_socket
        global tcp_conn
        global listen_thread_pid
        # Record the PID for logging.  os.getpid() is the process-wide PID,
        # not a per-thread identifier.
        listen_thread_pid = os.getpid()

        self.LogThis("Waiting for client connection...")
        tcp_conn, addr = tcp_socket.accept()
        self.LogThis("Client address: " + "".join(str(addr)))
        while True:
            msg_buffer = ''
            while True:
                # Exit promptly once the user presses "Stop Server".
                if not self.startserver_toggle.GetValue(): return
                wx.Yield()
                self.LogThis("Waiting for client message...")
                msg_buffer += tcp_conn.recv(BUFFER_SIZE)
                if msg_buffer.find(MSG_DELIM) >= 0:
                    self.LogThis("Client:\t " + msg_buffer)
                    # Acknowledge receipt.  The "RECIEVED" spelling is part of
                    # the wire protocol -- do not correct it.
                    self.LogThis("Server:\t " + "RECIEVED" + MSG_DELIM)
                    tcp_conn.sendall("RECIEVED" + MSG_DELIM)
                    # Send DONE to terminate message group
                    self.LogThis("Server:\t " + "DONE" + MSG_DELIM)
                    tcp_conn.sendall("DONE" + MSG_DELIM)
                    break
                time.sleep(.5)

    def LogThis(self, output_str):
        """Write *output_str* to stdout and append it to the GUI log window."""
        # print(x) with a single argument behaves identically under Python 2,
        # and keeps this module parseable by Python 3 tools.
        print(output_str)
        self.output_edit.AppendText("\n" + output_str)
        self.output_edit.ShowPosition(self.output_edit.GetLastPosition())
        self.output_edit.Refresh()
def main():
    """Create the wx application, show the server frame, and run the loop."""
    # wx.PySimpleApp is deprecated; wx.App(False) is the drop-in equivalent
    # (False suppresses stdout/stderr redirection to a window).
    app = wx.App(False)
    frame = TextFrame()
    frame.Show()
    app.MainLoop()


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | bacetech.noreply@github.com |
f1f795783eacbc2484ff019eda83c8c8f0b4877d | f0a4a8b8db004f9284b69d2bad5ddcbcb3258ddc | /src/motor.py | 7c6679b006f32dcbc823801b9ad5dcc703047d1a | [] | no_license | chhuang215/GreenPanel | 3e3e07bf2724b03baf0f807e59951e9a66024769 | 366bcaa1e3baf049acee1aa7f7f165baba7c2f92 | refs/heads/master | 2021-03-13T04:17:01.904054 | 2020-04-01T15:53:31 | 2020-04-01T15:53:31 | 91,508,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,845 | py | import datetime
import threading
import RPi.GPIO as GPIO
from controller import GPIOController
class Motor:
    """DC motor driven through an H-bridge: two direction pins plus one
    PWM pin for speed control (RPi.GPIO).
    """

    PWM_FREQ = 75        # PWM frequency in Hz
    PWM_DC = 75          # default duty cycle (%)
    PWM_DC_FAST = 75     # duty cycle (%) used for manual operation
    DIR_CW = RIGHT = 1   # clockwise / "right"
    DIR_CCW = LEFT = 2   # counter-clockwise / "left"

    def __init__(self, inp1, inp2, inppwm, enable_timer=True):
        """Set up the motor pins and (optionally) enable its rotate timer.

        inp1, inp2  -- H-bridge direction pins (BCM/board numbers as
                       configured elsewhere)
        inppwm      -- pin driven by software PWM for speed control
        enable_timer -- when True, the attached MotorRotateTimer may run
        """
        self.pin = (inp1, inp2, inppwm)
        self.inp1 = inp1
        self.inp2 = inp2
        self.pwm = GPIO.PWM(inppwm, self.PWM_FREQ)
        self.rotating = False
        self.timer = MotorRotateTimer(self)
        self.pwm.start(0)  # start PWM at 0% duty cycle (motor idle)
        if enable_timer:
            self.timer.enabled = True

    def manual_rotate(self, direction):
        """Rotate at the manual (fast) duty cycle in *direction*."""
        #lid = GPIOController.get_component(GPIOController.PIN.PUSH_BUTTON)
        # if lid.status == lid.OPENED:
        self.rotate(direction=direction, dutycycle=Motor.PWM_DC_FAST)

    def manual_stop(self):
        """Stop the motor (manual counterpart of stop())."""
        #lid = GPIOController.get_component(GPIOController.PIN.PUSH_BUTTON)
        #if lid.status == lid.OPENED:
        self.stop()

    def rotate(self, direction=RIGHT, dutycycle=PWM_DC):
        """Spin the motor in *direction* at *dutycycle* percent."""
        self.pwm.ChangeDutyCycle(dutycycle)
        if direction == Motor.RIGHT:
            GPIO.output(self.inp1, GPIO.HIGH)
            GPIO.output(self.inp2, GPIO.LOW)
        else:
            GPIO.output(self.inp1, GPIO.LOW)
            GPIO.output(self.inp2, GPIO.HIGH)
        # BUG FIX: keep the state flag in sync (it was previously set once in
        # __init__ and never updated).
        self.rotating = True

    def stop(self):
        """Stop the motor: zero duty cycle and both direction pins low."""
        self.pwm.ChangeDutyCycle(0)
        GPIO.output(self.inp1, GPIO.LOW)
        GPIO.output(self.inp2, GPIO.LOW)
        # BUG FIX: see rotate() -- reflect the stopped state.
        self.rotating = False
class MotorRotateTimer:
    """Background scheduler for a Motor.

    While active, it runs the motor counter-clockwise during the first five
    minutes of every hour from 07:00 up to midnight, and keeps it stopped
    the rest of the time (including the 00:00-07:00 quiet period).
    """

    def __init__(self, motor):
        """Attach to *motor*; the loop stays idle until activate() is called
        with self.enabled set to True."""
        self._timer = None
        self.motor = motor
        self.is_activated = False
        self.enabled = False

    def __check_timer_loop(self):
        """Apply the schedule for the current wall-clock time, then re-arm a
        one-shot threading.Timer for the next boundary."""
        if not self.is_activated:
            return

        now = datetime.datetime.now()
        print("!MOTOR Check_Timer %s %s" % (self.motor.__class__.__name__, now), end='')

        if 0 <= now.hour < 7:
            # Quiet hours: keep the motor off and wake again at 07:00 today.
            self.motor.stop()
            print(" ! MOTOR_STOPPED")
            wake_at = now.replace(hour=7, minute=0, second=0, microsecond=0)
        elif now.minute < 5:
            # First five minutes of the hour: run the motor, re-check at :05.
            self.motor.rotate(direction=Motor.DIR_CCW)
            print(" ! MOTOR_ROTATING")
            wake_at = now.replace(minute=5, second=0, microsecond=0)
        else:
            # Remainder of the hour: motor off until the top of the next hour.
            self.motor.stop()
            print(" ! MOTOR_STOPPED")
            wake_at = now.replace(minute=0, second=0, microsecond=0) \
                      + datetime.timedelta(hours=1)

        print("MOTOR next check time", wake_at)
        delay = (wake_at - now).total_seconds()
        self._timer = threading.Timer(delay, self.__check_timer_loop)
        self._timer.start()

    def activate(self):
        """Start the scheduling loop (no-op unless enabled and not running)."""
        if not self.is_activated and self.enabled:
            print("MOTOR TIMER ACTIVATED", datetime.datetime.now())
            self.is_activated = True
            self.__check_timer_loop()

    def deactivate(self):
        """Cancel any pending timer and mark the loop inactive."""
        if self.enabled and self._timer is not None:
            print("MOTOR TIMER DEACTIVATED", datetime.datetime.now())
            self._timer.cancel()
            self.is_activated = False
| [
"chhuang215@gmail.com"
] | chhuang215@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.