blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3dafb3d4576186964f7d3265b17eb05cf0d5f78 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/359/usersdata/282/109815/submittedfiles/lecker.py | ac07eef3ffae2806101fd83deb886992b12a8634 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
c=int(input('Digite o número de consultas: '))
pedidos=[]
fabricados=[]
# Read the requested taco size for each of the c orders.
for i in range (0,c,1):
    pedidos.append(int(input('Digite o tamanho do taco: ')))
# Count fabricated tacos: a size seen for the first time is appended twice
# (template + serving), a repeated size only once.
for i in range(0,c,1):
    # BUG FIX: the original tested pedidos[1] (constant index) instead of
    # pedidos[i], so every order was compared against the second order only.
    if pedidos[i] not in fabricados:
        fabricados.append(pedidos[i])
    fabricados.append(pedidos[i])
print(len(fabricados))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e0719050ff49a3b843702d0edb828058ac1fa8b4 | 72dc9db1fa6272b1148f90e19d88799962075275 | /azureEventHubRSS.py | 19ef668a924935a80c5e6ae8c64a0a6fbd44ba67 | [] | no_license | Gyt94/PPD1516-CloudComputing | 73e33bd395dcdb552da16ffce6363fcd201f9955 | 1acb14d315703fe98c05371df10103dde78f65dc | refs/heads/master | 2021-01-18T22:40:21.424336 | 2016-06-04T10:53:43 | 2016-06-04T10:53:43 | 49,195,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,196 | py |
# NOTE: this script targets Python 2 (it relies on the builtin `unicode`).
from azure.servicebus import ServiceBusService
import json,codecs
import feedparser
import time
import config

# RSS feeds polled for fresh headlines.
EUROPE1_URL = 'http://www.europe1.fr/var/export/rss/europe1/actus.xml'
FRANCE24_URL = 'http://www.france24.com/fr/france/rss'

cpt = 0
# Event Hub client; credentials live in config.py, not in the source.
sbs = ServiceBusService(service_namespace=config.servns,
                        shared_access_key_name=config.key_name,
                        shared_access_key_value=config.key_value)


def build_payload(source, entry):
    """Build the pseudo-JSON payload string for one feed entry."""
    return ("{'source':'" + source + "',\'title\':\'" + unicode(entry.title)
            + "\','text':'" + unicode(entry.description) + "'}")


def send_payload(payload):
    """Push a payload to the 'iot' Event Hub (cp850, lossy, as before)."""
    sbs.send_event('iot', payload.encode('cp850', errors='replace'))


# Send the latest entry of each feed once at startup.
dernE = feedparser.parse(EUROPE1_URL).entries[0]
send_payload(build_payload('europe1', dernE))
dernLM = feedparser.parse(FRANCE24_URL).entries[0]
send_payload(build_payload('france24', dernLM))

# Then poll every 30 s and forward only entries that changed.
while True:
    europe = feedparser.parse(EUROPE1_URL)
    france24 = feedparser.parse(FRANCE24_URL)
    if dernE != europe.entries[0]:
        dernE = europe.entries[0]
        jason = build_payload('europe1', dernE)
        print(jason.encode('cp850', errors='replace'))
        send_payload(jason)
    if dernLM != france24.entries[0]:
        dernLM = france24.entries[0]
        jasonLM = build_payload('france24', dernLM)
        print(jasonLM.encode('cp850', errors='replace'))
        send_payload(jasonLM)
    cpt = cpt + 1
    print("boucle")
    time.sleep(30)
# | [
"gygy.tho@gmail.com"
] | gygy.tho@gmail.com |
f05442e9741b182f1e7c3ffa18d1cc16ef6bc203 | 7b1c5c1236caa3a79f6c7834a217d952d41d9347 | /portfolioWebsite/urls.py | f8537ba9a97684c9ad4ef837501d0191cdabaaab | [] | no_license | WallerTheDeveloper/portfolio_website | b00002387aef9aa3b4327552b3d7fce8f4187646 | a4e17b9c278a1c118ea40061e7a35cdf6fd77bf2 | refs/heads/main | 2023-08-29T08:44:10.643796 | 2021-10-16T19:53:39 | 2021-10-16T19:53:39 | 417,798,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | """portfolioWebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URLs are delegated to main_app; the Django admin lives under /admin/.
urlpatterns = [
    path("", include("main_app.urls")),
    path('admin/', admin.site.urls),
]
| [
"golo7ov.danil@gmail.com"
] | golo7ov.danil@gmail.com |
b41aa3d3259e4334540595adda736be80537b7a5 | 9dc178afac0e82800e2f8466a3d9850339db3a59 | /Assignment 4/settings.py | 7b0142a9ea0fa0b4adc242902946241e632487b3 | [] | no_license | katherinevelasco/SoftwareDesign | 3d9d14686e1dd047432846a6a609a88789f84da2 | 50531c6826aeb7b4b30e72dd7d956dfab89ecb55 | refs/heads/master | 2022-12-01T21:03:53.831025 | 2020-08-03T07:19:00 | 2020-08-03T07:19:00 | 273,292,546 | 2 | 0 | null | 2020-06-18T17:00:03 | 2020-06-18T16:50:51 | null | UTF-8 | Python | false | false | 3,185 | py | """
Django settings for djangoProject project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '^cg22)=yc@8_3$v_i3xr5u5k#@7vb6$_f#s9sm_gp(4-lav*a@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# 'accounts' is the local app; the rest are Django's default contrib apps.
INSTALLED_APPS = [
    'accounts',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoProject.urls'
# Templates are looked up in the project-level "templates" dir first,
# then inside each app (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Development default: file-based SQLite next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/' | [
"noreply@github.com"
] | noreply@github.com |
116c698dc0441a0dfb1b2be68349f103920bfe2f | 35a237030be25c38932368f6914db78acef7158e | /python_zipfile.py | 46043e411c8f425f4c5f31eb4590541934f5d12e | [] | no_license | Tanvir-Chowdhury/Python-Modules | 39206e2e5a541ecd69e208c6530cd22eed3d9f04 | 1523a82b8ad739120c6c5c98f01f775c8ababaae | refs/heads/master | 2023-06-23T21:57:50.233719 | 2021-07-28T12:16:55 | 2021-07-28T12:16:55 | 271,040,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from zipfile import ZipFile
import os
def get_all_file_paths(directory):
    """Recursively collect the path of every file under *directory*."""
    collected = []
    # os.walk yields (root, dirs, files) for each directory in the tree.
    for root, _, files in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in files)
    return collected
def main(directory="/home/codinxter/Downloads/", archive_name="my_python_zip.zip"):
    """Zip every file under *directory* into *archive_name*.

    The defaults preserve the original hard-coded behaviour; both values
    can now be overridden by callers.
    """
    filepaths = get_all_file_paths(directory)
    print("following files will be ziped")
    for filename in filepaths:
        print(filename)
    # Store each file in the archive under the path returned by os.walk.
    with ZipFile(archive_name, "w") as archive:
        for filename in filepaths:
            archive.write(filename)
if __name__ == "__main__":
    main()
from zipfile import ZipFile
# List the archive's contents, then unpack everything into the
# current working directory.
with ZipFile("my_python_zip.zip", "r") as archive:
    archive.printdir()
    archive.extractall()
| [
"tanvirvlogger@gmail.com"
] | tanvirvlogger@gmail.com |
edf5e83b359ed5b8efb8591884d8569faa659898 | 17baf167558456f2aaa702abdd3e95d33e1e1cd8 | /gics/gics/wsgi.py | 4b05ddba7d0a473c34e6281ef553af38168f227f | [] | no_license | jilljenn/gics.fr | e746d0ba901d5d59b8942f8d869a330b957c455c | 8a6a850a4011b88bf93b7776e97465c3554ea608 | refs/heads/master | 2020-04-06T04:13:54.883582 | 2017-02-24T10:03:46 | 2017-02-24T10:03:46 | 83,024,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for gics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Point Django at the project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gics.settings")
from django.core.wsgi import get_wsgi_application
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"vie@jill-jenn.net"
] | vie@jill-jenn.net |
766956206154a35eeb62808af4bc7e50542f7f8a | 1805a5bb1eb1256da8359f7ace546b9ebe29f293 | /games/tables.py | fbb153f96d29046e5af5472bec42ca8d8f82d62a | [] | no_license | loztop/oxaside | 57e48e45360dd80ba9315931334a78bc3d9ca80f | 9efca323820fbbc5ec69528bd14d37f9742ad028 | refs/heads/master | 2022-11-28T16:59:35.941956 | 2015-01-27T15:32:49 | 2015-01-27T15:32:49 | 29,323,550 | 0 | 0 | null | 2022-11-22T00:24:35 | 2015-01-15T23:33:52 | JavaScript | UTF-8 | Python | false | false | 2,332 | py | import django_tables2 as tables
from games.models import Game
from django.utils.safestring import mark_safe
from django.utils.html import escape
class DeleteColumn(tables.Column):
    """Extra table column rendering a per-row Delete button."""
    # Render the button even though the column has no backing value.
    empty_values = []
    def render(self, value, record):
        markup = '<button id="%s" class="btn btn-info">Delete</button>' % escape(record.id)
        return mark_safe(markup)
class UpdateColumn(tables.Column):
    """Extra table column rendering a per-row Update button."""
    # Render the button even though the column has no backing value.
    empty_values = []
    def render(self, value, record):
        markup = '<button id="%s" class="btn btn-info">Update</button>' % escape(record.id)
        return mark_safe(markup)
class UpdateTable(tables.Table):
    """Game listing with a Delete action column (duplicates GameTable's
    display columns; consider sharing a base table)."""
    delete = DeleteColumn()
    # update = UpdateColumn()
    #user = tables.Column(verbose_name="User")
    game_text = tables.Column(verbose_name="Details")
    contact_text = tables.Column(verbose_name="Contact")
    location_text = tables.Column(verbose_name="Location")
    players_needed = tables.Column(verbose_name="Spaces")
    kickoff_date = tables.Column(verbose_name="Kickoff")
    class Meta:
        model = Game
        # add class="paleblue" to <table> tag
        attrs = {'class': 'paleblue'}
        fields = ('game_text','contact_text','location_text','kickoff_date','players_needed',)
class GameTable(tables.Table):
    """Read-only listing of games (no action buttons)."""
    #user = tables.Column(verbose_name="User")
    game_text = tables.Column(verbose_name="Details")
    contact_text = tables.Column(verbose_name="Contact")
    location_text = tables.Column(verbose_name="Location")
    players_needed = tables.Column(verbose_name="Spaces")
    kickoff_date = tables.Column(verbose_name="Kickoff")
    class Meta:
        model = Game
        # add class="paleblue" to <table> tag
        attrs = {'class': 'paleblue'}
        fields = ('game_text','contact_text','location_text','kickoff_date','players_needed',)
#class UserTable(tables.Table):
# game_text = tables.Column(verbose_name="Details")
# contact_text = tables.Column(verbose_name="Contact")
# location_text = tables.Column(verbose_name="Location")
# players_needed = tables.Column(verbose_name="Spaces",accessor=4)
# kickoff_date = tables.Column(verbose_name="Kickoff")
# class Meta:
# model = Game
# # add class="paleblue" to <table> tag
# attrs = {'class': 'paleblue'}
# fields = ('game_text','contact_text','location_text','kickoff_date','players_needed',) | [
"lorenzuberger@gmail.com"
] | lorenzuberger@gmail.com |
d4ef79f1d42135b241425cfb23eada729d85805d | 420f974d85376031e66bb7241caedee1675b93ec | /init.py | a071836a49381b59b0ae48ee879ae0dacc8fbade | [] | no_license | uiandwe/chatting | 060c8b513ecd53db9519c97f99198c09cc918e0a | e8430cf4db173d44ee37601b96a8028271000cd1 | refs/heads/master | 2020-04-01T23:33:02.324646 | 2016-06-29T02:26:53 | 2016-06-29T02:26:53 | 62,188,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | __author__ = 'hyeonsj'
# db
# MySQL connection settings used by the chat app.
host = '127.0.0.1'
user = 'root'
# SECURITY NOTE(review): credentials are committed in plain text — move them
# to environment variables or an ignored local config file.
passwd = 'spoqa'
db = 'spoqa'
charset = 'utf8'
# logging level
# debug 10
# warning 30
# error 40
log_level = 10 | [
"uiandwe@gmail.com"
] | uiandwe@gmail.com |
15a008b1080fa777b4e6d8d5a5e00ca0b967ea59 | 3a351e36919aa20e833e26543f25b9e47761d42e | /filter.py | 940b83fab992d5ebdf1fd9b70eda2f67fa8e09e3 | [] | no_license | 047/pyspark_exercise | 56d4fb57e7d0817ccd84e39364bb4ae487b3d7b4 | 7d96d3182f397b8f6f51294ea43f6e6a2bdd6649 | refs/heads/master | 2023-04-23T23:21:08.523092 | 2021-05-17T18:34:41 | 2021-05-17T18:34:41 | 368,286,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | import sys
from os.path import exists
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
def filter_clients(personal_data_path, financial_data_path, countries_of_interest):
    """Join personal and financial client CSVs for the given countries.

    Reads both CSVs with Spark, keeps clients whose ``country`` column is in
    *countries_of_interest*, joins the two datasets on ``id``, renames the
    result columns and writes them as CSV into the ``client_data`` directory
    (relative to the working directory).
    """
    spark = SparkSession.builder.getOrCreate()
    spark_context = spark.sparkContext
    # Use Spark's JVM-side log4j logger so messages land in the Spark logs.
    log4jLogger = spark_context._jvm.org.apache.log4j
    LOGGER = log4jLogger.LogManager.getLogger(__name__)
    LOGGER.warn("Transforming personal data")
    personalDF = spark.read.csv(personal_data_path, header=True)
    # Deduplicate the country list before filtering.
    countries_of_interest = set(countries_of_interest)
    personalDF = personalDF.where(col('country').isin(countries_of_interest))
    personalDF = personalDF.select("id", "email")
    LOGGER.warn("Transforming financial data")
    financialDF = spark.read.csv(financial_data_path, header=True)
    financialDF = financialDF.select("id", "btc_a", "cc_t")
    LOGGER.warn("Join personal and financial data")
    emails_and_details = personalDF.join(financialDF, on='id')
    LOGGER.info("Rename result columns")
    emails_and_details = emails_and_details.\
        withColumnRenamed('id', 'client_identifier').\
        withColumnRenamed('btc_a', 'bitcoin_address').\
        withColumnRenamed('cc_t', 'credit_card_type')
    LOGGER.warn("Writing results")
    emails_and_details.write.option("header", True).csv('client_data')
if __name__ == '__main__':
    def is_csv_filename(fname):
        """True when *fname* ends in .csv and exists on disk."""
        return fname.endswith('.csv') and exists(fname)
    if len(sys.argv) >= 4:
        personal, financial, countries = sys.argv[1], sys.argv[2], sys.argv[3:]
        # sys.argv entries are always strings and the slice is non-empty when
        # len(argv) >= 4, so the original len/isinstance checks were redundant.
        if is_csv_filename(personal) and is_csv_filename(financial):
            filter_clients(personal, financial, countries)
            exit(0)
    # Typo fix in the usage message: "presonal" -> "personal".
    print('Sorry, wrong arguments.\nUsage: '
          f'python {__file__} path/to/personal_data.csv path/to/financial_data.csv country_name_1 country_name_2 .. country_name_N')
| [
"m.d.volodin@gmail.com"
] | m.d.volodin@gmail.com |
75909244f23ef13c6850631c801a95fcc525f524 | e32ee307e4c59cc18f9dea18d797784a1b23148f | /calculate the number of local extrema in the given array..py | b2eb8e2bd69cb0f68b09931e45bd4707c0c00a29 | [] | no_license | GuhanSGCIT/SGCIT | f4ab44346186d45129c74cbad466c6614f9f0f08 | 8b2e5ccf693384aa22aa9d57f39b63e4659f6261 | refs/heads/master | 2020-07-11T05:47:54.033120 | 2020-07-07T05:02:41 | 2020-07-07T05:02:41 | 204,459,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | n = int(input())
# Parse the sequence, then count strict local extrema at the interior
# positions 1 .. n-2 (n was read from the previous input line).
values = [int(token) for token in input().split()]
count = 0
for pos in range(1, n - 1):
    left, mid, right = values[pos - 1], values[pos], values[pos + 1]
    is_peak = mid > left and mid > right
    is_valley = mid < left and mid < right
    if is_peak or is_valley:
        count += 1
print(count)
| [
"noreply@github.com"
] | noreply@github.com |
7b13f2453af39f2d8ce8980fb548903267988fb9 | e47d5da2a947c3b3a834817d0b084ee65d302067 | /atcoder.jp/aising2020/aising2020_b/Main.py | 066248010306017828be4a1ada26949f6befc4c7 | [] | no_license | aki-nlp/AtCoder | 3293b9b183c0a8cefbf20d7f4f491c6f1e7604b8 | 9385805cbb1fa158f6d3c4a2415cdf7ba94547e5 | refs/heads/master | 2023-02-25T06:04:10.913237 | 2020-10-03T12:02:00 | 2020-10-03T12:02:00 | 296,792,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | def main():
    # n is read to consume the first input line but not used afterwards.
    n = int(input())
    a = list(map(int, input().split()))
    # Keep only elements at even 0-based positions (odd 1-based indices).
    a = a[::2]
    ans = 0
    # Count how many of those are odd.
    for aa in a:
        if aa%2 == 1:
            ans += 1
    print(ans)
if __name__ == '__main__':
    main()
"akiuo.ou@gmail.com"
] | akiuo.ou@gmail.com |
4fc2004df32c632fb5b93a61788feb353544192a | b65325d8381b2cd1d0306c441915b621aae3b372 | /day3.py | bd97d9daf51544f13b2644d0d904e237206b781c | [] | no_license | ljuba95/advent2016 | fe435007db41a4f191d4b3e118fc4aa7af71df4c | d2a50f7f1c3e62fceee4b936e6fbb2176c0ccd9c | refs/heads/master | 2021-01-22T23:06:53.826487 | 2017-03-25T18:19:08 | 2017-03-25T18:19:08 | 85,611,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
num = 0
with open("input/input3.txt") as f:
for line in [x.strip() for x in f.readlines()]:
[x, y, z] = sorted(map(int,line.split()))
if x + y > z: num+=1
print(num)
| [
"ljuba95@hotmail.com"
] | ljuba95@hotmail.com |
7138199d17ce5d21d5395a8ea2228f815ea2bb79 | 27acb207b21b4572561de4a5f7dfb9740318c0b8 | /Python-Data-Representations/Week1/Ex6_W1_substring.py | b5a1afe3b91a4d51ec0978800eac5b19ff906c2d | [] | no_license | iamieht/intro-scripting-in-python-specialization | ee836ef05b62f6c74fe8da3ee137687b4d0035cf | 8ea4f85f0ed3dcd541f89521c013335e9eb32980 | refs/heads/master | 2021-01-16T05:35:51.616276 | 2020-06-08T18:39:45 | 2020-06-08T18:39:45 | 242,993,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | """
Function that tests for substring
"""
def is_substring(example_string, test_string):
    """Return True when *test_string* occurs inside *example_string*."""
    # str.find returns -1 only when the needle is absent.
    return example_string.find(test_string) != -1
# Tests
# Each call is printed so the values under "# Output" can be checked by
# eye; note the matching is case-sensitive ("it's" vs "It's").
example_string = "It's just a flesh wound."
print(is_substring(example_string, "just"))
print(is_substring(example_string, "flesh wound"))
print(is_substring(example_string, "piddog"))
print(is_substring(example_string, "it's"))
print(is_substring(example_string, "It's"))
# Output
#True
#True
#False
#False
#True
"iamieht@gmail.com"
] | iamieht@gmail.com |
42e3bf03fca2ccebb1cc469a3293a7cde0dec928 | 260611f7fa8743dd0080affbedf63d882f8c0f56 | /Maison.py | 23010504748f331049968ff49881b5bedc91fd72 | [] | no_license | servajon/jeu | 4b18cfacbaee4b5a7423f979df31777967418173 | d885254acce5eeacb7529bad509b162143050443 | refs/heads/master | 2023-03-01T10:22:57.605456 | 2021-02-11T16:31:18 | 2021-02-11T16:31:18 | 337,098,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import pygame
class Maison(object):
    """A house sprite positioned at (x, y), chosen by the inhabitant's name."""

    def __init__(self, x, y, nom):
        self.nom = nom
        self.x = x
        self.y = y
        self.centrex = x
        self.centrey = y
        # Map each known inhabitant name to its sprite file; any other
        # name falls back to the potato house.
        sprite_paths = {
            'Artichaut': 'sprit/assets-image-maison-Fruit+Légume/maison-artichaut_gamejam2021.png',
            'Pastèque': 'sprit/assets-image-maison-Fruit+Légume/maison_pasteque-gamejam2021.png',
            'Grande Noix': 'sprit/assets-image-maison-Fruit+Légume/maison-GrandeNoix_gamejam2021.png',
            'Pomme Dorée': 'sprit/assets-image-maison-Fruit+Légume/maison-goldenApple_gamejam2021.png',
            'Avocat': 'sprit/assets-image-maison-Fruit+Légume/maison-avocat_gamejam2021.png',
        }
        default_path = 'sprit/assets-image-maison-Fruit+Légume/maison-patate_gamejam2021.png'
        self.sprit = pygame.image.load(sprite_paths.get(self.nom, default_path))

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def get_centrex(self):
        return self.centrex

    def get_centrey(self):
        return self.centrey

    def get_nom(self):
        return self.nom

    def draw(self, win):
        # Blit the house sprite with (x, y) as its top-left corner.
        win.blit(self.sprit, (self.x, self.y))

    def __str__(self):
        return self.get_nom()
| [
"m.servajon@laposte.net"
] | m.servajon@laposte.net |
72ad00e39cc8e6c09b50e778412f8d9d2094a9e5 | 3996539eae965e8e3cf9bd194123989741825525 | /EventFilter/Utilities/rawStreamFileWriterForBU_cfi.py | 55b0b4128380e1fd75980e1887abc4c5ada3b947 | [] | no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 291 | py | import FWCore.ParameterSet.Config as cms
# Auto-generated cfi: default configuration for the RawStreamFileWriterForBU
# output module, consuming the 'rawDataCollector' product.
rawStreamFileWriterForBU = cms.OutputModule('RawStreamFileWriterForBU',
    source = cms.InputTag('rawDataCollector'),
    numEventsPerFile = cms.uint32(100),
    frdVersion = cms.uint32(6),
    microSleep = cms.int32(0),
    frdFileVersion = cms.uint32(0)
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
b7b69d6d417a01a9049cb34dc3f1738cb8619b6e | 70788b6851ca21e228d765b2f7e5e74cb3f885ca | /printfile.py | 7cf13a18cafc244bd557ea8c59c7bdb8ced7b5fd | [] | no_license | johnnysaldana/python_practice_exercises | 7e324acf85cc0b852fe0d17d17f7c69fdbb38846 | 2656ac679d80b6cf1774a706dda120d901cc6c14 | refs/heads/master | 2021-01-21T15:00:34.360038 | 2017-06-25T16:02:19 | 2017-06-25T16:02:19 | 95,369,769 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # printfile.py
def printfile(fname):
    """Print the whitespace-separated tokens of *fname* as a list."""
    with open(fname) as handle:
        tokens = handle.read().split()
    print(tokens)
def main():
    # NOTE(review): "inp.cwl" is assumed to sit in the working directory;
    # the call raises FileNotFoundError otherwise.
    printfile("inp.cwl")
# Runs on import as well as direct execution (no __main__ guard).
main()
| [
"noreply@github.com"
] | noreply@github.com |
ffbd97eec034ae214e9ce58a07ae52a18ef44d5b | 1f5b24ad2baaf0138d708fec9d8cde963e6dfd17 | /gorden_crawler/spiders/item_luisaviaroma.py | 0390d551050cdd7a09789afa8cfa61d6123fe6d9 | [
"Apache-2.0"
] | permissive | Enmming/gorden_cralwer | d4661f8cd31f88303fddb4405a8abe64db1b703c | 3c279e4f80eaf90f3f03acd31b75cf991952adee | refs/heads/master | 2020-03-19T21:45:54.559137 | 2018-06-11T16:24:29 | 2018-06-11T16:24:29 | 136,949,276 | 2 | 1 | null | 2018-06-11T16:04:35 | 2018-06-11T15:59:26 | null | UTF-8 | Python | false | false | 1,720 | py | # -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import Selector
from gorden_crawler.items import BaseItem, ImageItem, SkuItem, Color
import scrapy
from scrapy import Request
from scrapy_redis.spiders import RedisSpider
from random import random
from urllib import quote
import re
import execjs
from gorden_crawler.spiders.shiji_base import ItemSpider
from gorden_crawler.spiders.luisaviaroma import LuisaviaromaSpider
class ItemLuisaviaromaSpider(ItemSpider):
    """Scrapy spider for single luisaviaroma.com item pages, fed via Redis."""
    name = "item_luisaviaroma"
    allowed_domains = ["luisaviaroma.com"]
    # Per-spider overrides: rotate user agents and route through HTTPS proxies.
    custom_settings = {
        'DOWNLOAD_TIMEOUT': 30,
        'COOKIES_ENABLED': True,
        'DOWNLOADER_MIDDLEWARES': {
            # 'gorden_crawler.middlewares.MyCustomDownloaderMiddleware': 543,'
            'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
            'gorden_crawler.contrib.downloadmiddleware.rotate_useragent.RotateUserAgentMiddleware': 1,
            'gorden_crawler.middlewares.proxy_ats.ProxyHttpsMiddleware': 100,
            'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
        }
    }
    '''
    正式运行的时候,start_urls为空,通过redis来喂养爬虫
    '''
    # (In production start_urls stays empty; URLs are fed through Redis.)
    start_urls = (
    )
    base_url = 'https://www.luisaviaroma.com'
    def make_requests_from_url(self, url):
        # Force US/USD/Chinese-locale cookies so every page renders consistently.
        return Request(url, dont_filter=True, cookies={'LVR_UserData': 'cty=US&curr=USD&vcurr=USD&lang=ZH&Ver=4 '})
    '''具体的解析规则'''
    # (The concrete parsing rules live in LuisaviaromaSpider.handle_parse_item.)
    def parse(self, response):
        item = BaseItem()
        item['type'] = 'base'
        item['from_site'] = 'luisaviaroma'
        item['url'] = response.url
        return LuisaviaromaSpider().handle_parse_item(response, item)
| [
"em.yu@idiaoyan.com"
] | em.yu@idiaoyan.com |
b518623e6b6b8ea63ff29e7ca800daeb7394776f | 69d563773005cc3c8f62ffb5b967d7e4485d379d | /repositories/country_repository.py | dd507a3b3c4ce81d4934cabf69267af48b3e04e0 | [] | no_license | M4RC1N76/python_project | c7b5f36324ce9ae2d474f12f23c732346d8e9c08 | 2ca532ceea9e1111d96d43d63b4e521757a1be35 | refs/heads/main | 2023-07-31T22:21:19.917651 | 2021-09-24T09:58:35 | 2021-09-24T09:58:35 | 407,251,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | from models.city import City
from db.run_sql import run_sql
from models.country import Country
def save(country):
    """Insert *country*, set its generated id, and return the raw result rows."""
    sql = "INSERT INTO countries (name, visited) VALUES (%s, %s) RETURNING *" # removed id from brackets
    values = [country.name, country.visited] # country.id was wrong and removed
    results = run_sql(sql, values)
    # RETURNING * hands back the inserted row, including the new primary key.
    id = results[0]['id']
    country.id = id
    return results
def select_all():
    """Return every row of the countries table as Country objects."""
    rows = run_sql("SELECT * FROM countries")
    return [Country(row['name'], row['visited'], row['id']) for row in rows]
def select(id):
    """Return the Country with primary key *id*, or None when absent."""
    country = None
    sql = "SELECT * FROM countries WHERE id = %s"
    value = [id]
    # BUG FIX: the original indexed [0] unconditionally (its own "# ERROR"
    # note), raising IndexError for unknown ids; the stray debug print(id)
    # is removed as well.
    results = run_sql(sql, value)
    if results:
        result = results[0]
        country = Country(result['name'], result['visited'], result['id'])
    return country
def delete_all():
    """Remove every row from the countries table."""
    sql = "DELETE FROM countries"
    # BUG FIX: the original referenced run_sql without calling it, so the
    # DELETE statement never executed.
    run_sql(sql)
def delete(id):
    """Delete the country row with the given primary key."""
    sql = "DELETE FROM countries WHERE id = %s"
    values = [id]
    run_sql(sql, values)
# ADD UPDATE method
def update(country):
    """Persist *country*'s name and visited flag back to its existing row."""
    sql = "UPDATE countries SET (name, visited) = (%s, %s) WHERE id = %s"
    values = [country.name, country.visited, country.id]
    run_sql(sql, values)
def cities(country):
    """Return the City objects that belong to *country*."""
    rows = run_sql("SELECT * FROM cities WHERE country_id = %s", [country.id])
    return [City(row['name'], row['visited'], row['id']) for row in rows]
"borowski1976@yahoo.com"
] | borowski1976@yahoo.com |
d470c0605d5fe7642c5fee077b40a812c7a198f4 | ba69d3462e6b6031a460052d930076536e8fb780 | /test/test_basic_func.py | f6e2454658c204c998f1b6f63b1eebe3b2fcbe0c | [] | no_license | rahanahu/py_unittest_beginner | 34120d9cd9618d188b711bc88f27c5fa5ba1a58e | 2bfb9500ff44cd8927f51e8b45190614f8947b64 | refs/heads/master | 2022-08-02T20:23:36.566776 | 2020-05-30T19:59:42 | 2020-05-30T19:59:42 | 268,143,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import unittest
import sys
import os
TO_ROOT = '../'
HERE = os.path.dirname(__file__)
sys.path.append(os.path.join(HERE, TO_ROOT))
from source.basic_func import *
class TestBasicFunc(unittest.TestCase):
    """Unit tests for source/basic_func.py."""
    def test_add_num(self):
        # add_num should return the sum of its two arguments.
        # (Removed a redundant `pass` that followed the assertion.)
        self.assertEqual(add_num(2,3),5)
    def test_sum_list(self):
        # sum_list should total all elements of the list.
        self.assertEqual(sum_list([1,2,3,4,5]), 1+2+3+4+5)
if __name__ == "__main__":
    # NOTE(review): this runs nothing — presumably unittest.main() was
    # intended so the tests execute when the file is run directly.
    pass
"ra87who@gmail.com"
] | ra87who@gmail.com |
ba1a284531e5e1f2b4e492eca0027f9a3e9bc9b6 | 102a33464fd3a16ceedd134e9c64fea554ca5273 | /apps/shop/forms.py | 22014c7b482f0b94dbeda97e4c41e71fdb9827e3 | [] | no_license | pythonguru101/django-ecommerce | b688bbe2b1a53c906aa80f86f764cf9787e6c2fe | f94de9c21223716db5ffcb86ba87219da88d2ff4 | refs/heads/master | 2020-07-24T14:57:02.047702 | 2020-06-10T06:06:23 | 2020-06-10T06:06:23 | 207,961,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | import re
from django import forms
from django.utils.translation import ugettext as _
from markdownx.widgets import MarkdownxWidget
from apps.shop.models import Product, ShippingType, Category
from .plugshop.forms import OrderForm as PlugshopOrderForm
class CategoryAdminForm(forms.ModelForm):
    """Admin form for Category with Markdown editors on both description fields."""
    class Meta:
        model = Category
        fields = '__all__'
        widgets = {
            'short_description': MarkdownxWidget(),
            'description': MarkdownxWidget(),
        }
class ProductAdminForm(forms.ModelForm):
    """Admin form for Product with Markdown editors on both description fields."""
    class Meta:
        model = Product
        fields = '__all__'
        widgets = {
            'short_description': MarkdownxWidget(),
            'description': MarkdownxWidget(),
        }
class OrderForm(PlugshopOrderForm):
    """Checkout form: contact fields plus shipping-type-dependent validation.

    Error messages are user-facing Russian strings wrapped in ugettext.
    """
    shipping_type = forms.ModelChoiceField(empty_label=None,
        queryset=ShippingType.objects.filter(is_active=True))
    name = forms.CharField(required=True, error_messages={
        'required': _(u'Укажите имя')
    })
    email = forms.EmailField(required=True, error_messages={
        'required': _(u'Укажите email')
    })
    phone = forms.CharField(required=True, error_messages={
        'required': _(u'Укажите телефон')
    })
    def __require(self, name, error):
        # Mark field *name* with *error* when its cleaned value is empty.
        # NOTE(review): len(value) raises TypeError if the field is missing
        # (value is None) — assumes the field was always submitted.
        value = self.cleaned_data.get(name, None)
        if len(value) == 0:
            self.errors[name] = [error]
    def clean_name(self):
        """Split the free-form name; require full name for zip-code shipping."""
        name = self.cleaned_data.get('name').strip().split()
        shipping_type = self.cleaned_data.get('shipping_type')
        # Postal shipping needs surname, first name and patronymic (3 words).
        if shipping_type.require_zip_code and len(name) < 3:
            raise forms.ValidationError(_(u'Введите фамилию имя и отчество'))
        if len(name):
            # First word is the last name; the rest becomes the first name.
            self.cleaned_data['last_name'] = name[0]
            self.cleaned_data['first_name'] = " ".join(name[1:])
        else:
            raise forms.ValidationError(_(u'Введите имя'))
        return " ".join(name)
    def clean(self):
        """Cross-field validation driven by the chosen shipping type."""
        cleaned_data = self.cleaned_data
        shipping_type = cleaned_data.get('shipping_type')
        if shipping_type:
            if shipping_type.require_address:
                self.__require('address', _(u'Не указан адрес доставки'))
            if shipping_type.require_zip_code:
                self.__require('zip_code', _(u'Не указан индекс'))
                self.__require('city', _(u'Не указан город'))
                # Russian postal codes are exactly six digits.
                zip_code = self.cleaned_data.get('zip_code', None)
                if re.search(r'^\d{6}$', zip_code) is None:
                    self.errors['zip_code'] = [_(u'Индекс состоит из 6 цифр')]
        return cleaned_data
| [
"pythonguru101@gmail.com"
] | pythonguru101@gmail.com |
6a6d24c52e172aba9136809c35577ee343ebaed0 | 8da40e29e5881421d3db9c7c5a75c89369b27b46 | /3.tts.py | 1130f897eeefa1c2fcf15f9c2fb17994149bc64e | [] | no_license | raylee0703/Embedded_system_project | 424513a14061e45325ff96d9ead0d55161406e98 | 3dba93ac1fb130d8b0549ad7193d4f0c39aa0326 | refs/heads/master | 2022-12-19T05:18:20.893535 | 2020-09-20T11:52:17 | 2020-09-20T11:52:17 | 297,064,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from gtts import gTTS
import os
import dht11
import RPi.GPIO as GPIO
# Read the DHT11 temperature/humidity sensor wired to physical pin 7.
GPIO.setmode(GPIO.BOARD)
instance = dht11.DHT11(pin=7)
result = instance.read()
hum = result.humidity  # read but never used below
temp = result.temperature
# Synthesize a spoken report with gTTS and play it through omxplayer.
report_string = "The temperature is now " + str(temp) + " degrees"
tts = gTTS(text=report_string, lang='en')
tts.save('weather.flac')
os.system('omxplayer -o local -p weather.flac > /dev/null 2>&1')
| [
"raylee0703@gmail.com"
] | raylee0703@gmail.com |
eda8dd15828e7c7de2fd1653046a30914f5492b4 | adae7220c201bdd6b88f4f31a230d2124ac336f9 | /mysite/settings.py | 026f6fb2ad0e26e61e49663b527b74c91b729259 | [] | no_license | scampins/my-first-blog | 97a5e861b7eba57b5030a8a390ed40ab3f86529a | 625e7ab4fd1c386522001c97a9a5c90aae170b77 | refs/heads/master | 2020-04-23T07:04:09.108129 | 2019-02-17T14:59:25 | 2019-02-17T14:59:25 | 170,995,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control while the site is
# also served from a public host (see ALLOWED_HOSTS below) -- it should be
# moved to an environment variable and rotated.
SECRET_KEY = 'loso4v86y3h26rg*o^$l%r0_w)$%-u4!8n(re0l*n)=*l)u5s+'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True combined with a non-local ALLOWED_HOSTS entry
# suggests debug mode may be live on the deployed site -- verify.
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'tecnologia.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"scampins@gmail.com"
] | scampins@gmail.com |
bad48b717af6d0fe998ea77eafc3f833bc2306be | 438a0364d37383a914ecf337ffdb63cb891e54a2 | /adOfRoommate/migrations/0003_auto_20200719_0213.py | 5d1e20cdd918db31edb3422afc47cbe96ad1f9b4 | [] | no_license | Amiti3/HamAshian | ac846df546272d6089b870dc21a1d8ffbe461f9a | 278b683c33c70f0dd08e1d0469c8bf6ccda8ff81 | refs/heads/master | 2022-11-06T22:12:12.623500 | 2020-07-21T20:31:08 | 2020-07-21T20:39:22 | 281,482,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 3.0.8 on 2020-07-18 21:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters adofroommate.date_publish.
    # NOTE(review): the DateField default below is a timestamp frozen at
    # makemigrations time -- the model most likely used
    # default=datetime.datetime.now() (called once) rather than passing a
    # callable such as django.utils.timezone.now. The fix belongs in the
    # model; this already-applied migration should stay as generated.
    dependencies = [
        ('adOfRoommate', '0002_auto_20200717_1622'),
    ]
    operations = [
        migrations.AlterField(
            model_name='adofroommate',
            name='date_publish',
            field=models.DateField(default=datetime.datetime(2020, 7, 19, 2, 13, 40, 505720)),
        ),
    ]
| [
"mari.ghayouri@gmail.com"
] | mari.ghayouri@gmail.com |
4a8a08909397b5d1c28e2f029ec69e5bba7a0535 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2437/60586/311745.py | df394328050a5b32f1a4d7b71b3a5abaa5a94c4e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | x=input()
# NOTE(review): hard-coded outputs keyed to specific judge inputs (an
# online-judge workaround), not a general solution.
if x=="6 2 ":
    print(6,end="")
# NOTE(review): this second `if` starts a new chain, so input "6 2 " also
# reaches the final else below and is echoed back -- probably meant `elif`.
if x=="6 3 ":
    print(1,end="")
elif x=="8 3 ":
    print(3,end="")
elif x=="8 5 ":
    print(0,end="")
else:
print(x) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
6622914b56a3c2109a3987bd2e46dcaa651add0a | f4fb002cca1f2fa60924b6dabb7acf93b2a46be3 | /mysql.py | ab9c0e33ad2c1e139d344cb92042411f430bfe45 | [] | no_license | ManT21/SevendAutoTest | 909d71d731cf53ba1f10f82108b606e5f8ab9911 | 80527bc439395a14a0ff00d773123a6eac2019b7 | refs/heads/master | 2020-07-09T13:15:48.440093 | 2019-08-23T10:28:35 | 2019-08-23T10:28:35 | 203,977,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | #!/usr/bin/env python
# coding=utf-8
import pymysql
def connectdb():
    """Open a MySQL connection; return it, or None if connecting fails."""
    # Open the database connection.
    # (Original note, translated): replace the user name and password with
    # your own MySQL credentials; the sample assumed a TESTDB database
    # containing a Student table.
    # NOTE(review): credentials are hard-coded in source, and the bare
    # except hides the real connection error -- callers only see a printed
    # message and an implicit None return.
    try:
        db = pymysql.connect(host='10.40.11.180', user='root', password='dafy1024', port=3306)
        return db
    except:
        print("连接失败")
def fetchonedb(sql):
    """Execute *sql* and return the first row (a dict), or None.

    None is returned both when the query yields no rows and when it
    raises (preserving the original print-and-swallow behaviour).
    """
    db = connectdb()
    # DictCursor: rows come back as column-name -> value mappings.
    cursor = db.cursor(pymysql.cursors.DictCursor)
    try:
        cursor.execute(sql)
        return cursor.fetchone()
    except Exception:
        # Keep the original best-effort contract: report and fall through.
        print("Error: unable to fetch data")
    finally:
        # BUG FIX: the original placed db.close() after an early `return`,
        # so every successful query leaked its connection; close always.
        cursor.close()
        db.close()
def updatedb(sql):
    """Execute a data-modifying statement and commit it.

    Exceptions propagate to the caller (the original's except clause was
    commented out), but the cursor and connection are now always closed.
    """
    db = connectdb()
    cursor = db.cursor(pymysql.cursors.DictCursor)
    try:
        cursor.execute(sql)
        db.commit()
    finally:
        # BUG FIX: the original closed only on the success path, leaking
        # the connection whenever execute()/commit() raised.
        cursor.close()
        db.close()
| [
"luoyujuan@7daichina.com"
] | luoyujuan@7daichina.com |
d823fca9b27f34af478f6c88c97725a4014d1c14 | c7aadaba9ee8f8f28cf1b2fc604d671f12675b49 | /src/transient/diffusion/d3_d2D.py | 2085a7f7796dc3b1d05dc6336268aa3832a7d63b | [] | no_license | ellipsis14/fenics-tutorial | 2147656822afa36e4e6b8d39e9728d63708d6c73 | a1d9a7352675048b9d7f388b9b737701e7e78399 | refs/heads/master | 2021-01-15T23:45:09.826960 | 2015-03-04T10:46:33 | 2015-03-04T10:46:33 | 31,659,473 | 1 | 0 | null | 2015-03-04T13:54:36 | 2015-03-04T13:54:36 | null | UTF-8 | Python | false | false | 3,107 | py | """
FEniCS tutorial demo program: Diffusion equation with Dirichlet
conditions and a solution that will be exact at all nodes.
As d2_d2D.py, but here we test various start vectors for iterative
solution of the linear system at each time level.
The script d3_d2D_script.py runs experiments with different start
vectors and prints out the number of iterations.
"""
from dolfin import *
import numpy, sys
numpy.random.seed(12)
# zero, random, default, last
initial_guess = 'zero' if len(sys.argv) == 1 else sys.argv[1]
# PETSc, Epetra, MTL4,
la_backend = 'PETSc' if len(sys.argv) <= 2 else sys.argv[2]
parameters['linear_algebra_backend'] = la_backend
# Create mesh and define function space
nx = ny = 40
mesh = UnitSquareMesh(nx, ny)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
alpha = 3; beta = 1.2
u0 = Expression('1 + x[0]*x[0] + alpha*x[1]*x[1] + beta*t',
alpha=alpha, beta=beta, t=0)
class Boundary(SubDomain): # define the Dirichlet boundary
def inside(self, x, on_boundary):
return on_boundary
boundary = Boundary()
bc = DirichletBC(V, u0, boundary)
# Initial condition
u_1 = interpolate(u0, V)
u_2 = Function(V)
#u_1 = project(u0, V) # will not result in exact solution!
dt = 0.9 # time step
T = 10*dt # total simulation time
# Define variational problem
# Laplace term
u = TrialFunction(V)
v = TestFunction(V)
a_K = inner(nabla_grad(u), nabla_grad(v))*dx
# "Mass matrix" term
a_M = u*v*dx
M = assemble(a_M)
K = assemble(a_K)
A = M + dt*K
bc.apply(A)
# f term
f = Expression('beta - 2 - 2*alpha', beta=beta, alpha=alpha)
# Linear solver initialization
#solver = KrylovSolver('cg', 'ilu')
solver = KrylovSolver('gmres', 'ilu')
#solver = KrylovSolver('gmres', 'none') # cg doesn't work, probably because matrix bc makes it nonsymmetric
solver.parameters['absolute_tolerance'] = 1E-5
solver.parameters['relative_tolerance'] = 1E-17 # irrelevant
solver.parameters['maximum_iterations'] = 10000
if initial_guess == 'default':
solver.parameters['nonzero_initial_guess'] = False
else:
solver.parameters['nonzero_initial_guess'] = True
u = Function(V)
set_log_level(DEBUG)
print 'nonzero initial guess:', solver.parameters['nonzero_initial_guess']
# Compute solution
u = Function(V)
t = dt
while t <= T:
print 'time =', t
# f.t = t # if time-dep f
f_k = interpolate(f, V)
F_k = f_k.vector()
b = M*u_1.vector() + dt*M*F_k
u0.t = t
bc.apply(b) # BIG POINT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if initial_guess == 'zero':
u.vector()[:] = 0
elif initial_guess == 'last':
pass
elif initial_guess == 'random':
u.vector()[:] = numpy.random.uniform(-1, 1, V.dim())
elif t >= 2*dt and initial_guess == 'extrapolate':
u.vector()[:] = 2*u_1.vector() - u_2.vector()
solver.solve(A, u.vector(), b)
# Verify
u_e = interpolate(u0, V)
u_e_array = u_e.vector().array()
u_array = u.vector().array()
print 'Max error, t=%-10.3f:' % t, numpy.abs(u_e_array - u_array).max()
t += dt
u_2.assign(u_1)
u_1.assign(u)
| [
"hpl@simula.no"
] | hpl@simula.no |
8bf896583d058f0c4eb88b11b3e5b5b50bbfd43c | 749f867b96f4021cf80b1c298db6b14756a23cd0 | /030CAICT-AtlasToolkit/main_last_v1.py | 46de87b5b0d20deeca1e6cc52ada7a11c4a6d382 | [] | no_license | mandeling/Crawler4Caida | 4e4ae53ca64bff140d1353171c774522103aace4 | 4f85526d6ea49e7206038e0c9b8f4d87b488bd45 | refs/heads/master | 2022-12-23T22:25:03.815280 | 2020-09-23T10:31:34 | 2020-09-23T10:31:34 | 297,939,217 | 1 | 0 | null | 2020-09-23T10:50:56 | 2020-09-23T10:50:55 | null | UTF-8 | Python | false | false | 12,745 | py | # coding:utf-8
"""
create on Feb 29. 2020 By Wenyan YU
Function:
实现CAICT地图绘制工具箱(CAICT-AtlasToolkit)的主界面
"""
from tkinter import *
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox
import tkinter.filedialog
from ttkthemes import ThemedTk, ThemedStyle
def get_screen_size(window):
    """Return the display resolution of *window*'s screen as (width, height)."""
    screen_w = window.winfo_screenwidth()
    screen_h = window.winfo_screenheight()
    return screen_w, screen_h
def get_window_size(window):
    """Return *window*'s requested size as a (width, height) tuple."""
    req_w = window.winfo_reqwidth()
    req_h = window.winfo_reqheight()
    return req_w, req_h
def center_window(root, width, height):
    """Resize *root* to width x height and park it 1/8 of the leftover
    screen space from the top-left corner (despite the name, not a true
    centring -- a /2 would centre it)."""
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    offset_x = (screen_w - width) / 8
    offset_y = (screen_h - height) / 8
    geometry = '%dx%d+%d+%d' % (width, height, offset_x, offset_y)
    root.geometry(geometry)
class App:
def __init__(self, root):
"""
初始化界面
:param root:
"""
# 初始化参数
self.aim_v_radio = tk.IntVar() # 绘图目标单选按钮值
self.tool_v_radio = tk.IntVar() # 绘图工具单选按钮值
self.root = root
# 增加菜单栏
menu_bar = Menu(root)
root.config(menu=menu_bar)
# #增加文件一级菜单
file_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="文件(F)", menu=file_menu)
file_menu.add_command(label="新建画布")
file_menu.add_command(label="打开文件")
file_menu.add_separator()
file_menu.add_command(label="退出", command=self.quit)
# #增加工作区一级菜单
workplace_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="工作区", menu=workplace_menu)
workplace_menu.add_command(label="返回主页", command=self.return_main)
# #增加视图一级菜单
view_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="视图(V)", menu=view_menu)
view_menu.add_command(label="全屏")
# #增加工具一级菜单
tool_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="工具(T)", menu=tool_menu)
tool_menu.add_command(label="选项")
tool_menu.add_command(label="在线文档和支持")
# #增加窗口一级菜单
window_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="窗口(W)", menu=window_menu)
window_menu.add_command(label="配置")
# #增加帮助一级菜单
help_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="帮助(H)", menu=help_menu)
help_menu.add_command(label="检查更新")
help_menu.add_command(label="关于")
# 增加左边画布 Frame
self.cv_frame = Frame(root, width=600, height=685, bg='#fff2cc')
self.cv_frame.grid(row=0, rowspan=5, column=0, sticky=W)
self.cv = Canvas(self.cv_frame, width=600, height=685, bg='#fff2cc')
self.cv.grid(row=0, column=0)
"""
显示画布中的图片
"""
global image
global cv_bg
cv_bg = PhotoImage(file="./cv_bg.PNG")
image = self.cv.create_image(600, 685, ancho='se', image=cv_bg)
# 增加右边功能 Frame
func_frame_top = Frame(root, width=160)
func_frame_top.grid(row=0, column=1, sticky=N)
func_frame_mid = Frame(root, width=160)
func_frame_mid.grid(row=1, column=1, sticky=N)
func_frame_bottom = Frame(root, width=160)
func_frame_bottom.grid(row=4, column=1, sticky=S)
# # 增加绘图向导Button
Button(func_frame_top, command=self.draw_guide_init, text="绘图向导", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=0, column=0, sticky=N)
# # 增加作品一览Button
Button(func_frame_top, text="作品一览", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=1, column=0, sticky=N)
# # 增加绘图工具Button
Button(func_frame_mid, text="绘图工具", anchor="e", width=21, fg='white', bg='#c05046').grid(row=0, column=0, sticky=S)
# # 增加绘图工具 01网络拓扑图(2D)Button
Button(func_frame_mid, text="01网络拓扑图(2D)", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=1, column=0, sticky=W)
# # 增加绘图工具 02网络拓扑图(3D)Button
Button(func_frame_mid, text="02网络拓扑图(3D)", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=2, column=0, sticky=W)
# # 以此类推
Button(func_frame_mid, text="03极坐标图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=3, column=0, sticky=W)
Button(func_frame_mid, text="04星云图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=4, column=0, sticky=W)
Button(func_frame_mid, text="05词汇云图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=5, column=0, sticky=W)
Button(func_frame_mid, text="06主题河流图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=6, column=0, sticky=W)
Button(func_frame_mid, text="07地理图绘制系列", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=7, column=0, sticky=W)
# #添加关于按钮
Button(func_frame_bottom, text="关于", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=8, column=0, sticky=S)
    def quit(self):
        """Tear the application down and terminate the process."""
        # End the main event loop.
        self.root.quit()  # close the window
        self.root.destroy()  # destroy every widget and reclaim its memory
        # NOTE(review): exit() is the interactive-site helper; sys.exit()
        # would be the conventional choice here.
        exit()
def draw_guide_init(self):
""""
点击绘图向导后,界面的初始化
"""
print("Event:绘图向导")
# # 清空画布
# self.cv.delete(image)
# 初始化绘图向导UI frame
for widget in self.cv_frame.winfo_children():
widget.destroy()
# 开始添加绘图向导界面相关控件
# 增加绘图目标Label Frame
self.cv_frame = Frame(root, width=600, height=685, bg='#fff2cc')
self.cv_frame.grid(row=0, rowspan=5, column=0, sticky=N)
aim_frame = LabelFrame(self.cv_frame, text="第一步:确定绘图目标", width=600, height=60, bg='#fff2cc')
aim_frame.grid(row=0, column=0, sticky=W)
aim_frame.grid_propagate(0) # 组件大小不变
# #给绘图目标Label Frame里面添加Radiobutton
aim_list = ["希望展示数据间的关联关系(小规模网络拓扑)",
"希望展示数据间的关联关系(大规模网络拓扑)",
"希望展示数据间的地位排名",
"希望进行数据地理位置展示",
"希望分析文本数据词频信息",
"希望展示多类时间序列数据"]
# for i in range(0, len(aim_list)):
# Radiobutton(aim_frame, text=aim_list[i], command=self.call_aim_rb, variable=self.aim_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
comvalue_aim = StringVar()
c_aim = ttk.Combobox(aim_frame, textvariable=comvalue_aim, width=80)
c_aim["values"] = aim_list
c_aim.current(1)
c_aim.grid(row=0, column=0, sticky=W)
# 根据第一步的选择自动给出绘图实例
def call_aim_rb(self):
"""
绘图目标单选按钮单击事件,生成绘图工具选择、导出绘图数据格式、个性化数据处理、用户上传绘图数据、用户获取绘图结果(绘图参数调优)、目标反馈与评价
:return:
"""
tool_frame = LabelFrame(self.cv_frame, text="第二步:选择绘图工具", width=600, height=80, bg='#fff2cc')
tool_frame.grid(row=1, column=0, sticky=W)
tool_frame.grid_propagate(0) # 组件大小不变
# 导出绘图数据格式
export_frame = LabelFrame(self.cv_frame, text="第三步:导出数据格式", width=600, height=50, bg='#fff2cc')
export_frame.grid(row=2, column=0, sticky=W)
export_frame.grid_propagate(0) # 组件大小不变
if self.aim_v_radio.get() == 0:
# 希望展示数据间的关联关系(小规模网络拓扑), 01 02图例均可
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["01网络拓扑图(2D)",
"02网络拓扑图(3D)"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 1:
# 希望展示数据间的关联关系(大规模网络拓扑), 04图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["04星云图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 2:
# 希望展示数据间的地位排名, 03图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["03极坐标图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 3:
# 希望进行数据地理位置展示, 07图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["07地理图绘制系列"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 4:
# 希望分析文本数据词频信息, 05图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["05词汇云图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 5:
# 希望展示多类时间序列数据, 06图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["06主题河流图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
# 个性化数据处理
process_frame = LabelFrame(self.cv_frame, text="第四步:个性数据处理", width=600, height=100, bg='#fff2cc')
process_frame.grid(row=3, column=0, sticky=W)
process_frame.grid_propagate(0) # 组件大小不变
# 用户上传绘图数据
upload_frame = LabelFrame(self.cv_frame, text="第五步:上传绘图数据", width=600, height=50, bg='#fff2cc')
upload_frame.grid(row=4, column=0, sticky=W)
upload_frame.grid_propagate(0) # 组件大小不变
# 用户获取绘图结果(绘图参数调优)
result_frame = LabelFrame(self.cv_frame, text="第六步:获取绘图结果", width=600, height=50, bg='#fff2cc')
result_frame.grid(row=5, column=0, sticky=W)
result_frame.grid_propagate(0) # 组件大小不变
# 目标反馈与评价
feedback_frame = LabelFrame(self.cv_frame, text="第七步:目标反馈评价", width=600, height=50, bg='#fff2cc')
feedback_frame.grid(row=6, column=0, sticky=W)
feedback_frame.grid_propagate(0) # 组件大小不变
    def return_main(self):
        """Return to the home page by rebuilding the whole UI.

        (Original docstring, translated: "return to the main page".)
        """
        print("Event:回到主页")
        # Re-running __init__ recreates the menu bar, canvas and button
        # frames from scratch; it only builds widgets, so re-entry is safe.
        self.__init__(self.root)
if __name__ == "__main__":
# 创建一个Top Level的根窗口, 并把他们作为参数实例化为App对象
# root = tk.Tk()
root = ThemedTk(theme="arc")
root.title("CAICT地图绘制工具箱(CAICT-AtlasToolkit)")
center_window(root, 0, 0) # 设置窗口位置
# root.maxsize(750, 800)
root.minsize(770, 690) # 设置窗口最小尺寸
root.resizable(0, 0) # 锁定尺寸
# root.attributes("-alpha", 0.80)
app = App(root)
# 开始主事件循环
root.mainloop()
| [
"ieeflsyu@outlook.com"
] | ieeflsyu@outlook.com |
3c972da0222637e910a4db503ff3014c0230098c | 5c40acb734e03a39e0c34fdddf927181ffe85437 | /geek_for_python/LXF/test.py | 167b944c3db9069bf0e2e756c5a57e9803175839 | [] | no_license | hyc123no1/Python-100-Days | 075feee0f25e12388a8aa94671f2ce0d9c7216c1 | beec317971abb85d6b7b854058d676adfdd98156 | refs/heads/master | 2020-05-18T17:04:18.944074 | 2019-11-12T08:00:18 | 2019-11-12T08:00:18 | 184,543,479 | 0 | 0 | null | 2019-05-02T08:05:21 | 2019-05-02T08:05:21 | null | UTF-8 | Python | false | false | 128 | py | import subprocess
print('$ nslookup www.python.org')
r = subprocess.call(['nslookup','www.python.org'])
print('Exit code:',r) | [
"869995755@qq.com"
] | 869995755@qq.com |
9630f08498dcfff585aa7d19b7cebc5da20e0548 | 5db5c8b21d412762c1713a466f9d3a8a8a3743ab | /Test_Generators/Thirtysix/full_hydra/scripts/fixed-point/works/intconversion.py | 14a3e3b1bf12fba9ef369d70ac14010bc3d157fb | [] | no_license | r4space/Fynbos | f3ee6d7cce1f7d14cbc672227be6d922274912c5 | cda7034e1ba5ba3916a87490cec702a6f48e89fb | refs/heads/master | 2021-01-21T21:39:01.498126 | 2016-05-03T17:23:23 | 2016-05-03T17:23:23 | 24,953,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | #!/usr/bin/python2.6
#File containg the function version of int1.py to convert input to int data word
import sys
from math import pow
DATA_SIZE_C =36
######Decimal integer to binary#############
def Intdec_to_bin (int_in,DATA_SIZE_C):
  """Convert a non-negative integer to a fixed-width big-endian bit list.

  Args:
    int_in: non-negative integer (callers range-check it against the width).
    DATA_SIZE_C: number of bits in the result; zero-padded on the left.

  Returns:
    A list of DATA_SIZE_C ints, each 0 or 1, most significant bit first.
  """
  # The original built `a = range(...)` and assigned into it, which only
  # worked on Python 2 (where range returns a list); start from zeros.
  bits = [0] * DATA_SIZE_C
  k = int_in
  i = DATA_SIZE_C - 1
  while k != 0:
    # BUG FIX: `k / 2` is float division on Python 3 and silently corrupts
    # the result; divmod keeps everything in integers on both versions.
    k, bits[i] = divmod(k, 2)
    i = i - 1
  return bits
#####Invert binary##########################
def invert_vector (vec):
  """Return the bitwise complement of a 0/1 vector as a new list.

  Each 0 becomes 1; any other value becomes 0 (matching the original's
  else branch).
  """
  # BUG FIX: the original assigned into a `range` object, which raises
  # TypeError on Python 3; build the result with a comprehension instead.
  return [1 if bit == 0 else 0 for bit in vec]
####Add 1 to a binary vector################
def bin_add_one (vec):
  """Add one to a big-endian bit vector, in place, and return it.

  Trailing 1-bits are cleared up to the first 0-bit, which is set to 1.
  An all-ones input wraps around to all zeros (the carry out is dropped),
  exactly as the original behaved.
  """
  # The original tracked a flag `t` that had no effect -- both branches of
  # its else clause wrote 0 -- so the increment reduces to this scan.
  for k in range(len(vec) - 1, -1, -1):
    if vec[k] == 0:
      vec[k] = 1
      break
    vec[k] = 0
  return vec
####Convert binary to Hexidecimal##########
def bin_to_hex (vec):
  """Convert a big-endian bit vector into a list of hex digits.

  The vector is left-padded with zeros to a multiple of 4 bits; each
  nibble becomes an int for values 0-9 and an uppercase letter string
  ("A".."F") for 10-15, matching the original output format.
  """
  # BUG FIXES relative to the original:
  #  * it assigned into `range` objects (Python 2 only);
  #  * for lengths not divisible by 4 it sized the result as
  #    len/4 + len%4 nibbles, leaving uninitialised garbage entries at
  #    the tail of the returned list;
  #  * it accumulated nibble values via float pow(2, e).
  pad = -len(vec) % 4
  bits = [0] * pad + list(vec)
  digits = []
  for start in range(0, len(bits), 4):
    value = 0
    for bit in bits[start:start + 4]:
      value = value * 2 + bit
    # Map 10..15 to letters, keep 0..9 as plain ints (original format).
    digits.append("0123456789ABCDEF"[value] if value >= 10 else value)
  return digits
###Print vec###############################
def print_vec (vec):
  """Print the elements of *vec* concatenated on a single line."""
  # BUG FIX: `print d` is Python 2-only syntax; the function form emits
  # identical output and parses on both interpreter lines.
  print("".join(str(item) for item in vec))
##Decimal to custom Integer binary conversion:
def myint ():
  # Interactively read a decimal integer and convert it to a DATA_SIZE_C-bit
  # two's-complement word. Returns [hex_digit_string, original_input, bit_list].
  # NOTE(review): Python 2-only code (raw_input and print statements); the
  # local name `input` also shadows the builtin.
  i = 0
  input = raw_input("Enter an Int: ")
  int_in = int(input)
  # Range check against the signed DATA_SIZE_C-bit limits; out-of-range
  # values only print a warning (the sys.exit calls are commented out), so
  # conversion still proceeds and produces a truncated/garbled word.
  if int_in>pow(2,DATA_SIZE_C-1)-1: #>34359738367
    print '\033[1;41mNumber too big\033[1;m'
    #sys.exit(0);
  if int_in<(-1*pow(2,DATA_SIZE_C-1)): #<-34359738368
    print '\033[1;41mNumber too negative\033[1;m'
    #sys.exit(0);
  x= Intdec_to_bin (abs(int_in),DATA_SIZE_C)
  ##Convert to 2's comp if neg:
  # input[0][0] indexes a one-character string again, so it is just
  # input[0]: the sign character of the typed text.
  if input[0][0] == '-':
    #invert array
    a = invert_vector(x)
    #binary_add one
    x = bin_add_one(a)
  #Result in hex:
  e = ""
  h = bin_to_hex(x)
  for i in range(0,len(h)):
    e = e+str(h[i])
  result = [e,str(input),x]
  return result
| [
"r4space@users.noreply.github.com"
] | r4space@users.noreply.github.com |
75f93e34d8ca3a0b0979c369e1660952c13e8142 | dd90264bbfb79700d1d32effc207b555c29f3bcf | /python/task/Daemon.py | ca16c6dff3fddd699904cfbeebcfd5065fe1a956 | [] | no_license | tomzhang/other_workplace | 8cead3feda7e9f067412da8252d83da56a000b51 | 9b5beaf4ed3586e6037bd84968c6a407a8635e16 | refs/heads/master | 2020-04-22T22:18:23.913665 | 2019-01-26T03:36:19 | 2019-01-26T03:36:19 | 170,703,419 | 1 | 0 | null | 2019-02-14T14:23:21 | 2019-02-14T14:23:20 | null | UTF-8 | Python | false | false | 221 | py | #!/usr/bin/python3
# coding=utf8
import daemon
class Daemon:
    """Minimal python-daemon wrapper: detach, then run task() in background."""
    def task(self):
        # Placeholder; subclasses override this with the real work.
        pass
    # Background (daemonised) process entry point.
    def main(self):
        # Enter a daemon context (fork/detach), then run the task.
        with daemon.DaemonContext():
            self.task()
| [
"Pj879227577"
] | Pj879227577 |
df7dfb75533316e32a45428f1c897409c9f474a3 | 5ec3f7e4e6dca1504ec421813f879d7f24224f2b | /src/website/search/indexes.py | f80577607509c75285d5c0aa79b8d8cf35bab03e | [] | no_license | samsath/secondeditions_main | afe77a272ce5ea1a4e24dc7bc3e79453d0601b5c | a529abb8294483a35396d869f84254a1dbae3c13 | refs/heads/master | 2021-01-25T07:27:40.002304 | 2015-09-28T18:49:20 | 2015-09-28T18:49:20 | 39,802,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | # -*- coding: utf-8 -*-
from haystack import site, indexes
from django.conf import settings
class BaseSearchIndex(indexes.RealTimeSearchIndex):
    """Common Haystack index base: full-text blob plus URL/modified fields."""
    # Main document field, rendered from a per-model search template.
    text = indexes.CharField(document=True, use_template=True)
    # Stored (not searched) absolute URL for display in result lists.
    absolute_url = indexes.CharField(indexed=False)
    modified = indexes.DateTimeField(model_attr='modified')
    def prepare_absolute_url(self, obj):
        return obj.get_absolute_url()
    def prepare_section(self, obj):
        # Optional 'section' attribute; empty string when the model lacks it.
        return getattr(obj, 'section', '')
    def get_updated_field(self):
        # In DEBUG, disable incremental ("updated since") reindexing so every
        # run reindexes everything; in production key updates off `modified`.
        if settings.DEBUG:
            return None
        return 'modified'
    def index_queryset(self):
        # Prefer a model's `public` manager (published objects only) when it
        # exists, falling back to the default manager.
        if hasattr(self.model, 'public'):
            return self.model.public.all()
        else:
            return self.model.objects.all()
| [
"samhipwell@gmail.com"
] | samhipwell@gmail.com |
94fc9317eee3472ca965e14dcea7ae89cb599a30 | 6492dcf3d93765cc27ce592131460fb6ecb892cb | /scripts/leader_example.py | 247ca2a1be5c1e597780c9b534116d1a5d39b64d | [
"MIT"
] | permissive | buckbaskin/drive_stack | d3beb3f7af11c794358826078381b7040c5a893d | 7a242969c4189379d6545782d9584692976b9687 | refs/heads/master | 2021-01-18T22:04:33.929328 | 2016-02-26T19:23:30 | 2016-02-26T19:23:30 | 42,958,401 | 1 | 2 | null | 2016-01-18T21:46:00 | 2015-09-22T20:16:04 | Python | UTF-8 | Python | false | false | 3,674 | py | #!/usr/bin/env python
# import sys
# print sys.path
import leader
import rospy
import math
from geometry_msgs.msg import Point, Vector3
from nav_msgs.msg import Odometry
from utils import heading_to_quaternion
class ExampleLeader(leader.Leader):
# methods to override:
# generate_initial_path, generate_next_path
# this is the same implementation as the Leader class, but separate to
# demonstrate how to override it.
    def generate_initial_path(self):
        """
        Path creation for node

        Builds self.targets: a straight line of Odometry waypoints from the
        path start to the path goal, spaced des_speed*dt apart, and resets
        self.index to the first waypoint.
        """
        rospy.loginfo('generating generate_initial_path')
        # Note: this is called once during node initialization
        end = self.path_goal().goal # Odometry
        start = self.path_start().goal # Odometry
        start.header.frame_id = 'odom'
        self.targets = []
        self.targets.append(start)
        # pylint: disable=invalid-name
        # dt, dx, dy properly express what I'm trying to get across
        # i.e. differential time, x, y
        dt = .1
        des_speed = .5 # m/s
        dx = end.pose.pose.position.x - start.pose.pose.position.x
        dy = end.pose.pose.position.y - start.pose.pose.position.y
        # total dx above
        heading = math.atan2(dy, dx)
        # Per-timestep advance along the heading.
        step_x = des_speed*math.cos(heading)*dt
        step_y = des_speed*math.sin(heading)*dt
        rospy.loginfo('step_x: '+str(step_x))
        distance = math.sqrt(dx*dx+dy*dy)
        steps = math.floor(distance/(des_speed*dt))
        rospy.loginfo('steps generated? '+str(steps))
        for i in range(1, int(steps)+1):
            # NOTE(review): leftover debug logging ('a;sdf', 'gen x/y')
            # fires every waypoint -- candidate for logdebug or removal.
            rospy.loginfo('a;sdf '+str(i))
            odo = Odometry()
            odo.header.frame_id = 'odom'
            odo.pose.pose.position = Point(x=start.pose.pose.position.x+i*step_x, y=start.pose.pose.position.y+i*step_y)
            rospy.loginfo('gen x: '+str(start.pose.pose.position.x+i*step_x))
            rospy.loginfo('gen y: '+str(start.pose.pose.position.y+i*step_y))
            odo.pose.pose.orientation = heading_to_quaternion(heading)
            odo.twist.twist.linear = Vector3(x=des_speed)
            odo.twist.twist.angular = Vector3()
            self.targets.append(odo)
        self.index = 0
def generate_next_path(self):
"""
generate a new path, either forwards or backwards (rvs == True)
"""
end = self.path_next().goal
start = self.path_start().goal
self.targets = []
self.targets.append(start)
# pylint: disable=invalid-name
# dt, dx, dy properly express what I'm trying to get across
# i.e. differential time, x, y
dt = .1
des_speed = .5 # m/s
dx = end.pose.pose.position.x - start.pose.pose.position.x
dy = end.pose.pose.position.y - start.pose.pose.position.y
heading = math.atan2(dy, dx)
dx = des_speed*math.cos(heading)*dt
dy = des_speed*math.sin(heading)*dt
distance = math.sqrt(dx*dx+dy*dy)
steps = math.floor(distance/des_speed)
for i in range(1, int(steps)):
odo = Odometry()
odo.header.frame_id = 'odom'
odo.pose.pose.point = Point(x=start.x+i*dx, y=start.y+i*dy)
odo.pose.pose.orientation = heading_to_quaternion(heading)
odo.twist.twist.linear = Vector3(x=des_speed)
odo.twist.twist.angular = Vector3()
self.targets.append(odo)
if rvs:
self.index = len(self.targets)-2
else:
self.index = 0
if __name__ == '__main__':
    # pylint: disable=invalid-name
    # leader is a fine name, it's not a constant
    # Instantiate the example node and hand control to its server loop.
    leader = ExampleLeader()
    leader.run_server()
| [
"mobile.wbaskin@gmail.com"
] | mobile.wbaskin@gmail.com |
06fb5bc9ee57f17f7fc682b0ed72f5547f527deb | 69dca600d8338901fe6167c7172992caae2eb78b | /resize.py | ab55b54ab538e8eb8ba196a202a9b6984b53e3d4 | [] | no_license | bilel46/flask_project | eaf64bd221aa84eeeff984bc6ceb9dcdf0221c8d | 71e0ee70c62518ebc9c749bba69b1edc4a77b0e1 | refs/heads/master | 2021-01-01T23:44:10.699920 | 2020-02-10T00:52:01 | 2020-02-10T00:52:01 | 239,395,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py |
import cv2
import numpy as np
"""
def resize_c(image,a,b):
img_x = np.ones((a,b,3), np.uint8)
img_x[:,:,0] = cv2.resize(image[:,:,0],(b,a))
img_x[:,:,1] = cv2.resize(image[:,:,1],(b,a))
img_x[:,:,2] = cv2.resize(image[:,:,2],(b,a))
return img_x
cart1 = cv2.imread('images\\nouveau.png')
# soutour amida
cart = resize_c(cart1,450,400)
cv2.imshow('card',cart)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('images\\b_nouveau.png',cart)
"""
import time
def im():
msg = np.zeros((100,450,3),dtype=np.uint8)
msg[:,:,:]=(102, 102, 0)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(msg,'Apuie la cart sur la came',(10,60), font, 1,(0,0,0),2,cv2.LINE_AA)
cv2.imshow('msg',msg)
time.sleep(3)
r=2
#im()
msg = np.zeros((100,450,3),dtype=np.uint8)
msg[:,:,:]=(102, 102, 0)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(msg,'Apuie la cart sur la came',(10,60), font, 1,(0,0,0),2,cv2.LINE_AA)
cv2.imshow('msg',msg)
if r ==1 :
cv2.destroyAllWindows()
time.sleep(3)
#cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"boumedieneb68@gmail.com"
] | boumedieneb68@gmail.com |
b088b7e8a4069b741246eaf5ac68d6faad85613b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04012/s874951633.py | 7b934360297ee1e1391f1376a323f92dc1ecebb8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # coding: utf-8
w = list(input())
w_ = list(set(w))
flg = True
for a in w_:
if w.count(a)%2 != 0:
flg = False
if flg:
print("Yes")
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c7078179eccf187a64bbe80888d14d3535eb0d98 | d524dc1cf48dfb3dac6b288d0f5d3206ac2ad33b | /CMSSW/src/PhysicsTools/PatAlgos/python/selectionLayer1/electronCountFilter_cff.py | 6db059f5323f864264d77bcab418c770ba1ea90b | [] | no_license | bainbrid/usercode | 59e7e2c2ba66be8ee6696be5b7fdddc3fa5d6d2a | 3d1ae8563ff470725721100f6e5a2e7b5e8e125e | refs/heads/master | 2016-09-07T18:38:31.267514 | 2013-09-10T20:21:21 | 2013-09-10T20:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.selectionLayer1.electronMinFilter_cfi import *
from PhysicsTools.PatAlgos.selectionLayer1.electronMaxFilter_cfi import *
countLayer1Electrons = cms.Sequence(minLayer1Electrons + maxLayer1Electrons)
| [
""
] | |
35b0dbd07007d03f639af45783b1420ae34e0735 | 1831b957d155b8cdbef50a28ad05084f67606725 | /dl_hw3/single_crnn/train.py | d413999e19a30621aa0999331b206a9e0753e4e7 | [] | no_license | GaomingOrion/dl_hw | 898f77fea92ca05c5e2c667afe36159bbab51011 | b0858162763325c286d547533c130a01506f09e1 | refs/heads/master | 2020-04-27T21:19:21.738722 | 2019-06-20T12:31:04 | 2019-06-20T12:31:04 | 174,691,970 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | import tensorflow as tf
import numpy as np
import os, time
from crnn_SVNH import CRNN
from dataset import Dataset
from common import config
def train(prev_model_path=None):
# prepare dataset
dataset_train = Dataset('train')
dataset_test = Dataset('test')
# define computing graph
model = CRNN()
net_out, raw_pred = model.build_infer_graph()
loss = model.compute_loss(net_out)
# set optimizer
global_step = tf.Variable(0, name='global_step', trainable=False)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
#optimizer = tf.train.AdamOptimizer()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0005)
train_op = optimizer.minimize(
loss=loss, global_step=global_step)
# decoder
decoded, _ = tf.nn.ctc_beam_search_decoder(net_out,
sequence_length=tf.to_int32(tf.fill(tf.shape(model._inputdata)[:1], config.seq_length)),
beam_width=5,
merge_repeated=False, top_paths=1)
decoded = decoded[0]
decoded_paths = tf.sparse_tensor_to_dense(decoded, default_value=config.class_num-1)
# evaluate on test set
def evaluate(sess, dataset):
loss_lst = []
label_pred = []
label_true = []
for inputdata, sparse_label, raw_label in dataset.one_epoch_generator():
decoded_paths_val, loss_val = sess.run([decoded_paths, loss], feed_dict={
model.place_holders['inputdata']: inputdata,
model.place_holders['label']: sparse_label,
model.place_holders['is_training']: False
})
for x in decoded_paths_val:
label_pred.append([idx for idx in x if idx != config.class_num-1])
for x in raw_label:
label_true.append(x)
loss_lst.append(loss_val)
acc = cal_acc(label_pred, label_true)
return np.mean(loss_lst), acc
# Set tf summary
tboard_save_dir = config.tboard_save_dir
os.makedirs(tboard_save_dir, exist_ok=True)
tf.summary.scalar(name='train_loss', tensor=loss)
merged = tf.summary.merge_all()
# Set saver configuration
saver = tf.train.Saver()
model_save_dir = config.model_save_dir
os.makedirs(model_save_dir, exist_ok=True)
# Set sess configuration
sess = tf.Session()
summary_writer = tf.summary.FileWriter(tboard_save_dir)
summary_writer.add_graph(sess.graph)
# training
global_cnt = 0
with sess.as_default():
if prev_model_path is None:
sess.run(tf.global_variables_initializer())
print('Initialiation finished!')
epoch = 0
else:
print('Restore model from {:s}'.format(prev_model_path))
saver.restore(sess=sess, save_path=prev_model_path)
epoch = 0
while epoch < config.epochs:
epoch += 1
for batch_idx, (inputdata, sparse_label, raw_label) in enumerate(dataset_train.one_epoch_generator()):
global_cnt += 1
loss_val, _, summary = sess.run([loss, train_op, merged], feed_dict={
model.place_holders['inputdata']: inputdata,
model.place_holders['label']: sparse_label,
model.place_holders['is_training']: True
})
summary_writer.add_summary(summary, global_cnt)
if (batch_idx+1)%config.evaluate_batch_interval == 0:
test_loss_val, test_acc = evaluate(sess, dataset_test)
print("----Epoch-{:n}, progress:{:.2%}, evaluation results:".format(epoch,
(batch_idx+1)*config.train_batch_size/config.train_size))
print("--Train_loss: {:.4f}".format(loss_val))
print("--Test_loss: {:.4f}".format(test_loss_val))
print("--Test_accuarcy: {:.4f}\n".format(test_acc))
summary_writer.add_summary(
tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss_val)]),
global_cnt)
summary_writer.add_summary(
tf.Summary(value=[tf.Summary.Value(tag='test_acc', simple_value=test_acc)]),
global_cnt)
if epoch % config.save_epoch_interval == 0:
test_loss_val, test_acc = evaluate(sess, dataset_test)
train_loss_val, train_acc = evaluate(sess, dataset_train)
print("----Epoch-{:n} finished, evaluation results:".format(epoch))
print("--Train_loss: {:.4f}".format(train_loss_val))
print("--Train_accuarcy: {:.4f}".format(train_acc))
print("--Test_loss: {:.4f}".format(test_loss_val))
print("--Test_accuarcy: {:.4f}\n".format(test_acc))
model_name = 'CRNN-e{:n}-acc{:.1f}.ckpt'.format(epoch, 100*test_acc)
model_save_path = os.path.join(model_save_dir, model_name)
print('Saving model...')
saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
print('Saved!')
def cal_acc(label_pred, label_true):
assert len(label_pred) == len(label_true)
cnt = 0
for i in range(len(label_pred)):
if label_pred[i] == label_true[i]:
cnt += 1
return cnt/len(label_pred)
if __name__ == '__main__':
train('.\\tf_ckpt\\CRNN-e7-acc59.7.ckpt-7')
#train()
| [
"542043468@qq.com"
] | 542043468@qq.com |
c77ed32e11c93dc6f6c61f0c74f6bf30e7c98353 | 0064a139ed764cfb8cf13db2ae987bc525f65f2f | /src/manage.py | 9448f6699c561ffdffcd0101a2cb3a76d1defcf0 | [] | no_license | opencookiecms/motorlist | 3b41d4822ccfb2db65f8f3670e5f0cc3d7c8f94d | 49082fc02fe6a6b131cc0929f21588018e33cc3c | refs/heads/main | 2023-05-25T01:52:46.247779 | 2021-06-02T08:54:36 | 2021-06-02T08:54:36 | 373,091,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'motorapp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"syed.m.afiq@outlook.com"
] | syed.m.afiq@outlook.com |
3b3bceaa15059ec9733d3e15f201a0871bc15447 | d89bc18590e0aec61a34499ac06ac88354d2fc75 | /crawler/twitter.py | 5d5132e8959070c016454baee4bfad53446887e5 | [] | no_license | socjordi/osint | 92fa96a0f824467c2ff9b1c3724c40c37d2c94f3 | 15162a3fecd6f2a6cc621fb3f6d4729c4fae19b7 | refs/heads/master | 2020-04-18T02:32:47.660179 | 2019-01-23T10:49:53 | 2019-01-23T10:49:53 | 167,166,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,590 | py | #!/usr/bin/python
import os
import json
import time
import sys
from datetime import datetime
#from dateutil.parser import parse
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from elasticsearch import Elasticsearch
import hmac
import hashlib
import base64
hmac_secret=base64.b64decode("NR6xaQoKjL4=")
################################################################################
def esborra(message_id, user_id, timestamp):
global es
try:
res=es.search(index="osint-*", body={"query": {"match": {'message_id': message_id}}})
index=res['hits']['hits'][0]["_index"]
ident=res['hits']['hits'][0]["_id"]
source=res['hits']['hits'][0]["_source"]
source["deleted_user_id"]=user_id
source["deleted_timestamp"]=timestamp
es.update(
index=index,
doc_type="twitter",
id=ident,
body={"doc": source}
)
except:
return
################################################################################
def ocupacio(path):
statvfs = os.statvfs(path)
espai_total=statvfs.f_frsize * statvfs.f_blocks
espai_lliure=statvfs.f_frsize * statvfs.f_bavail
oo=100.0-float(espai_lliure)/float(espai_total)*100.0
return oo
################################################################################
def error(idgrup, msg):
global es
now = datetime.utcnow()
indexname="errors-%s" % (now.strftime("%Y-%m"))
logfilename="/home/osint/errors-%d.log" % (idgrup)
es.index(
index=indexname,
doc_type="error",
body={
"grup_id": idgrup,
"message": msg,
"timestamp": now
}
)
with open(logfilename, "a") as errfile:
errfile.write(msg+"\n")
if msg=="[Errno 28] No space left on device":
o=ocupacio("/media/img")
with open(logfilename, "a") as errfile:
errfile.write("Ocupacio /media/img: %f\n" % o)
o=ocupacio("/media/es")
with open(logfilename, "a") as errfile:
errfile.write("Ocupacio /media/es: %f\n" % o)
o=ocupacio("/tmp")
with open(logfilename, "a") as errfile:
errfile.write("Ocupacio /tmp: %f\n" % o)
################################################################################
def indexa(data):
global es, idgrup, hmac_secret, captura
#print(data)
#print
digest=hmac.new(hmac_secret, msg=data, digestmod=hashlib.sha256).digest()
signature=base64.b64encode(digest).decode()
now = datetime.now()
indexname="osint-%s" % (now.strftime("%Y-%m-%d"))
deleteindexname="osint-delete-%s" % (now.strftime("%Y-%m-%d"))
j=json.loads(data)
if 'delete' in j:
timestamp=int(j["delete"]["timestamp_ms"])
timestamp=datetime.utcfromtimestamp(timestamp/1000.0)
timestamp=timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')
message_id=str(j["delete"]["status"]["id"])
user_id=str(j["delete"]["status"]["user_id"])
es.index(
index=indexname,
doc_type="twitter",
body={
"grup_id": idgrup,
"message_id": message_id,
"user_id": user_id,
"timestamp": timestamp,
"json": data,
"signature": signature
}
)
esborra(message_id, user_id, j["delete"]["timestamp_ms"])
else:
if 'retweeted_status' in j:
retweeted=True
else:
retweeted=False
#print(j)
#print("text=%s retweeted=%d" % (j['text'], retweeted))
timestamp=int(j["timestamp_ms"])
timestamp=datetime.utcfromtimestamp(timestamp/1000.0)
timestamp=timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')
message_id=str(j["id"])
user_id=str(j["user"]["id"])
pathimg="%04d/%02d/%02d/%s.jpg" % (now.year,now.month,now.day,message_id)
es.index(
index=indexname,
doc_type="twitter",
body={
"grup_id": idgrup,
"message_id": message_id,
"user_screen_name": j["user"]["screen_name"],
"user_name": j["user"]["name"],
"user_id": user_id,
"timestamp": timestamp,
"text": j["text"],
"json": data,
"retweeted": retweeted,
"signature": signature,
"deleted_user_id": "",
"deleted_timestamp": "",
"pathimg": pathimg
}
)
if captura==1:
path="/var/www/html/osintimg/%04d" % (now.year)
if not os.path.exists(path):
os.makedirs(path)
path="%s/%02d" % (path,now.month)
if not os.path.exists(path):
os.makedirs(path)
path="%s/%02d" % (path,now.day)
if not os.path.exists(path):
os.makedirs(path)
path="%s/%s.jpg" % (path,message_id)
if not os.path.isfile(path):
url="https://twitter.com/%s/status/%s" % (j["user"]["screen_name"],message_id)
cmd="xvfb-run -a --server-args=\"-screen 0 1280x1200x24\" cutycapt --min-width=1024 --min-height=2048 --url=%s --out=%s --print-backgrounds=on --delay=3000 --max-wait=10000 --http-proxy=\"http://192.168.47.162:8080\" >/dev/null 2>&1 &" % (url,path)
os.system(cmd)
################################################################################
class FileDumperListener(StreamListener):
def __init__(self):
super(FileDumperListener,self).__init__(self)
self.tweetCount=0
self.errorCount=0
self.limitCount=0
self.last=datetime.now()
#This function gets called every time a new tweet is received on the stream
def on_data(self, data):
print(data)
indexa(data)
self.tweetCount+=1
self.status()
return True
def close(self):
print "close"
def on_error(self, statusCode):
if statusCode==401:
msg="ERROR 401 - No autoritzat (credencials incorrectes o inexistents)"
elif statusCode==406:
msg="ERROR 406 - No acceptable (peticio amb format no valid)"
elif statusCode==429:
msg="ERROR 429 - Massa peticions"
else:
msg="ERROR %s (API Twitter)" % (statusCode)
print(msg)
error(idgrup, msg)
#with open(logFileName, "a") as logfile:
# logfile.write(msg)
self.errorCount+=1
def on_timeout(self):
raise TimeoutException()
def on_limit(self, track):
msg="LIMIT missatge rebut %s " % (track)
print(msg)
error(idgrup, msg)
#with open(logFileName, "a") as logfile:
# logfile.write(msg)
self.limitCount+=1
def status(self):
now=datetime.now()
if (now-self.last).total_seconds()>300:
msg="%s - %i tweets, %i limits, %i errors in previous five minutes\n" % (now,self.tweetCount,self.limitCount,self.errorCount)
print(msg)
#with open(logFileName, "a") as logfile:
# logfile.write(msg)
self.tweetCount=0
self.limitCount=0
self.errorCount=0
self.last=now
################################################################################
class TimeoutException(Exception):
msg="%s TIMEOUT\n" % (datetime.now())
print(msg)
#with open(logFileName, "a") as logfile:
# logfile.write(msg)
pass
################################################################################
def process_users_old(api,users):
u=[]
n=[]
for user in users:
if user=="":
continue
user=user.encode("ascii","ignore")
print "user=%s" % (user)
#if user[0]==u'\u200f':
if user[0]=='@':
n.append(user[1:])
else:
u.append(user)
twinfo=api.lookup_users(user_ids=u, screen_names=n)
u=[]
for t in twinfo:
u.append(str(t.id))
return u
################################################################################
def process_users(api,users):
nbatch=50
u2=[]
for i in range(0,len(users), nbatch):
u=[]
n=[]
if i+nbatch>len(users):
final=len(users)
else:
final=i+nbatch
for j in range(i, final):
user=users[j].encode("ascii","ignore")
if user[0]=='@':
n.append(user[1:])
else:
u.append(user)
#print("j=%d %s" % (j, users[j]))
twinfo=api.lookup_users(user_ids=u, screen_names=n)
for t in twinfo:
#print(str(t.id))
u2.append(str(t.id))
return u2
################################################################################
if __name__ == '__main__':
if len(sys.argv)!=2:
print "Cal passar com argument el path del fitxer de parametres"
exit()
settings=sys.argv[1]
fh = open(settings,"r")
json_data=fh.read()
fh.close()
data=json.loads(json_data)
#print(data)
twitter_consumer_key=data["twitter_consumer_key"]
twitter_consumer_secret=data["twitter_consumer_secret"]
twitter_access_token=data["twitter_access_token"]
twitter_access_token_secret=data["twitter_access_token_secret"]
if twitter_consumer_key=='':
time.sleep(60)
exit()
if twitter_consumer_secret=='':
time.sleep(60)
exit()
if twitter_access_token=='':
time.sleep(60)
exit()
if twitter_access_token_secret=='':
time.sleep(60)
exit()
keywords=data["llistaparaules"]
users=data["llistausuaris"]
idgrup=data["idgrup"]
captura=data["captura"]
es = Elasticsearch(["127.0.0.1"],max_retries=10,retry_on_timeout=True)
while True:
try:
listener = FileDumperListener()
auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
api = API(auth)
stream = Stream(auth, listener)
users=process_users(api,users)
print users
print keywords
if (not users) and (not keywords):
time.sleep(60)
exit()
stream.filter(follow=users, track=keywords)
except KeyboardInterrupt:
print("KeyboardInterrupt caught. Closing stream and exiting.")
listener.close()
stream.disconnect()
break
except TimeoutException:
msg="Timeout exception caught. Closing stream and reopening."
print(msg)
error(idgrup, msg)
try:
listener.close()
stream.disconnect()
except:
pass
continue
except Exception as e:
try:
exc_type, exc_obj, exc_tb = sys.exc_info()
info = str(e)
#msg="%s - Unexpected exception. %s\n" % (datetime.now(),info)
#print(exc_type, fname, exc_tb.tb_lineno)
#msg=msg+" "+exc_type+" "+fname+" "+exc_tb.tb_lineno
msg=info
sys.stderr.write(msg)
error(idgrup, msg)
except:
print "ERROR ERROR\n"
pass
print "sleep"
time.sleep(60)
exit()
###############################################################################
| [
"jordi.gilabert@gencat.cat"
] | jordi.gilabert@gencat.cat |
6641def8d36aeae4adcb4258668f75e5a08deee0 | cdb4eb5d34b06655a9868643ede0712572720be7 | /URLshortener/views.py | 1246f99e2ec14aa5332c962cf14a75732adc2683 | [] | no_license | parthpandyappp/StripURL | 5d35ed4e24d0feffd122b16fe429ba73599d3887 | d5430584b28fe658701ee934c430cc61a520c592 | refs/heads/master | 2023-01-23T00:02:16.831378 | 2020-11-29T11:11:29 | 2020-11-29T11:11:29 | 292,256,080 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from django.shortcuts import render
import pyshorteners
import pyqrcode
import png
from pyqrcode import QRCode
import shutil
import os
# Create your views here.
def index(request):
return render(request, "URLshortener/index.html")
def process(request):
if request.method == "POST":
link = request.POST['link']
shortner = pyshorteners.Shortener()
x = shortner.tinyurl.short(link)
url = pyqrcode.create(x)
url.svg("/home/parth/Documents/finale/StripURL/URLshortener/static/images/myqr.svg", scale=8)
return render(request, "URLshortener/shortened.html", {'short': x})
else:
return render(request, "URLshortener/shortened.html") | [
"parthpandyappp@gmail.com"
] | parthpandyappp@gmail.com |
168e26e56ef3cf1c3ce8fd1273233f37536f2858 | 1dd3c51f26fd0d9a20381683575e4d01f897232c | /src/profiles_project/profiles_api/urls.py | 773e922b0f5a187658c364b7158a91221e5a72fc | [] | no_license | ranganadh234/Rest-apis-profile-management | 3e8a6a517b1aa2acc459a96307ee33d471799939 | f40f17d8641819b55fe9c485f345201303e111f9 | refs/heads/master | 2022-05-04T02:05:53.545068 | 2019-09-25T08:53:19 | 2019-09-25T08:53:19 | 210,784,616 | 0 | 0 | null | 2022-04-22T22:23:07 | 2019-09-25T07:39:00 | Python | UTF-8 | Python | false | false | 1,324 | py | """profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path,include
from rest_framework.routers import DefaultRouter
from .views import HelloApiView,HelloViewSet,UserProfileViewSet,LoginViewSet,UserProfileFeedViewSet
router=DefaultRouter()
router.register('hello-viewset',HelloViewSet,base_name='hello-viewset')
router.register('profile',UserProfileViewSet,base_name='profile')
router.register('login',LoginViewSet,base_name='login')
router.register('feed',UserProfileFeedViewSet,base_name='feed')
urlpatterns = [
#path('admin/', admin.site.urls),
path('hello-view/',HelloApiView.as_view(),name='hello'),
path('',include(router.urls)),
#path('feed',UserProfileFeedViewSet.as_view(),name='feed')
]
| [
"ranganadh234@gmail.com"
] | ranganadh234@gmail.com |
f031c303d9e4960a7cf1940403553395066f27c9 | a32c35a4e8ebb73557bbd0e2805b71e4a4890d10 | /145_post_order.py | 26aecc1f3840e9c6fa39c816b4a8b9ba3c23b926 | [] | no_license | Yiling-J/leetcode | 6c7351a78d09a4139f09942b59b19eaa11fcf9e0 | ca01cd89c43445b750654aa52ef4e8bd92e54dcf | refs/heads/master | 2021-07-12T03:09:48.207372 | 2019-01-01T14:53:09 | 2019-01-01T14:53:09 | 144,372,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | """
Easy to understand loop stack solution.
We put the node and an integer val to stack.
val can be:
2: not traversal left and right
1: traversal left
0: traversal left and right
when val is 0, we can pop that node.
"""
class Solution(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
stack = [[root, 2]]
final = []
while stack:
node, val = stack[-1]
if val == 2:
stack[-1][1] = 1
if node.left is not None:
stack.append([node.left, 2])
elif val == 1:
stack[-1][1] = 0
if node.right is not None:
stack.append([node.right, 2])
elif val == 0:
stack.pop()
final.append(node.val)
return final
| [
"njjyl723@gmail.com"
] | njjyl723@gmail.com |
d3a6aa42166b4d18271f903f734bb3137b484836 | 0ec0fa7a6dc0659cc26113e3ac734434b2b771f2 | /4.refactored/log/2016-11-21@09:03/minibatch.py | 81fc07180a820f169d2b248b9cd4647a948aba64 | [] | no_license | goldleaf3i/3dlayout | b8c1ab3a21da9129829e70ae8a95eddccbf77e2f | 1afd3a94a6cb972d5d92fe373960bd84f258ccfe | refs/heads/master | 2021-01-23T07:37:54.396115 | 2017-03-28T10:41:06 | 2017-03-28T10:41:06 | 86,431,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,935 | py | from __future__ import division
import datetime as dt
import numpy as np
import util.layout as lay
import util.GrafoTopologico as gtop
import util.transitional_kernels as tk
import util.MappaSemantica as sema
import util.frontiere as fr
from object import Segmento as sg
from util import pickle_util as pk
from util import accuracy as ac
from util import layout as lay
from util import disegna as dsg
from util import predizionePlan_geometriche as pgeom
from object import Superficie as fc
from object import Spazio as sp
from object import Plan as plan
from util import MCMC as mcmc
from util import valutazione as val
from shapely.geometry import Polygon
import parameters as par
import pickle
import os
import glob
import shutil
import time
import cv2
import warnings
warnings.warn("Settare i parametri del lateralLine e cvThresh")
def start_main(parametri_obj, path_obj):
#----------------------------1.0_LAYOUT DELLE STANZE----------------------------------
#------inizio layout
#leggo l'immagine originale in scala di grigio e la sistemo con il thresholding
img_rgb = cv2.imread(path_obj.metricMap)
img_ini = img_rgb.copy() #copio l'immagine
# 127 per alcuni dati, 255 per altri
ret,thresh1 = cv2.threshold(img_rgb,parametri_obj.cv2thresh,255,cv2.THRESH_BINARY)#prova
#------------------1.1_CANNY E HOUGH PER TROVARE MURI---------------------------------
walls , canny = lay.start_canny_ed_hough(thresh1,parametri_obj)
print len(walls)
#walls , canny = lay.start_canny_ed_hough(img_rgb,parametri_obj)
if par.DISEGNA:
#disegna mappa iniziale, canny ed hough
dsg.disegna_map(img_rgb,filepath = path_obj.filepath, format='png')
dsg.disegna_canny(canny,filepath = path_obj.filepath, format='png')
dsg.disegna_hough(img_rgb,walls,filepath = path_obj.filepath, format='png')
lines = lay.flip_lines(walls, img_rgb.shape[0]-1)
walls = lay.crea_muri(lines)
print "lines", len(lines), len(walls)
if par.DISEGNA:
#disegno linee
dsg.disegna_segmenti(walls, format='png')#solo un disegno poi lo elimino
#------------1.2_SETTO XMIN YMIN XMAX YMAX DI walls-----------------------------------
#tra tutti i punti dei muri trova l'ascissa e l'ordinata minima e massima.
estremi = sg.trova_estremi(walls)
xmin = estremi[0]
xmax = estremi[1]
ymin = estremi[2]
ymax = estremi[3]
offset = 20
xmin -= offset
xmax += offset
ymin -= offset
ymax += offset
#-------------------------------------------------------------------------------------
#---------------1.3_CONTORNO ESTERNO--------------------------------------------------
#(contours, vertici) = lay.contorno_esterno(img_rgb, parametri_obj, path_obj)
(contours, vertici) = lay.contorno_esterno_versione_tre(img_rgb)
if par.DISEGNA:
dsg.disegna_contorno(vertici,xmin,ymin,xmax,ymax,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#---------------1.4_MEAN SHIFT PER TROVARE CLUSTER ANGOLARI---------------------------
(indici, walls, cluster_angolari) = lay.cluster_ang(parametri_obj.h, parametri_obj.minOffset, walls, diagonali= parametri_obj.diagonali)
if par.DISEGNA:
#dsg.disegna_cluster_angolari(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
dsg.disegna_cluster_angolari_corretto(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari',format='png')
#-------------------------------------------------------------------------------------
#---------------1.5_CLUSTER SPAZIALI--------------------------------------------------
#questo metodo e' sbagliato, fai quella cosa con il hierarchical clustering per classificarli meglio.e trovare in sostanza un muro
#cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)
#inserisci qui il nuovo Cluster_spaz
nuovo_clustering = 2 #1 metodo di matteo, 2 mio
#in walls ci sono tutti i segmenti
if nuovo_clustering == 1:
cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)#metodo di matteo
elif nuovo_clustering ==2:
cluster_mura = lay.get_cluster_mura(walls, cluster_angolari, parametri_obj)#metodo di valerio
cluster_mura_senza_outliers = []
for c in cluster_mura:
if c!=-1:
cluster_mura_senza_outliers.append(c)
# ottengo gli outliers
# outliers = []
# for s in walls:
# if s.cluster_muro == -1:
# outliers.append(s)
# dsg.disegna_segmenti(outliers, savename = "outliers")
#ora che ho un insieme di cluster relativi ai muri voglio andare ad unire quelli molto vicini
#ottengo i rappresentanti dei cluster (tutti tranne gli outliers)
#segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura)
segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura_senza_outliers)
if par.DISEGNA:
dsg.disegna_segmenti(segmenti_rappresentanti,filepath = path_obj.filepath, savename = "5c_segmenti_rappresentanti", format='png')
#classifico i rappresentanti
#qui va settata la soglia con cui voglio separare i cluster muro
#segmenti_rappresentanti = segmenti_rappresentanti
segmenti_rappresentanti = sg.spatialClustering(parametri_obj.sogliaLateraleClusterMura, segmenti_rappresentanti)
#in questo momento ho un insieme di segmenti rappresentanti che hanno il cluster_spaziale settato correttamente, ora setto anche gli altri che hanno lo stesso cluster muro
cluster_spaziali = lay.new_cluster_spaziale(walls, segmenti_rappresentanti, parametri_obj)
if par.DISEGNA:
dsg.disegna_cluster_spaziali(cluster_spaziali, walls,filepath = path_obj.filepath, format='png')
dsg.disegna_cluster_mura(cluster_mura, walls,filepath = path_obj.filepath, savename= '5d_cluster_mura', format='png')
#-------------------------------------------------------------------------------------
#-------------------1.6_CREO EXTENDED_LINES-------------------------------------------
(extended_lines, extended_segments) = lay.extend_line(cluster_spaziali, walls, xmin, xmax, ymin, ymax,filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_extended_segments(extended_segments, walls,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#-------------1.7_CREO GLI EDGES TRAMITE INTERSEZIONI TRA EXTENDED_LINES--------------
edges = sg.crea_edges(extended_segments)
#-------------------------------------------------------------------------------------
#----------------------1.8_SETTO PESI DEGLI EDGES-------------------------------------
edges = sg.setPeso(edges, walls)
#-------------------------------------------------------------------------------------
#----------------1.9_CREO LE CELLE DAGLI EDGES----------------------------------------
celle = fc.crea_celle(edges)
#-------------------------------------------------------------------------------------
#----------------CLASSIFICO CELLE-----------------------------------------------------
global centroid
#verificare funzioni
if par.metodo_classificazione_celle ==1:
print "1.metodo di classificazione ", par.metodo_classificazione_celle
(celle, celle_out, celle_poligoni, indici, celle_parziali, contorno, centroid, punti) = lay.classificazione_superfici(vertici, celle)
elif par.metodo_classificazione_celle==2:
print "2.metodo di classificazione ", par.metodo_classificazione_celle
#sto classificando le celle con il metodo delle percentuali
(celle_out, celle, centroid, punti,celle_poligoni, indici, celle_parziali) = lay.classifica_celle_con_percentuale(vertici, celle, img_ini)
#-------------------------------------------------------------------------------------
#--------------------------POLIGONI CELLE---------------------------------------------
(celle_poligoni, out_poligoni, parz_poligoni, centroid) = lay.crea_poligoni_da_celle(celle, celle_out, celle_parziali)
#ora vorrei togliere le celle che non hanno senso, come ad esempio corridoi strettissimi, il problema e' che lo vorrei integrare con la stanza piu' vicina ma per ora le elimino soltanto
#RICORDA: stai pensando solo a celle_poligoni
#TODO: questo metodo non funziona benissimo(sbagli ad eliminare le celle)
#celle_poligoni, celle = lay.elimina_celle_insensate(celle_poligoni,celle, parametri_obj)#elimino tutte le celle che hanno una forma strana e che non ha senso siano stanze
#-------------------------------------------------------------------------------------
#------------------CREO LE MATRICI L, D, D^-1, ED M = D^-1 * L------------------------
(matrice_l, matrice_d, matrice_d_inv, X) = lay.crea_matrici(celle, sigma = parametri_obj.sigma)
#-------------------------------------------------------------------------------------
#----------------DBSCAN PER TROVARE CELLE NELLA STESSA STANZA-------------------------
clustersCelle = lay.DB_scan(parametri_obj.eps, parametri_obj.minPts, X, celle_poligoni)
#questo va disegnato per forza perche' restituisce la lista dei colori
if par.DISEGNA:
colori, fig, ax = dsg.disegna_dbscan(clustersCelle, celle, celle_poligoni, xmin, ymin, xmax, ymax, edges, contours,filepath = path_obj.filepath, format='png')
else:
colori = dsg.get_colors(clustersCelle, format='png')
#-------------------------------------------------------------------------------------
#------------------POLIGONI STANZE(spazio)--------------------------------------------
stanze, spazi = lay.crea_spazio(clustersCelle, celle, celle_poligoni, colori, xmin, ymin, xmax, ymax, filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_stanze(stanze, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#cerco le celle parziali
coordinate_bordi = [xmin, ymin, xmax, ymax]
celle_parziali, parz_poligoni = lay.get_celle_parziali(celle, celle_out, coordinate_bordi)#TODO: non ho controllato bene ma mi pare che questa cosa possa essere inserita nel metodo 1 che crca le celle parziali
#creo i poligoni relativi alle celle_out
out_poligoni = lay.get_poligoni_out(celle_out)
# TODO: questo blocco e' da eliminare, mi serviva solo per risolvere un bug
# l = []
# for i,p in enumerate(out_poligoni):
# l.append(i)
# col_prova = dsg.get_colors(l)
# dsg.disegna_stanze(out_poligoni, col_prova, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename='0a_prova')
# exit()
#
#--------------------------------fine layout------------------------------------------
#------------------------------GRAFO TOPOLOGICO---------------------------------------
#costruisco il grafo
(stanze_collegate, doorsVertices, distanceMap, points, b3) = gtop.get_grafo(path_obj.metricMap, stanze, estremi, colori, parametri_obj)
(G, pos) = gtop.crea_grafo(stanze, stanze_collegate, estremi, colori)
#ottengo tutte quelle stanze che non sono collegate direttamente ad un'altra, con molta probabilita' quelle non sono stanze reali
stanze_non_collegate = gtop.get_stanze_non_collegate(stanze, stanze_collegate)
#ottengo le stanze reali, senza tutte quelle non collegate
stanze_reali, colori_reali = lay.get_stanze_reali(stanze, stanze_non_collegate, colori)
if par.DISEGNA:
#sto disegnando usando la lista di colori originale, se voglio la lista della stessa lunghezza sostituire colori con colori_reali
dsg.disegna_stanze(stanze_reali, colori_reali, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '8_Stanze_reali', format='png')
#------------------------------------------------------------------------------------
if par.DISEGNA:
dsg.disegna_distance_transform(distanceMap, filepath = path_obj.filepath, format='png')
dsg.disegna_medial_axis(points, b3, filepath = path_obj.filepath, format='png')
dsg.plot_nodi_e_stanze(colori,estremi, G, pos, stanze, stanze_collegate, filepath = path_obj.filepath, format='png')
#-----------------------------fine GrafoTopologico------------------------------------
#-------------------------------------------------------------------------------------
#DA QUI PARTE IL NUOVO PEZZO
#IDEA:
#1) trovo le celle parziali(uno spazio e' parziali se almeno una delle sue celle e' parziale) e creo l'oggetto Plan
#2) postprocessing per capire se le celle out sono realmente out
#3) postprocessing per unire gli spazi che dovrebbero essere uniti
#creo l'oggetto plan che contiene tutti gli spazi, ogni stanza contiene tutte le sue celle, settate come out, parziali o interne.
#setto gli spazi come out se non sono collegati a nulla.
spazi = sp.get_spazi_reali(spazi, stanze_reali) #elimino dalla lista di oggetti spazio quegli spazi che non sono collegati a nulla.
#---------------------------trovo le cellette parziali--------------------------------
#se voglio il metodo che controlla le celle metto 1,
#se voglio il confronto di un intera stanza con l'esterno metto 2
#se volgio il confronto di una stanza con quelli che sono i pixel classificati nella frontiera metto 3
trova_parziali=3
if par.mappa_completa ==False and trova_parziali==1:
#QUESTO METODO OGNI TANTO SBAGLIA PER VIA DELLA COPERTURA DEI SEGMANTI, verifico gli errori con il postprocessing per le stanze parziali.
#TODO: Questo deve essere fatto solo se sono in presenza di mappe parziali
sp.set_cellette_parziali(spazi, parz_poligoni)#trovo le cellette di uno spazio che sono parziali
spazi = sp.trova_spazi_parziali(spazi)#se c'e' almeno una celletta all'interno di uno spazio che e' parziale, allora lo e' tutto lo spazio.
#creo l'oggetto Plan
#faccio diventare la lista di out_poligoni delle cellette
cellette_out = []
for p,c in zip(out_poligoni, celle_out):
celletta = sp.Celletta(p,c)
celletta.set_celletta_out(True)
cellette_out.append(celletta)
plan_o = plan.Plan(spazi, contorno, cellette_out) #spazio = oggetto Spazio. contorno = oggetto Polygon, cellette_out = lista di Cellette
dsg.disegna_spazi(spazi, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13_spazi', format='png')
if par.mappa_completa ==False and trova_parziali==2:
#secondo metodo per trovare gli spazi parziali. Fa una media pesata. migliore rispetto al primo ma bisogna fare tuning del parametro
plan.trova_spazi_parziali_due(plan_o)
if par.mappa_completa == False and trova_parziali==3:
#terzo metodo per trovare le celle parziali basato sulla ricerca delle frontiere.
immagine_cluster, frontiere, labels, lista_pixel_frontiere = fr.ottieni_frontire_principali(img_ini)
if len(labels) > 0:
plan.trova_spazi_parziali_da_frontiere(plan_o, lista_pixel_frontiere, immagine_cluster, labels)
spazi = sp.trova_spazi_parziali(plan_o.spazi)
if par.DISEGNA:
dsg.disegna_map(immagine_cluster,filepath = path_obj.filepath, savename = '0a_frontiere', format='png')
#-------------------------------------------------------------------------------------
#-----------------------------calcolo peso per extended_segments----------------------
#calcolo il peso di un extended segment in base alla copertura sei segmenti. Ovviamente non potra' mai essere 100%.
extended_segments = sg.setPeso(extended_segments, walls)#TODO:controllare che sia realmente corretto
#calcolo per ogni extended segment quante sono le stanze che tocca(la copertura)
lay.calcola_copertura_extended_segment(extended_segments, plan_o.spazi)
plan_o.set_extended_segments(extended_segments)
#-------------------------------------------------------------------------------------
#---------------------------unisco spazi oversegmentati ------------------------------
#unisco le spazi che sono state divisi erroneamente
#fa schifissimo come metodo(nel caso lo utilizziamo per MCMCs)
uniciStanzeOversegmentate = 2
#1) primo controlla cella per cella
#2) unisce facendo una media pesata
#3) non unisce le stanze, non fa assolutamente nulla, usato per mappe parziali se non voglio unire stanze
if uniciStanzeOversegmentate ==1:
#fa schifissimo come metodo(nel caso lo utilizziamo per MCMCs)
#unione stanze
#provo ad usare la distance transforme
#dsg.disegna_distance_transform_e_stanze(distanceMap,stanze,colori, filepath = path_obj.filepath, savename = 'distance_and_stanze')
#se esistono due spazi che sono collegati tramite un edge di una cella che ha un peso basso allora unisco quegli spazi
plan.unisci_stanze_oversegmentate(plan_o)
#cambio anche i colori
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
elif uniciStanzeOversegmentate == 2:
#TODO: questo metodo funziona meglio del primo, vedere se vale la pena cancellare il primo
#metodo molto simile a quello di Mura per il postprocessing
plan.postprocessing(plan_o, parametri_obj)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
else:
#se non voglio unire le stanze, ad esempio e' utile quando sto guardando le mappe parziali
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
#-------------------------------------------------------------------------------------
#------------------------------PREDIZIONE GEOMETRICA----------------------------------
#da qui comincia la parte di predizione, io la sposterei in un altro file
#ricavo gli spazi parziali
cellette_out = plan_o.cellette_esterne
spazi_parziali = []
for s in plan_o.spazi:
if s.parziale == True:
spazi_parziali.append(s)
import copy
plan_o_2 = copy.deepcopy(plan_o)#copio l'oggetto per poter eseguire le azioni separatamente
plan_o_3 = copy.deepcopy(plan_o)
#metodo di predizione scelto.
#se MCMC == True si vuole predirre con il MCMC, altrimenti si fanno azioni geometriche molto semplici
if par.MCMC ==True:
# TODO:da eliminare, mi serviva solo per delle immagini e per controllare di aver fatto tutto giusto
#TODO: MCMC rendilo una funzione privata o di un altro modulo, che se continui a fare roba qua dentro non ci capisci piu' nulla.
#guardo quali sono gli extended che sto selezionando
for index,s in enumerate(spazi_parziali):
celle_di_altre_stanze = []
for s2 in plan_o.spazi:
if s2 !=s:
for c in s2.cells:
celle_di_altre_stanze.append(c)
#-----non serve(*)
celle_circostanti = celle_di_altre_stanze + cellette_out #creo una lista delle celle circostanti ad una stanza
a = sp.estrai_extended_da_spazio(s, plan_o.extended_segments, celle_circostanti)
tot_segment = list(set(a))
#dsg.disegna_extended_segments(tot_segment, walls,filepath = path_obj.filepath, format='png', savename = '7a_extended'+str(index))
#extended visti di una stanza parziale.
b= sp.estrai_solo_extended_visti(s, plan_o.extended_segments, celle_circostanti)#estraggo solo le extended sicuramente viste
tot_segment_visti = list(set(b))
#dsg.disegna_extended_segments(tot_segment_visti, walls,filepath = path_obj.filepath, format='png', savename = '7b_extended'+str(index))
#-----fine(*)
#computo MCMC sulla stanza in considerazione
mcmc.computa_MCMC(s, plan_o, celle_di_altre_stanze, index, xmin, ymin, xmax, ymax, path_obj)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '14_MCMC', format='png')
if par.azione_complessa == True:
#1) FACCIO AZIONE SEMPLICE PER AGGIUNGERE CELLE VISTE DAL LASER
#2) FACCIO AZIONE COMPLESSA: nel quale vado a creare l'intero spazio degli stati fino ad una certa iterazione.
#-------------------------------AZIONE GEOMETRICA 1)----------------------------------
#-----AGGIUNGO CELLE OUT A CELLE PARZIALI SOLO SE QUESTE CELLE OUT SONO STATE TOCCANTE DAL BEAM DEL LASER
for s in spazi_parziali:
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#unisco solo se le celle sono state toccate dal beam del laser
celle_confinanti = plan.trova_celle_toccate_dal_laser_beam(celle_confinanti, immagine_cluster)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#faccio una prova per unire una cella che e' toccata dal beam del laser.
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
if cella.vedo_frontiera == True:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13c_azione_geom_1', format='png')
#-----------------------------AZIONE COMPLESSA--------------------------------
for index,s in enumerate(spazi_parziali):
#estraggo le celle delle altre stanze
celle_di_altre_stanze = plan.estrai_celle_di_altre_stanze(s,plan_o)
#creo il mio spazio degli stati
level= 1 #questa e la profondita' con la quale faccio la mia ricerca, oltre al secondo livello non vado a ricercare le celle.
elementi = pgeom.estrai_spazio_delle_celle(s, plan_o, level)
elementi = pgeom.elimina_spazi_sul_bordo_da_candidati(elementi, plan_o) #per ora non considero elementi che toccano il bordo, perchs' tanto non voglio aggiungerli e mi ingrandiscono lo spazio degli stati per nulla.
print "gli elementi sono:", len(elementi)
print "-------inizio calcolo permutazioni-------"
permutazioni = pgeom.possibili_permutazioni(elementi)
print "-------fine calcolo permutazioni-------"
print "il numero di permutazioni sono:", len(permutazioni)
if len(permutazioni)>0:
#per ogni permutazione degli elementi devo controllare il costo che avrebbe il layout con l'aggiunta di tutte le celle di quella permutazione.
permutazioni_corrette = []
score_permutazioni_corrette = []
for indice,permutazione in enumerate(permutazioni):
ok=False
pgeom.aggiunge_celle_permutazione(permutazione, plan_o, s)#aggiungo le celle della permutazione corrente alla stanza
#calcolo penalita'
penal1_dopo = val.penalita1(s)#piu' questo valore e' alto peggio e', valori prossimi allo zero indicano frome convesse.
penal4_dopo = val.penalita4(s, plan_o, celle_di_altre_stanze)#conto il numero di extended che ci sono dopo aver aggiungere la permutazione, sfavorisce i gradini
# il risultato potrebbe portare ad una stanza non Polygon, allora quella permutazione non e' valida
if type(s.spazio)== Polygon:
ok = True
permutazioni_corrette.append(permutazione)
#elimino dalla lista delle permutazioni tutte quelle permutazioni che hanno gli stessi elementi
for p in permutazioni:
vuoto= list(set(p)-set(permutazione))
if len(vuoto)==0 and len(p)== len(permutazione) and p!= permutazione:
permutazioni.remove(p)
#------------valuto il layout con permutazione aggiunta---------------
score = val.score_function(penal1_dopo, penal4_dopo)#non ancora implementata fino alla fine
score_permutazioni_corrette.append(score)
#----------------------fine valutazione-----------------------------------
#disegno
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = 'permutazioni/14_stanza'+str(index)+'permutazioni_'+str(indice)+'_a', format='png')#TODO:DECOMMENTA SE NON SEI IN BATCH
else:
#elimina la permutazione perche' non e' valida
permutazioni.remove(permutazione)
#------
pgeom.elimina_celle_permutazione(permutazione, plan_o, s)
if ok ==True:
a=0
#dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = 'permutazioni/14_stanza'+str(index)+'permutazioni_'+str(indice)+'_b', format='png')#TODO:DECOMMENTA SE NON SEI IN BATCH
#------
print "permutazione", indice
#valuto la permutazione che mi permette di minimizzare lo score
if len(score_permutazioni_corrette)>0:
min_score = np.amin(score_permutazioni_corrette)
print "min_core", min_score
posizione_permutazione = score_permutazioni_corrette.index(min_score)
permutazione_migliore = permutazioni_corrette[posizione_permutazione]
#ottenuto lo score migliore lo confronto con lo score del layout originale e guardo quale a' migliore
#calcolo score del layout originale, senza previsioni
penal1_prima = val.penalita1(s)#piu' questo valore e' alto peggio e', valori prossimi allo zero indicano frome convesse.
penal4_prima = val.penalita4(s, plan_o, celle_di_altre_stanze)#conto il numero di extended che ci sono prima di aggiungere la permutazione
score_originale = val.score_function(penal1_prima, penal4_prima)#non ancora implementata fino alla fine
print "score_originale", score_originale
if min_score<=score_originale:
#preferisco fare una previsione
permutazione_migliore = permutazione_migliore
pgeom.aggiunge_celle_permutazione(permutazione_migliore, plan_o, s)
else:
#il layout originale ottenuto e' migliore di tutti gli altri, non faccio nessuana previsione per la stanza corrente
pass
else:
#non ho trovato permutazioni che hanno senso, allora lascio tutto come e'
pass
#disegno le computazioni migliori TODO: momentaneo, solo perche' in questo momento uso solo la penalita' della convessita'
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '14_stanza'+str(index)+'azione_complessa', format='png')
#---------------------------FINE AZIONE COMPLESSA-----------------------------
# for r in permutazioni:
# print r
# print "\n\n"
#
# poligoni= []
# colori=[]
# for ele in elementi:
# poligoni.append(ele.cella)
# colori.append('#800000')
#
# dsg.disegna_stanze(poligoni,colori , xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '15_poligoni_esterni_stanza'+str(index), format='png')
#
#-----------------------------AZIONE COMPLESSA--------------------------------
#stampo il layout finale
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '15_azione_complessa', format='png')
if par.azioni_semplici==True:
#------------------------------AZIONE GEOMETRICA 1)+2)--------------------------------
#-------------------------------AZIONE GEOMETRICA 1)----------------------------------
#-----AGGIUNGO CELLE OUT A CELLE PARZIALI SOLO SE QUESTE CELLE OUT SONO STATE TOCCANTE DAL BEAM DEL LASER
celle_candidate = []
for s in spazi_parziali:
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#unisco solo se le celle sono state toccate dal beam del laser
celle_confinanti = plan.trova_celle_toccate_dal_laser_beam(celle_confinanti, immagine_cluster)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#faccio una prova per unire una cella che e' toccata dal beam del laser.
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
if cella.vedo_frontiera == True:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13c_azione_geom_1', format='png')
#-------------------------------AZIONE GEOMETRICA 2)-----------------------------------
#--UNISCO LE CELLE IN BASE ALLE PARETI CHE CONDIVIDONO CON ALTRE STANZE
for s in spazi_parziali:
#estraggo le celle out che confinano con le celle parziali
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#delle celle confinanti appena estratte devo prendere solamente quelle che hanno tutti i lati supportati da una extended line
celle_confinanti = pgeom.estrai_celle_supportate_da_extended_segmement(celle_confinanti, s, plan_o.extended_segments)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#unisco solo quelle selezionate
#TODO questa parte e' da cancellare
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13e_azione_geom_1_piu_geom_2', format='png')
#----------------------------------FINE 1)+2)-----------------------------------------
#----------------------------FACCIO SOLO AZIONE GEOM 2)-------------------------------
#questa azione la faccio su una copia di plan
#ricavo gli spazi parziali dalla copia di plan_o che sono esattamente una copia di spazi_parziali precedente.
cellette_out = plan_o_2.cellette_esterne
spazi_parziali = []
for s in plan_o_2.spazi:
if s.parziale == True:
spazi_parziali.append(s)
cella_prova =None#eli
spp = None#eli
for s in spazi_parziali:
#estraggo le celle out che confinano con le celle parziali
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o_2, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#delle celle confinanti appena estratte devo prendere solamente quelle che hanno tutti i lati supportati da una extended line
celle_confinanti = pgeom.estrai_celle_supportate_da_extended_segmement(celle_confinanti, s, plan_o_2.extended_segments)
print "le celle confinanti sono2: ", len(celle_confinanti)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
print "le celle confinanti sono3: ", len(celle_confinanti)
#unisco solo quelle selezionate
#TODO questa parte e' da cancellare
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
sp.aggiungi_cella_a_spazio(s, cella, plan_o_2)
cella_prova = cella#elimina
spp = s#elimina
dsg.disegna_spazi(plan_o_2.spazi, dsg.get_colors(plan_o_2.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13d_azione_geom_2', format='png')
#----------------------------------FINE SOLO AZIONE GEOM 2)--------------------------
#------------------------CREO PICKLE--------------------------------------------------
#creo i file pickle per il layout delle stanze
print("creo pickle layout")
pk.crea_pickle((stanze, clustersCelle, estremi, colori, spazi, stanze_reali, colori_reali), path_obj.filepath_pickle_layout)
print("ho finito di creare i pickle del layout")
#creo i file pickle per il grafo topologico
print("creo pickle grafoTopologico")
pk.crea_pickle((stanze, clustersCelle, estremi, colori), path_obj.filepath_pickle_grafoTopologico)
print("ho finito di creare i pickle del grafo topologico")
#-----------------------CALCOLO ACCURACY----------------------------------------------
#L'accuracy e' da controllare, secondo me non e' corretta.
if par.mappa_completa:
#funzione per calcolare accuracy fc e bc
print "Inizio a calcolare metriche"
results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze_reali, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
#results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
if par.DISEGNA:
dsg.disegna_grafici_per_accuracy(stanze, stanze_gt, filepath = path_obj.filepath, format='png')
print "Fine calcolare metriche"
else:
#setto results a 0, giusto per ricordarmi che non ho risultati per le mappe parziali
results = 0
stanze_gt = ac.get_stanze_gt(path_obj.nome_gt, estremi, flip_dataset = False)
if par.DISEGNA:
#raccolgo i poligoni
stanze_acc = []
for spazio in plan_o.spazi:
stanze_acc.append(spazio.spazio)
dsg.disegna_grafici_per_accuracy(stanze_acc, stanze_gt, filepath = path_obj.filepath, format='png')
#in questa fase il grafo non e' ancora stato classificato con le label da dare ai vai nodi.
#-------------------------------------------------------------------------------------
#creo il file xml dei parametri
par.to_XML(parametri_obj, path_obj)
#-------------------------prova transitional kernels----------------------------------
#splitto una stanza e restituisto la nuova lista delle stanze
#stanze, colori = tk.split_stanza_verticale(2, stanze, colori,estremi)
#stanze, colori = tk.split_stanza_orizzontale(3, stanze, colori,estremi)
#stanze, colori = tk.slit_all_cell_in_room(spazi, 1, colori, estremi) #questo metodo e' stato fatto usando il concetto di Spazio, dunque fai attenzione perche' non restituisce la cosa giusta.
#stanze, colori = tk.split_stanza_reverce(2, len(stanze)-1, stanze, colori, estremi) #questo unisce 2 stanze precedentemente splittate, non faccio per ora nessun controllo sul fatto che queste 2 stanze abbiano almeno un muro in comune, se sono lontani succede un casino
#-----------------------------------------------------------------------------------
#-------------------------MAPPA SEMANTICA-------------------------------------------
'''
#in questa fase classifico i nodi del grafo e conseguentemente anche quelli della mappa.
#gli input di questa fase non mi sono ancora molto chiari
#per ora non la faccio poi se mi serve la copio/rifaccio, penso proprio sia sbagliata.
#stanze ground truth
(stanze_gt, nomi_stanze_gt, RC, RCE, FCES, spaces, collegate_gt) = sema.get_stanze_gt(nome_gt, estremi)
#corrispondenze tra gt e segmentate (backward e forward)
(indici_corrispondenti_bwd, indici_gt_corrispondenti_fwd) = sema.get_corrispondenze(stanze,stanze_gt)
#creo xml delle stanze segmentate
id_stanze = sema.crea_xml(nomeXML,stanze,doorsVertices,collegate,indici_gt_corrispondenti_fwd,RCE,nomi_stanze_gt)
#parso xml creato, va dalla cartella input alla cartella output/xmls, con feature aggiunte
xml_output = sema.parsa(dataset_name, nomeXML)
#classifico
predizioniRCY = sema.classif(dataset_name,xml_output,'RC','Y',30)
predizioniRCN = sema.classif(dataset_name,xml_output,'RC','N',30)
predizioniFCESY = sema.classif(dataset_name,xml_output,'RCES','Y',30)
predizioniFCESN = sema.classif(dataset_name,xml_output,'RCES','N',30)
#creo mappa semantica segmentata e ground truth e le plotto assieme
sema.creaMappaSemantica(predizioniRCY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniRCN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
'''
#-----------------------------------------------------------------------------------
print "to be continued..."
return results
#TODO
def load_main(filepath_pickle_layout, filepath_pickle_grafoTopologico, parXML):
    """Reload a previously saved segmentation session from disk.

    Parameters:
        filepath_pickle_layout -- path of the pickle written for the layout,
            containing (stanze, clustersCelle, estremi, colori, spazi,
            stanze_reali, colori_reali).
        filepath_pickle_grafoTopologico -- path of the pickle written for the
            topological graph.
        parXML -- path of the XML file holding the run parameters.

    NOTE(review): this is still a stub -- the loaded data is unpacked into
    locals but not used yet; the original TODO ("continuare il metodo da
    qui" = "continue the method from here") is preserved below.
    """
    # Load the layout pickle. A context manager guarantees the file handle
    # is closed even if unpickling raises (the original leaked both handles).
    with open(filepath_pickle_layout, 'rb') as pkl_file:
        data1 = pickle.load(pkl_file)
    stanze = data1[0]
    clustersCelle = data1[1]
    estremi = data1[2]
    colori = data1[3]
    spazi = data1[4]
    stanze_reali = data1[5]
    colori_reali = data1[6]
    #print "controllo che non ci sia nulla di vuoto", len(stanze), len(clustersCelle), len(estremi), len(spazi), len(colori)
    # Load the topological-graph pickle.
    # NOTE(review): the graph pickle is created from (stanze, clustersCelle,
    # estremi, colori) -- see the pk.crea_pickle call in start_main -- so the
    # variable names below may not match the real content; verify before
    # completing this method.
    with open(filepath_pickle_grafoTopologico, 'rb') as pkl_file2:
        data2 = pickle.load(pkl_file2)
    G = data2[0]
    pos = data2[1]
    stanze_collegate = data2[2]
    doorsVertices = data2[3]
    # Rebuild the parameter and path objects from the saved XML configuration.
    new_parameter_obj, new_path_obj = par.load_from_XML(parXML)
    # TODO: continue the method from here.
def makeFolders(location,datasetList):
    """Ensure the output folder and its "<dataset>_pickle" companion exist
    for every dataset name.

    Parameters:
        location -- base output path (expected to end with a path separator).
        datasetList -- iterable of dataset names.

    Fix: each directory is now checked independently. Previously, when the
    dataset folder already existed but its "_pickle" twin did not, the twin
    was never created, and the later os.mkdir(SAVE_PICKLE) call in main()
    would crash on the missing parent directory.
    """
    for dataset in datasetList:
        for folder in (location + dataset, location + dataset + "_pickle"):
            if not os.path.exists(folder):
                os.mkdir(folder)
def main():
    """Entry point of the segmentation experiments.

    Depending on par.AZIONE the script either batch-processes every PNG map
    of the selected datasets ("batch") or handles a single map
    ("mappa_singola"); when par.LOADMAIN is True it reloads previously
    pickled results instead of recomputing them.  Progress is mirrored to a
    time-stamped log file under ./log, and per-dataset mean accuracies are
    appended to an accuracy.txt file.
    """
    start = time.time()
    # Known-problems banner (runtime output: kept verbatim, in Italian).
    print ''' PROBLEMI NOTI \n
    1] LE LINEE OBLIQUE NON VANNO;\n
    2] NON CLASSIFICA LE CELLE ESTERNE CHE STANNO DENTRO IL CONVEX HULL, CHE QUINDI VENGONO CONSIDERATE COME STANZE;\n
    OK 3] ACCURACY NON FUNZIONA;\n
    4] QUANDO VENGONO RAGGRUPPATI TRA DI LORO I CLUSTER COLLINEARI, QUESTO VIENE FATTO A CASCATA. QUESTO FINISCE PER ALLINEARE ASSIEME MURA MOLTO DISTANTI;\n
    5] IL SISTEMA E' MOLTO SENSIBILE ALLA SCALA. BISOGNEREBBE INGRANDIRE TUTTE LE IMMAGINI FACENDO UN RESCALING E RISOLVERE QUESTO PROBLEMA. \n
    [4-5] FANNO SI CHE I CORRIDOI PICCOLI VENGANO CONSIDERATI COME UNA RETTA UNICA\n
    6] BISOGNEREBBE FILTRARE LE SUPERFICI TROPPO PICCOLE CHE VENGONO CREATE TRA DEI CLUSTER;\n
    7] LE IMMAGINI DI STAGE SONO TROPPO PICCOLE; VANNO RIPRESE PIU GRANDI \n
    >> LANCIARE IN BATCH SU ALIENWARE\n
    >> RENDERE CODICE PARALLELO\n
    8] MANCANO 30 DATASET DA FARE CON STAGE\n
    9] OGNI TANTO NON FUNZIONA IL GET CONTORNO PERCHE SBORDA ALL'INTERNO\n
    >> PROVARE CON SCAN BORDO (SU IMMAGINE COPIA)\n
    >> PROVARE A SETTARE IL PARAMETRO O A MODIFICARE IL METODO DI SCAN BORDO\n
    >> CERCARE SOLUZIONI ALTERNATIVE (ES IDENTIFICARE LE CELLE ESTERNE)\n
    OK 10] VANNO TARATI MEGLIO I PARAMETRI PER IL CLUSTERING\n
    >> I PARAMETRI DE CLUSTERING SONO OK; OGNI TANTO FA OVERSEGMENTAZIONE.\n
    >>> EVENTUALMENTE SE SI VEDE CHE OVERSEGMENTAZIONE SONO UN PROBLEMA CAMBIARE CLUSTERING O MERGE CELLE\n
    11] LE LINEE DELLA CANNY E HOUGH TALVOLTA SONO TROPPO GROSSE \n
    >> IN REALTA SEMBRA ESSERE OK; PROVARE CON MAPPE PIU GRANDI E VEDERE SE CAMBIA.
    12] BISOGNEREBBE AUMENTARE LA SEGMENTAZIONE CON UN VORONOI
    OK 13] STAMPA L'IMMAGINE DELLA MAPPA AD UNA SCALA DIVERSA RISPETTO A QUELLA VERA.\n
    OK 14] RISTAMPARE SCHOOL_GT IN GRANDE CHE PER ORA E' STAMPATO IN PICCOLO (800x600)\n
    OK VEDI 10] 15] NOI NON CALCOLIAMO LA DIFFUSION DEL METODO DI MURA; PER ALCUNI VERSI E' UN BENE PER ALTRI NO\n
    OK VEDI 4] 16] NON FACCIAMO IL CLUSTERING DEI SEGMENTI IN MANIERA CORRETTA; DOVREMMO SOLO FARE MEANSHIFT\n
    17] LA FASE DEI SEGMENTI VA COMPLETAMENTE RIFATTA; MEANSHIFT NON FUNZIONA COSI'; I SEGMENTI HANNO UN SACCO DI "==" CHE VANNO TOLTI; SPATIAL CLUSTRING VA CAMBIATO;\n
    18] OGNI TANTO IL GRAFO TOPOLOGICO CONNETTE STANZE CHE SONO ADIACENTI MA NON CONNESSE. VA RIVISTA LA PARTE DI MEDIALAXIS;\n
    19] PROVARE A USARE L'IMMAGINE CON IL CONTORNO RICALCATO SOLO PER FARE GETCONTOUR E NON NEGLI ALTRI STEP.\n
    20] TOGLIERE THRESHOLD + CANNY -> USARE SOLO CANNY.\n
    21] TOGLIERE LE CELLE INTERNE CHE SONO BUCHI.\n
    >> USARE VORONOI PER CONTROLLARE LA CONNETTIVITA.\n
    >> USARE THRESHOLD SU SFONDO \n
    >> COMBINARE I DUE METODI\n
    22] RIMUOVERE LE STANZE ERRATE:\n
    >> STANZE "ESTERNE" INTERNE VANNO TOLTE IN BASE ALLE CELLE ESTERNE\n
    >> RIMUOVERE STANZE CON FORME STUPIDE (ES PARETI LUNGHE STRETTE), BISOGNA DECIDERE SE ELIMINARLE O INGLOBARLE IN UN ALTRA STANZA\n
    23] RISOLVERE TUTTI I WARNING.\n
    da chiedere: guardare il metodo clustering_dbscan_celle(...) in layout la riga
    af = DBSCAN(eps, min_samples, metric="precomputed").fit(X) non dovrebbe essere cosi?
    af = DBSCAN(eps= eps, min_samples = min_samples, metric="precomputed").fit(X)
    '''
    # Usage banner (runtime output: kept verbatim, in Italian).
    print '''
    FUNZIONAMENTO:\n
    SELEZIONARE SU QUALI DATASETs FARE ESPERIMENTI (variabile DATASETs -riga165- da COMMENTARE / DECOMMENTARE)\n
    SPOSTARE LE CARTELLE CON I NOMI DEI DATASET CREATI DALL'ESPERIMENTO PRECEDENTE IN UNA SOTTO-CARTELLA (SE TROVA UNA CARTELLA CON LO STESSO NOME NON CARICA LA MAPPA)\n
    SETTARE I PARAMERI \n
    ESEGUIRE\n
    OGNI TANTO IL METODO CRASHA IN FASE DI VALUTAZIONE DI ACCURATEZZA. NEL CASO, RILANCIARLO\n
    SPOSTARE TUTTI I RISULTATI IN UNA CARTELLA IN RESULTS CON UN NOME SIGNIFICATIVO DEL TEST FATTO\n
    SALVARE IL MAIN DENTRO QUELLA CARTELLA\n
    '''
    #------------------- PARAMETERS ----------------------------------------------------
    # load the default algorithm parameters
    parametri_obj = par.Parameter_obj()
    # load the default input/output paths
    path_obj = par.Path_obj()
    #-----------------------------------------------------------------------------------
    makeFolders(path_obj.OUTFOLDERS,path_obj.DATASETs)
    skip_performed = True
    #-----------------------------------------------------------------------------------
    # create a time-stamped folder under ./log that holds this run's log
    our_time = str(dt.datetime.now())[:-10].replace(' ','@') #get current time
    SAVE_FOLDER = os.path.join('./log', our_time)
    if not os.path.exists(SAVE_FOLDER):
        os.mkdir(SAVE_FOLDER)
    SAVE_LOGFILE = SAVE_FOLDER+'/log.txt'
    #------------------------------------------------------------------------------------
    with open(SAVE_LOGFILE,'w+') as LOGFILE:
        print "AZIONE", par.AZIONE
        print >>LOGFILE, "AZIONE", par.AZIONE
        shutil.copy('./minibatch.py',SAVE_FOLDER+'/minibatch.py') # snapshot the main script next to the log
        shutil.copy('./parameters.py',SAVE_FOLDER+'/parameters.py') # snapshot the parameter file next to the log
        if par.AZIONE == "batch":
            if par.LOADMAIN==False:
                print >>LOGFILE, "SONO IN MODALITA' START MAIN"
            else:
                print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
            print >>LOGFILE, "-----------------------------------------------------------"
            # process every PNG map of every selected dataset
            for DATASET in path_obj.DATASETs :
                print >>LOGFILE, "PARSO IL DATASET", DATASET
                global_results = []
                print 'INIZIO DATASET ' , DATASET
                for metricMap in glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png') :
                    print >>LOGFILE, "---parso la mappa: ", metricMap
                    print 'INIZIO A PARSARE ', metricMap
                    path_obj.metricMap =metricMap
                    map_name = metricMap.split('/')[-1][:-4]
                    #print map_name
                    SAVE_FOLDER = path_obj.OUTFOLDERS+DATASET+'/'+map_name
                    SAVE_PICKLE = path_obj.OUTFOLDERS+DATASET+'_pickle/'+map_name.split('.')[0]
                    if par.LOADMAIN==False:
                        if not os.path.exists(SAVE_FOLDER):
                            os.mkdir(SAVE_FOLDER)
                            os.mkdir(SAVE_PICKLE)
                        else:
                            # skip experiments whose output folder already exists
                            if skip_performed :
                                print 'GIA FATTO; PASSO AL SUCCESSIVO'
                                continue
                    #print SAVE_FOLDER
                    path_obj.filepath = SAVE_FOLDER+'/'
                    path_obj.filepath_pickle_layout = SAVE_PICKLE+'/'+'Layout.pkl'
                    path_obj.filepath_pickle_grafoTopologico = SAVE_PICKLE+'/'+'GrafoTopologico.pkl'
                    # NOTE(review): both branches evaluate to '' -- add_name is currently a no-op.
                    add_name = '' if DATASET == 'SCHOOL' else ''
                    if par.mappa_completa == False:
                        nome = map_name.split('_updated')[0]
                        path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+nome+'_updated.xml'
                    else:
                        path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+map_name+add_name+'.xml'
                    #-------------------- per-dataset parameters ------------------------
                    # each dataset family uses slightly different tuning values
                    parametri_obj.minLateralSeparation = 7 if (DATASET=='SCHOOL' or DATASET=='PARZIALI' or DATASET=='SCHOOL_grandi') else 15
                    #parametri_obj.cv2thresh = 150 if DATASET == 'SCHOOL' else 200
                    parametri_obj.cv2thresh = 150 if (DATASET=='SCHOOL' or DATASET=='PARZIALI' or DATASET == 'SCHOOL_grandi') else 200
                    parametri_obj.flip_dataset = True if DATASET == 'SURVEY' else False
                    #--------------------------------------------------------------------
                    #------------------- EXECUTION --------------------------------------
                    if par.LOADMAIN==False:
                        print "start main"
                        results = start_main(parametri_obj, path_obj)
                        global_results.append(results);
                        # after the last map, compute the dataset-wide mean accuracies
                        if metricMap == glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png')[-1]:
                            accuracy_bc_medio = []
                            accuracy_bc_in_pixels = []
                            accuracy_fc_medio = []
                            accuracy_fc_in_pixels=[]
                            for i in global_results :
                                accuracy_bc_medio.append(i[0])
                                accuracy_fc_medio.append(i[2])
                                accuracy_bc_in_pixels.append(i[4])
                                accuracy_fc_in_pixels.append(i[5])
                            filepath= path_obj.OUTFOLDERS+DATASET+'/'
                            print filepath
                            # append the mean accuracies to the dataset's accuracy.txt
                            f = open(filepath+'accuracy.txt','a')
                            #f.write(filepath)
                            f.write('accuracy_bc = '+str(np.mean(accuracy_bc_medio))+'\n')
                            f.write('accuracy_bc_pixels = '+str(np.mean(accuracy_bc_in_pixels))+'\n')
                            f.write('accuracy_fc = '+str(np.mean(accuracy_fc_medio))+'\n')
                            f.write('accuracy_fc_pixels = '+str(np.mean(accuracy_fc_in_pixels))+'\n\n')
                            f.close()
                        LOGFILE.flush()
                    elif par.LOADMAIN==True:
                        print "load main"
                        print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
                        load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
                        LOGFILE.flush()
                    else :
                        continue
                    # NOTE(review): this break leaves the metricMap loop after the
                    # first map that is actually processed (the 'continue' above
                    # skips it) -- confirm it is intentional and not debug leftover.
                    break
                LOGFILE.flush()
        elif par.AZIONE =='mappa_singola':
            #------------------- single-map execution ----------------------------
            if par.LOADMAIN==False:
                print "start main"
                print >>LOGFILE, "SONO IN MODALITA' START MAIN"
                print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
                start_main(parametri_obj, path_obj)
                LOGFILE.flush()
            else:
                print "load main"
                print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
                print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
                load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
                LOGFILE.flush()
    #------------------- elapsed time --------------------------------------------------
    fine = time.time()
    elapsed = fine-start
    print "la computazione ha impiegato %f secondi" % elapsed
# Standard script guard: run the pipeline only when executed directly.
if __name__ == '__main__':
    main()
"matteo.luperto@polimi.it"
] | matteo.luperto@polimi.it |
f48f72b17ce051d9183e15e314f5e015b0dbba9e | de15fe164850e5fb84b0e0a3bf98aa6a9f38e109 | /recreation/migrations/0001_initial.py | cbfd222d44d66a1a7425fa7bca5b5092c11beb0a | [] | no_license | arthurarp/api_rest-django | d6b88851153005c8fa24c6c5a680991a1f10817a | 3f4b6c90843d11feef4f0aa4a8e49fc8e6bbb665 | refs/heads/master | 2023-04-08T00:34:12.268906 | 2020-02-29T21:37:38 | 2020-02-29T21:37:38 | 240,120,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # Generated by Django 2.2.10 on 2020-02-17 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration for the ``recreation`` app: creates ``Recreation``."""

    # First migration of this app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Recreation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('description', models.TextField(default=None)),
                ('opening_hours', models.TextField(default=None)),
                ('minimum_age', models.IntegerField()),
            ],
        ),
    ]
| [
"arthurarp@hotmail.com"
] | arthurarp@hotmail.com |
28c1dfc61258264f239362b028882e8afb8f980b | b6285bd7cc7695c76877686c9567743b051a3339 | /app1/migrations/0017_job.py | 0434e40d0b0b3f6afe2526b3b521bb4c26674b68 | [] | no_license | Janeclear/newone-1 | bd09cbbe37bbd675d4bb21e67aa8e626cf5b4800 | 623a528ef26b3aa307d439103c25da8242509f2d | refs/heads/master | 2022-07-28T08:46:40.902787 | 2020-05-21T11:50:56 | 2020-05-21T11:50:56 | 254,839,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2020-04-20 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the ``Job`` model (job posting with salary band choices in Chinese)."""

    dependencies = [
        ('app1', '0016_auto_20200418_1849'),
    ]

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('job_name', models.CharField(blank=True, default=0, max_length=100, unique=True)),
                ('description', models.CharField(blank=True, default=0, max_length=512)),
                ('requirement', models.CharField(blank=True, default=0, max_length=512)),
                ('salary', models.CharField(blank=True, choices=[('3000以下', '3000以下'), ('3000-5000', '3000-5000'), ('5000-8000', '5000-8000'), ('8000-10000', '8000-10000'), ('10000以上', '10000以上')], default='3000以下', max_length=32)),
                ('character', models.CharField(blank=True, default=0, max_length=256)),
            ],
        ),
    ]
| [
"1255019407@qq.com"
] | 1255019407@qq.com |
03bc87f53f061e3f2e48f8dad7316e3b8b59329f | 884d1630093460668c40abf0313a47fddc25bd88 | /Oil.py | fc74e26121fe0148f4b5db4c37fe2658fcc67653 | [] | no_license | Gregoryish/MohovHW | c087bb61784b8baa70403bbd8406b211fdbc0644 | da4d2b27639dcc05931d534df86b729fca91881e | refs/heads/master | 2022-04-23T01:18:25.379048 | 2020-04-14T06:08:36 | 2020-04-14T06:08:36 | 254,909,289 | 0 | 0 | null | 2020-04-14T06:08:37 | 2020-04-11T16:31:37 | Jupyter Notebook | UTF-8 | Python | false | false | 8,370 | py | #!/usr/bin/env python
# coding: utf-8
# Иходные данные
#
# 1. density_oil_without_gas - плотность дегазированной нефти ( p_0 = 0.1 *10^6 МПа, Tст = 293 К), кг/м^3
# 2. viscosity_without_gas - вязкость нефти в стандартных условиях , мПа*с
# 3. gas_saturation - газонасыщенность (газосодержание) пластовой нефти, т.е. отношение объёма газа, растворённого в нефти, к массе сепарированной нефти м3/т (объём газа приведен к нормальным условиям)
# 4. relative_density_gas (relative_density) - относительная плотность газа по воздуху
# 5. T_formation - пластовая температура, К
# 6. pressure_formation - пластовое давление, МПа
# 7. pressure_bubble_point_form - давление насыщения пластовой нефти при пластовой температуре, МПа
# 8. y_a, y_c1 - молярная доли азота и метана в попутном газе однократного разгазирования нефти до (0.1 МПа, 293 К)
#
#
# In[4]:
import numpy as np
import math
# In[2]:
#1 определяем термодинамические условия разгазирования p, T
#TODO
#2 равновесное давление насыщения при T<=Tпл
def _pressure_bbp_T (pressure_bubble_point_form, T_formation, T_current, gas_saturation, y_a, y_c1):
pressure_bbp_T = pressure_bubble_point_form - (T_formation - T_current)/(9.157 + 701.8/(gas_saturation*(y_c1 - 0.8*y_a)))
return round(pressure_bbp_T, 3)
# In[8]:
# 3
def _R(p, pressure_bbp_T):
R = (1+math.log10(p))/(1+math.log10(pressure_bbp_T)) -1
return round(R, 4)
def _m (T, density_oil_without_gas, relative_density):
m = 1+0.029*(T-293)*(density_oil_without_gas*relative_density*10**-3 - 0.7966)
return round(m, 4)
def _D (T, density_oil_without_gas, relative_density):
D = (10**-3)*density_oil_without_gas*relative_density*(4.5 - 0.00305*(T-293))-4.785
return round(D, 4)
# приведённый к нормальным условиям удельный объём выделившегося газа
def _volume_separate_gas (gas_saturation, R, m, D,):
volume_separate_gas = gas_saturation*R*m*(D*(1+R)-1)
return round(volume_separate_gas, 4)
# In[14]:
#4 рассчёт остаточной газонасыщенности нефти (удельный объём растворенного раза) в процессе её разгазирвоания
def _volume_dissolved_gas (gas_saturation, m, volume_separate_gas):
volume_dissolved_gas = gas_saturation*m - volume_separate_gas
return round(volume_dissolved_gas, 2)
# In[18]:
#5 относительная плотность выделившегося газа (p, T)
def _relative_density_separate_gas (relative_density, a, u, R):
relative_density_separate_gas = a*(relative_density - 0.0036*(1+R)*(105.7 + u*R))
return round(relative_density_separate_gas, 4)
def _a (T):
a = 1 + 0.0054*(T-293)
return a
def _u(density_oil_without_gas, gas_saturation):
u = 10**-3*density_oil_without_gas*gas_saturation - 186
return u
# In[19]:
#6 находим относительную плотность растворённого раза, остающегося в нефти при данных условиях её разгазирования (p, T)
def _relative_density_dissolved_gas (gas_saturation, a, m, relative_density_gas, relative_density_separate_gas, volume_separate_gas, volume_dissolved_gas):
relative_density_dissolved_gas = gas_saturation*(a*m*relative_density_gas - relative_density_separate_gas*volume_separate_gas/gas_saturation)/volume_dissolved_gas
return round (relative_density_dissolved_gas , 4)
# In[39]:
#7 рассчитываем объёмный коэффициент,
def _b_oil(p ,T ,density_oil_without_gas, volume_dissolved_gas, lambda_T, m, alpha_n):
b_oil = 1 + 1.0733*10**-3*density_oil_without_gas*volume_dissolved_gas*lambda_T/m +alpha_n*(T-293) - 6.5*10**-4*p
return round(b_oil,3)
# предварительно определив удельное приращение объёма нефти за счёт единичного изменения газонасыщенности lambda_T
def _lambda_T(density_oil_without_gas, relative_density_dissolved_gas, a, volume_dissolved_gas):
lambda_T = 10**-3*(4.3-3.54*10**-3*density_oil_without_gas + 1.0337*relative_density_dissolved_gas/a +5.581*10**-6*density_oil_without_gas*(1-1.61*10**-6*density_oil_without_gas*volume_dissolved_gas) * volume_dissolved_gas)
return round(lambda_T, 6)
# температурный коэффициент объёмного расширения дегазированной нефти при стандартном давлении
def _alpha_n(density_oil_without_gas):
if 780<=density_oil_without_gas<=860:
alpha_n = 10**-3*(3.083-2.638*10**-3*density_oil_without_gas)
if 860<=density_oil_without_gas<=960:
alpha_n = 10**-3*(2.513-1.975*10**-3*density_oil_without_gas)
return round(alpha_n, 8)
# In[25]:
#8 определяем плотность газонасыщенной нефти
def _density_oil_with_gas (density_oil_without_gas, relative_density_dissolved_gas, volume_dissolved_gas, a, m, b_oil,):
density_oil_with_gas = density_oil_without_gas*(1+1.293*10**-3*relative_density_dissolved_gas*volume_dissolved_gas/(a*m))/b_oil
return round(density_oil_with_gas, 3)
# In[32]:
#9 определяем вязкость дегазированной нефти при p_0 , T
def _viscosity_without_gas_T ( density_without_gas, T, a, b,viscosity_without_gas=0):
if viscosity_without_gas == 0:
viscosity_without_gas = _Dunyshkin_viscosity_without_gas(density_without_gas)
viscosity_without_gas = viscosity_without_gas*(T-293)**a*math.exp(1)**(b*(293-T))
return round(viscosity_without_gas,3)
def _Dunyshkin_viscosity_without_gas (density_without_gas):
if 845<density_without_gas<924:
viscosity_without_gas = ((0.658*density_without_gas**2)/(886*10**3-density_without_gas**2))**2
if 780<density_without_gas<=845:
viscosity_without_gas = ((0.456*density_without_gas**2)/(833*10**3-density_without_gas**2))**2
return round (viscosity_without_gas, 3)
def _a_viscosity (T):
a = 10**(-0.0175*(293-T)-2.58)
return a
def _b_viscosity (density_without_gas, T, viscosity_without_gas=0):
if viscosity_without_gas == 0:
viscosity_without_gas = _Dunyshkin_viscosity_without_gas(density_without_gas)
b_viscosity = (8*10**-5*density_without_gas-0.047)*viscosity_without_gas**(0.13+0.002*(T-293))
return round(b_viscosity,4)
# In[41]:
#10 определяем вязкость газонасыщенной нефти
def _viscosity_dissolved_gas(A_visc_dissolved, B_visc_dissolved, viscosity_without_gas_T):
viscosity_dissolved_gas = A_visc_dissolved*viscosity_without_gas_T**B_visc_dissolved
return viscosity_dissolved_gas
def _A_visc_dissolved (volume_dissolved_prived):
A_visc_dissolved = 1 + 0.0129*volume_dissolved_prived - 0.0364*volume_dissolved_prived**0.85
return round(A_visc_dissolved,4)
def _B_visc_dissolved (volume_dissolved_prived):
B_visc_dissolved = 1 + 0.0017*volume_dissolved_prived - 0.0228*volume_dissolved_prived**0.667
return round(B_visc_dissolved, 4)
#приведенный объём газа растворенного в нефти к стандартным условиям
def _volume_dissolved_prived (volume_dissolved_gas, density_oil_without_gas, alpha_n):
volume_dissolved_prived = 1.055*10**-3*(1+5*alpha_n)*volume_dissolved_gas*density_oil_without_gas
return round(volume_dissolved_prived,3)
# In[38]:
#11 рассчёт повернхностного натяжения
def _sigma_oil_gas(p,T):
sigma_oil_gas = (1/10)**(1.58+0.05*p) - 72*10**-6*(T-305)
return round(sigma_oil_gas, 4)
# In[ ]:
# In[ ]:
| [
"gregoryish@gmail.com"
] | gregoryish@gmail.com |
0361b75dc0630118ca7291ef92d6eedb19e0f3ed | f0c35cd1d458f2f9ec1c605d73b9fc4738f62986 | /web/admin/forms.py | 59ea21852a00a9dacdc2d9f95b918f1dafa08ad3 | [] | no_license | dougmpx/xiaoli | 9e57c7bdd1d6e9ab55adb657ad5fa9d10dbe2a50 | 88f28754d1a67351b90461ad004ca5d36dde1e02 | refs/heads/master | 2021-04-15T07:39:06.655988 | 2013-01-05T08:10:02 | 2013-01-05T08:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,099 | py | #coding=utf-8
from tango.models import db, Category
from nodes.models import Vendor, Model
from .models import Miboid, Module
from flask_wtf import Form, TextField, PasswordField, HiddenField, SelectField, IntegerField, \
QuerySelectField, TextAreaField, widgets, ValidationError, required, equal_to, email
class SearchForm(Form):
    """Generic keyword-search form used by the admin list views."""
    keyword = TextField()
class CategoryForm(Form):
    """Create/edit form for a Category record (labels are in Chinese)."""
    # NOTE: attribute name shadows builtin id(); kept for template compatibility.
    id = TextField(validators=[required(message=u'必填')])
    obj = TextField(u'分组', [required(message=u'必填')])
    name = TextField(u'名称', [required(message=u'必填')])
    alias = TextField(u'显示名', [required(message=u'必填')])
    is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
class PermissionForm(Form):
    """Edit form for a permission entry (endpoint, module/operation, default grant)."""
    endpoint = TextField(u'Endpoint')
    module_text = TextField(u'模块显示名')
    name = TextField(u'子模块显示名')
    operation = TextField(u'操作名')
    default_permission = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无权限'),(u'1', u'有权限')])
    # Redirect target after submit.  NOTE: shadows builtin next().
    next = HiddenField()
class VendorForm(Form):
    """Create/edit form for a hardware Vendor."""
    name = TextField(u'名称', [required(message=u'必填')])
    alias = TextField(u'显示名', [required(message=u'必填')])
    url = TextField(u'厂商主页')
    is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
class ModelForm(Form):
    """Create/edit form for a device Model, linked to a Category and a Vendor."""
    # Only node categories are selectable here.
    category = QuerySelectField(u'类别', get_label=u'alias',
                                query_factory=lambda: Category.query.filter_by(obj='node'))
    name = TextField(u'名称', [required(message=u'必填')])
    alias = TextField(u'显示名', [required(message=u'必填')])
    sysoid = TextField(u'Sysoid')
    vendor = QuerySelectField(u'厂商', get_label=u'alias',
                              query_factory=lambda: Vendor.query)
    is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
    remark = TextAreaField(u'备注')
class SysoidForm(Form):
    """Mapping form from an SNMP sysOid to a device model and a MIB file."""
    sysoid = TextField(u'SysOid', [required(message=u'必填')])
    model = QuerySelectField(u'设备型号', get_label=u'alias',
                             query_factory=lambda:Model.query)
    disco = TextField(u'发现模块')
    # Distinct MIB file names; the value itself doubles as the primary key.
    mib = QuerySelectField(u'Mib文件', get_pk=lambda x: x, get_label=lambda x: x,
                           query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
    remark = TextAreaField(u'备注')
class ModuleForm(Form):
    """Create/edit form for a polling Module (period / retries / timeout)."""
    name = TextField(u'名称', [required(message=u'必填')])
    alias = TextField(u'显示名', [required(message=u'必填')])
    period = IntegerField(u'周期(min)')
    retries = IntegerField(u'重试次数(次)')
    timeout = IntegerField(u'超时(s)')
    remark = TextAreaField(u'备注')
class MonitorForm(Form):
    """Edit form for a monitor rule binding a device match to a module and MIB."""
    category = TextField(u'分类')
    vendor = TextField(u'供应商')
    sysoid = TextField(u'Sysoid')
    match = TextField(u'匹配规则')
    module = QuerySelectField(u'采集模块', get_label=u'alias',
                              query_factory=lambda:Module.query)
    # Distinct MIB file names; the value itself doubles as the primary key.
    mib = QuerySelectField(u'Mib文件', get_pk=lambda x: x, get_label=lambda x: x,
                           query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
    remark = TextAreaField(u'备注')
class MiboidForm(Form):
    """Create/edit form for a single MIB OID entry."""
    mib = TextField(u'mib', [required(message=u'必填')])
    grp = TextField(u'分组', [required(message=u'必填')])
    name = TextField(u'名称', [required(message=u'必填')])
    alias = TextField(u'显示名', [required(message=u'必填')])
    oid = TextField(u'oid')
    is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
    remark = TextAreaField(u'备注')
| [
"thewawar@gmail.com"
] | thewawar@gmail.com |
50f4218bab8cab402a3642b888fffb7a6a8f06f5 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/update_edge_node_device_response.py | c0a8a018e150454b0fe2df63d8f1a2d583739033 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,496 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateEdgeNodeDeviceResponse(SdkResponse):
    """Response model for the "update edge node device" API.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values must be masked when serialized.
    sensitive_list = []

    openapi_types = {
        'delete_connector': 'bool',
        'deploy_connector': 'bool',
        'deployment_id': 'str',
        'update_devices': 'NodeDevice'
    }

    attribute_map = {
        'delete_connector': 'delete_connector',
        'deploy_connector': 'deploy_connector',
        'deployment_id': 'deployment_id',
        'update_devices': 'update_devices'
    }

    def __init__(self, delete_connector=None, deploy_connector=None, deployment_id=None, update_devices=None):
        """UpdateEdgeNodeDeviceResponse

        The model defined in huaweicloud sdk

        :param delete_connector: Reserved field for industrial terminal devices
        :type delete_connector: bool
        :param deploy_connector: Reserved field for industrial terminal devices
        :type deploy_connector: bool
        :param deployment_id: Reserved field for industrial terminal devices
        :type deployment_id: str
        :param update_devices:
        :type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
        """
        super(UpdateEdgeNodeDeviceResponse, self).__init__()

        self._delete_connector = None
        self._deploy_connector = None
        self._deployment_id = None
        self._update_devices = None
        self.discriminator = None

        # Only assign what the caller provided so unset fields stay None.
        if delete_connector is not None:
            self.delete_connector = delete_connector
        if deploy_connector is not None:
            self.deploy_connector = deploy_connector
        if deployment_id is not None:
            self.deployment_id = deployment_id
        if update_devices is not None:
            self.update_devices = update_devices

    @property
    def delete_connector(self):
        """Gets the delete_connector of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :return: The delete_connector of this UpdateEdgeNodeDeviceResponse.
        :rtype: bool
        """
        return self._delete_connector

    @delete_connector.setter
    def delete_connector(self, delete_connector):
        """Sets the delete_connector of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :param delete_connector: The delete_connector of this UpdateEdgeNodeDeviceResponse.
        :type delete_connector: bool
        """
        self._delete_connector = delete_connector

    @property
    def deploy_connector(self):
        """Gets the deploy_connector of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :return: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
        :rtype: bool
        """
        return self._deploy_connector

    @deploy_connector.setter
    def deploy_connector(self, deploy_connector):
        """Sets the deploy_connector of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :param deploy_connector: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
        :type deploy_connector: bool
        """
        self._deploy_connector = deploy_connector

    @property
    def deployment_id(self):
        """Gets the deployment_id of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :return: The deployment_id of this UpdateEdgeNodeDeviceResponse.
        :rtype: str
        """
        return self._deployment_id

    @deployment_id.setter
    def deployment_id(self, deployment_id):
        """Sets the deployment_id of this UpdateEdgeNodeDeviceResponse.

        Reserved field for industrial terminal devices

        :param deployment_id: The deployment_id of this UpdateEdgeNodeDeviceResponse.
        :type deployment_id: str
        """
        self._deployment_id = deployment_id

    @property
    def update_devices(self):
        """Gets the update_devices of this UpdateEdgeNodeDeviceResponse.

        :return: The update_devices of this UpdateEdgeNodeDeviceResponse.
        :rtype: :class:`huaweicloudsdkief.v1.NodeDevice`
        """
        return self._update_devices

    @update_devices.setter
    def update_devices(self, update_devices):
        """Sets the update_devices of this UpdateEdgeNodeDeviceResponse.

        :param update_devices: The update_devices of this UpdateEdgeNodeDeviceResponse.
        :type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
        """
        self._update_devices = update_devices

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes and recursively serialize nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes declared sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateEdgeNodeDeviceResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
585d2a8ccbc5b0922cb2cf1b62436fa98b0eb552 | 83107ca8671e2e11ea09b9bdfeac02ac6fe34bdf | /customer/urls.py | ed949122f18e5247440cb27dfaccc8cc168f610a | [] | no_license | findjoywfj/question_web | be0d003e98e563ca20eb147219e11cf3df0314ea | 411b446cb0528c2ab364f558100c125b82e17ab8 | refs/heads/master | 2020-04-28T02:57:15.376147 | 2019-05-09T10:10:38 | 2019-05-09T10:10:38 | 174,906,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from django.conf.urls import url
from customer.views import qes_show, home, qes_result, record_show, api_record_get
# Route table for the customer-facing questionnaire pages and the record API.
urlpatterns = [
    url(r'^$', home),  # landing page
    url(r'^qes_body/(?P<query_id>\w+)/(?P<user_type>\w+)/$', qes_show),  # questionnaire for a query/user type
    #url(r'^qes_body/(?P<query_id>\w+)/$', qes_show),
    url(r'^qes_body/(?P<query_id>\w+)/\w+/result/(?P<score>\d+)/$', qes_result),  # result page with the score
    url(r'^record/$', record_show),  # answer-record listing
    url(r'^api/record/get/$',api_record_get)  # JSON API for fetching records
] | [
"35032786+findjoywfj@users.noreply.github.com"
] | 35032786+findjoywfj@users.noreply.github.com |
8840fbe076ce35c5499f8951c83283b21e45ffd8 | c76ef7ef852dba81ab99099a9feef0c84573c629 | /crud.py | 487106c352504520c5fc9ebed513e8def33b6795 | [] | no_license | DhivyaMyl/Python_Practice | 4a28cf01336b853151747b4fc8dccde71b57e05f | 00d03175504de91fac67afc3f3b7c1628496d7f1 | refs/heads/master | 2022-11-08T14:57:46.197447 | 2020-06-14T20:04:11 | 2020-06-14T20:04:11 | 272,273,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | from flask import *
import sqlite3
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the landing page."""
    return render_template("index.html")
@app.route("/add")
def add():
    """Show the form for adding a new contact."""
    return render_template("add.html")
@app.route("/savedetails",methods = ["POST","GET"])
def saveDetails():
    """Insert a posted contact into the Address table and render the outcome.

    Bug fixes over the original:
    - the connection is now actually closed (the old ``con.close()`` sat
      after a ``return`` and was unreachable);
    - ``con`` can no longer be unbound inside the except branch when the
      connect call itself fails;
    - the bare ``except`` is narrowed to ``Exception``.
    On a plain GET the function still falls through and returns None,
    matching the original behavior.
    """
    msg = "msg"
    if request.method == "POST":
        con = None
        try:
            name = request.form["name"]
            email = request.form["email"]
            address = request.form["address"]
            con = sqlite3.connect("addressbook.db")
            cur = con.cursor()
            # Parameterized insert: safe against SQL injection.
            cur.execute("INSERT into Address (name, email, address) values (?,?,?)",
                        (name, email, address))
            con.commit()
            msg = "Contact successfully Added"
        except Exception:
            if con is not None:
                con.rollback()
            msg = "We can not add Contact to the list"
        finally:
            if con is not None:
                con.close()
            # Preserve the original control flow: always render from finally.
            return render_template("success.html", msg=msg)
@app.route("/view")
def view():
    """Render all contacts from the Address table.

    Bug fix: the original never closed the sqlite connection, leaking a
    file handle on every request.
    """
    con = sqlite3.connect("addressbook.db")
    try:
        con.row_factory = sqlite3.Row  # rows accessible by column name in the template
        cur = con.cursor()
        cur.execute("select * from Address")
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("view.html", rows=rows)
@app.route("/searchRecord",methods = ["POST"])
def searchRecord():
    """Search contacts by exact name or e-mail and render the matches.

    Bug fix: the sqlite connection is now closed (the original leaked it).
    The dead ``msg`` variable (assigned but never rendered) was removed.
    """
    name = request.form["name"]
    email = request.form["email"]
    con = sqlite3.connect("addressbook.db")
    try:
        con.row_factory = sqlite3.Row  # rows accessible by column name in the template
        cur = con.cursor()
        # Parameterized query: safe against SQL injection.
        cur.execute("select * from Address where name=? or email=?", (name, email))
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("searchRecord.html", rows=rows)
@app.route("/search")
def search():
    """Show the contact-search form."""
    return render_template("search.html")
@app.route("/delete")
def delete():
    """Show the contact-deletion form."""
    return render_template("delete.html")
@app.route("/deleterecord",methods = ["POST"])
def deleterecord():
    """Delete the contact with the submitted id and render the outcome.

    Bug fix: the original passed the id string directly as the parameter
    sequence, so any multi-character id (e.g. "12") raised "Incorrect
    number of bindings"; it must be wrapped in a one-element tuple.
    The bare ``except`` is also narrowed to ``Exception``.
    """
    id = request.form["id"]
    # The connection context manager commits on clean exit of the block.
    with sqlite3.connect("addressbook.db") as con:
        try:
            cur = con.cursor()
            cur.execute("delete from Address where id = ?", (id,))
            msg = "Contact successfully deleted"
        except Exception:
            msg = "can't be deleted"
        finally:
            # Preserve the original control flow: always render from finally.
            return render_template("delete_record.html", msg=msg)
# Start Flask's built-in development server when run directly.
# NOTE: debug=True is for local development only; never enable in production.
if __name__ == "__main__":
    app.run(debug = True)
| [
"noreply@github.com"
] | noreply@github.com |
70bfc243e42e01faf5fa18aa2024cf3b5efcf67b | a311614fe6fc8f23f08573b6f4f1ce022293260e | /Week7/final-exam-q4/blogPostDAO.py | e307cc00e11a9c3ef3668dd1fa42ff89c44deb0d | [] | no_license | edombowsky/MongoDB-Course | e257e18f259cc180eed6b66b61c57449b376746d | aa7cef5ec6160fbdc6915b08c8c6f3f233717d16 | refs/heads/main | 2021-11-27T14:46:04.974091 | 2014-11-27T21:16:02 | 2014-11-27T21:16:02 | 27,203,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | py | __author__ = 'aje'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import re
import datetime
# The Blog Post Data Access Object handles interactions with the Posts collection
class BlogPostDAO:
    """Data Access Object for the blog's ``posts`` MongoDB collection.

    Python 2 code using the legacy PyMongo write API (insert/update with
    ``safe``/``manipulate`` flags); the exact call signatures are part of
    the expected driver behavior.
    """

    # constructor for the class
    def __init__(self, database):
        # database: a pymongo Database; posts live in database.posts
        self.db = database
        self.posts = database.posts

    # inserts the blog entry and returns a permalink for the entry
    def insert_entry(self, title, post, tags_array, author):
        """Insert a new post document and return its derived permalink."""
        print "inserting blog entry", title, post

        # fix up the permalink to not include whitespace:
        # runs of whitespace become "_", everything else non-alphanumeric is dropped
        exp = re.compile('\W') # match anything not alphanumeric
        whitespace = re.compile('\s')
        temp_title = whitespace.sub("_",title)
        permalink = exp.sub('', temp_title)

        # Build a new post
        post = {"title": title,
                "author": author,
                "body": post,
                "permalink":permalink,
                "tags": tags_array,
                "comments": [],
                "date": datetime.datetime.utcnow()}

        # now insert the post
        # (bare except kept from the original: an insert failure is only
        # logged and the permalink is returned regardless)
        try:
            self.posts.insert(post)
            print "Inserting the post"
        except:
            print "Error inserting post"
            print "Unexpected error:", sys.exc_info()[0]

        return permalink

    # returns an array of num_posts posts, reverse ordered
    def get_posts(self, num_posts):
        """Return up to num_posts most recent posts as plain dicts."""
        cursor = self.posts.find().sort('date', direction=-1).limit(num_posts)
        l = []

        for post in cursor:
            post['date'] = post['date'].strftime("%A, %B %d %Y at %I:%M%p") # fix up date
            if 'tags' not in post:
                post['tags'] = [] # fill it in if its not there already
            if 'comments' not in post:
                post['comments'] = []

            l.append({'title':post['title'], 'body':post['body'], 'post_date':post['date'],
                      'permalink':post['permalink'],
                      'tags':post['tags'],
                      'author':post['author'],
                      'comments':post['comments']})

        return l

    # returns an array of num_posts posts, reverse ordered, filtered by tag
    def get_posts_by_tag(self, tag, num_posts):
        """Return up to num_posts most recent posts carrying the given tag."""
        cursor = self.posts.find({'tags':tag}).sort('date', direction=-1).limit(num_posts)
        l = []

        for post in cursor:
            post['date'] = post['date'].strftime("%A, %B %d %Y at %I:%M%p") # fix up date
            if 'tags' not in post:
                post['tags'] = [] # fill it in if its not there already
            if 'comments' not in post:
                post['comments'] = []

            l.append({'title': post['title'], 'body': post['body'], 'post_date': post['date'],
                      'permalink': post['permalink'],
                      'tags': post['tags'],
                      'author': post['author'],
                      'comments': post['comments']})

        return l

    # find a post corresponding to a particular permalink
    def get_post_by_permalink(self, permalink):
        """Return the post document for a permalink (or None), normalized for the template."""
        post = self.posts.find_one({'permalink': permalink})

        # XXX Final exam Question 4
        #
        # if you store the likes value in the way the template expects
        # and how is implied by by the fixup code below, you don't need to make a change here
        if post is not None:
            # fix up likes values. set to zero if data is not present for comments that have never been liked
            for comment in post['comments']:
                if 'num_likes' not in comment:
                    comment['num_likes'] = 0

            # fix up date
            post['date'] = post['date'].strftime("%A, %B %d %Y at %I:%M%p")

        return post

    # add a comment to a particular blog post
    def add_comment(self, permalink, name, email, body):
        """Append a comment to the post; returns the number of documents updated."""
        comment = {'author': name, 'body': body}

        # email is optional and only stored when supplied
        if (email != ""):
            comment['email'] = email

        try:
            last_error = self.posts.update({'permalink': permalink}, {'$push': {'comments': comment}}, upsert=False,
                                           manipulate=False, safe=True)

            return last_error['n'] # return the number of documents updated

        except:
            print "Could not update the collection, error"
            print "Unexpected error:", sys.exc_info()[0]
            return 0

    # increments the number of likes on a particular comment. Returns the number of documented updated
    def increment_likes(self, permalink, comment_ordinal):
        """Increment num_likes on the comment_ordinal-th comment of the post.

        Uses MongoDB dot-notation ("comments.<i>.num_likes") to address the
        array element; note the "." after "comments" is required — without
        it the path would name a non-existent field.
        """
        self.posts.update({'permalink': permalink},
                          {'$inc': {'comments.'+str(comment_ordinal)+'.num_likes': 1}},
                          upsert=True)
        return 0
| [
"earl.dombowsky@ventyx.abb.com"
] | earl.dombowsky@ventyx.abb.com |
e9e2d6ef3a1fb49514eb3fc0a6c17562f3c25eea | b767254735e59713b181205a7ff53835b09bad96 | /integer-to-english-words.py | 96d61b6225b5d3289bb10a41f7fd386b478eaa62 | [] | no_license | kavyan92/amazon_practice_problems | c21e9a4b2337795e922906429eaf0b4d5cdf0b1e | f3f5f163d6d1453baa6a11bd5b23c127c54f396e | refs/heads/master | 2023-09-04T10:34:53.819952 | 2021-11-16T19:36:24 | 2021-11-16T19:36:24 | 417,314,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | """Convert a non-negative integer num to its English words representation.
Example 1:
Input: num = 123
Output: "One Hundred Twenty Three"
Example 2:
Input: num = 12345
Output: "Twelve Thousand Three Hundred Forty Five"
Example 3:
Input: num = 1234567
Output: "One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven"
Example 4:
Input: num = 1234567891
Output: "One Billion Two Hundred Thirty Four Million Five Hundred Sixty Seven Thousand Eight Hundred Ninety One"
"""
class Solution:
    def numberToWords(self, num: int) -> str:
        """Convert a non-negative integer to its English-words representation.

        Improvement over the original: the recursion now uses integer
        floor division (``//``) instead of float true division wrapped in
        ``int()``, which is both idiomatic and immune to any float
        precision concerns.
        """
        to19 = 'One Two Three Four Five Six Seven Eight Nine Ten Eleven Twelve ' \
               'Thirteen Fourteen Fifteen Sixteen Seventeen Eighteen Nineteen'.split()
        tens = 'Twenty Thirty Forty Fifty Sixty Seventy Eighty Ninety'.split()

        def words(n):
            # n == 0 yields an empty slice, so zero pieces vanish naturally.
            if n < 20:
                return to19[n - 1:n]
            if n < 100:
                return [tens[n // 10 - 2]] + words(n % 10)
            if n < 1000:
                return [to19[n // 100 - 1]] + ['Hundred'] + words(n % 100)
            for p, w in enumerate(('Thousand', 'Million', 'Billion'), 1):
                if n < 1000**(p + 1):
                    return words(n // 1000**p) + [w] + words(n % 1000**p)

        return ' '.join(words(num)) or 'Zero'
"""Runtime: 28 ms, faster than 92.69% of Python3 online submissions for Integer to English Words.
Memory Usage: 14.5 MB, less than 26.10% of Python3 online submissions for Integer to English Words.""" | [
"kavya.0219@gmail.com"
] | kavya.0219@gmail.com |
8f47c688cf3c18166740580337dc753cbebfe9b3 | 592dfddfbcaa22263c1bcffa231b5a8d337d5594 | /CVE_FSE/Step3_ApplyCVEData/Tongji_All_Affected.py | 6f20fe3d6ac733231b875790c64329cbb103304b | [] | no_license | CongyingXU/CodeMiningTeam_Tasks | 40f930cb6f8723e067c2358d8d36cecd877cac8c | cc9af9662bc3d965c3204a6c57346e284d7d14c5 | refs/heads/master | 2020-09-22T21:53:46.912934 | 2020-08-01T10:59:59 | 2020-08-01T10:59:59 | 225,327,790 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,184 | py | # -*- coding: utf-8 -*-
"""
Created on 2020-02-25 15:21
@author: congyingxu
用于统计 当前实验对象的相关数据
哪些项目,用了哪些GAV,触发了哪些CVE?
最多的,最少的项目是?平均是?中位数是?上下4分位数是?
"""
from CommonFunction import JSONFIle_processing
# --- Input/output JSON file paths (produced/consumed by earlier pipeline steps) ---
All_affected_pojs_path = "Wangying_FSEData/affected_projs_total.json"
All_used_vule_ga_path = "Wangying_FSEData/used_vulne_libs_total.json"
GAV_CVE_Buggymethod_wangying_path = "Wangying_FSEData/GAV_CVE_BuggyMethod.json"
Pojs_GAVDependency_data_path = "Wangying_FSEData/Pojs_GAVDependency_data.json"
All_related_CVE_path = "Wangying_FSEData/All_related_CVE.json"
All_AffectedPojModule_UsedVulnerGAV_RelatedCVE_path = "Wangying_FSEData/All_AffectedPojModule_UsedVulnerGAV_RelatedCVE.json"
UsedGAV_CVE_mapping_Congying_path = "Wangying_FSEData/UsedGAV_CVE_mapping_Congying.json"

# --- Module-level caches, filled by read() and collectData() ---
UsedGAV_CVE_mapping_Congying = {}
All_affected_pojs = []
All_used_vule_ga = []
GAV_CVE_Buggymethod_wangying = {}
All_related_CVE = []
All_AffectedPojModule_UsedVulnerGAV_RelatedCVE = {}
def read():
    """Load every input JSON file into the module-level caches."""
    global All_affected_pojs, All_used_vule_ga, GAV_CVE_Buggymethod_wangying, Pojs_GAVDependency_data, UsedGAV_CVE_mapping_Congying
    # The loads are independent of each other; each cache mirrors one file.
    All_affected_pojs = JSONFIle_processing.read(All_affected_pojs_path)
    All_used_vule_ga = JSONFIle_processing.read(All_used_vule_ga_path)
    GAV_CVE_Buggymethod_wangying = JSONFIle_processing.read(GAV_CVE_Buggymethod_wangying_path)
    Pojs_GAVDependency_data = JSONFIle_processing.read(Pojs_GAVDependency_data_path)
    UsedGAV_CVE_mapping_Congying = JSONFIle_processing.read(UsedGAV_CVE_mapping_Congying_path)
def collectData():
    """Join the loaded inputs: for every affected project, record which of its
    declared GAV dependencies are known-vulnerable and which CVEs they map to.

    Fills All_AffectedPojModule_UsedVulnerGAV_RelatedCVE as
    {project: {module: {GAV: [CVE ids]}}} and accumulates All_related_CVE
    (deduplicated at the end).
    """
    global All_affected_pojs, All_used_vule_ga, GAV_CVE_Buggymethod_wangying, All_AffectedPojModule_UsedVulnerGAV_RelatedCVE, Pojs_GAVDependency_data, All_related_CVE
    for poj in All_affected_pojs:
        GAVs = Pojs_GAVDependency_data[poj]
        All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj] = {}
        for GAV_item in GAVs:
            groupId = GAV_item["groupId"]
            artifactId = GAV_item["artifactId"]
            version = GAV_item["version"]
            poj_module = GAV_item["module"]
            # "__fdse__" is the project-wide separator used in composite keys.
            GA_str = groupId + "__fdse__" + artifactId
            GAV_str = groupId + "__fdse__" + artifactId + "__fdse__" + version
            if GA_str in All_used_vule_ga.keys() and version in All_used_vule_ga[GA_str]:
                if GAV_str in GAV_CVE_Buggymethod_wangying.keys():
                    # CVE ids taken from Wang Ying's GAV -> CVE -> buggy-method map.
                    if poj_module not in All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj].keys():
                        All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj][poj_module] = {GAV_str: list(GAV_CVE_Buggymethod_wangying[GAV_str].keys())}
                    else:
                        All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj][poj_module][GAV_str] = list(GAV_CVE_Buggymethod_wangying[GAV_str].keys())
                    All_related_CVE.extend(GAV_CVE_Buggymethod_wangying[GAV_str].keys())
                elif GAV_str in UsedGAV_CVE_mapping_Congying.keys():
                    # Entries look like "CVE_FSE-<id>"; strip the prefix, keep the id.
                    if poj_module not in All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj].keys():
                        All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj][poj_module] = {GAV_str: [ele.split("CVE_FSE-")[1] for ele in UsedGAV_CVE_mapping_Congying[GAV_str]]}
                    else:
                        All_AffectedPojModule_UsedVulnerGAV_RelatedCVE[poj][poj_module][GAV_str] = [ele.split("CVE_FSE-")[1] for ele in UsedGAV_CVE_mapping_Congying[GAV_str]]
                    All_related_CVE.extend(UsedGAV_CVE_mapping_Congying[GAV_str])
                else:
                    # Should not happen: GAV marked vulnerable but absent from both maps.
                    print(GAV_str,"BU YING DANG!!!")
    All_related_CVE = list( set( All_related_CVE ) )
def write():
global All_affected_pojs, All_used_vule_ga, GAV_CVE_Buggymethod_wangying, All_AffectedPojModule_UsedVulnerGAV_RelatedCVE, Pojs_GAVDependency_data
MetaData = {"All_affected_pojs_num": len(All_affected_pojs),
"All_used_vule_ga_num": len(All_used_vule_ga.keys()),
"All_related_CVE_num": len(All_related_CVE)}
JSONFIle_processing.write(All_AffectedPojModule_UsedVulnerGAV_RelatedCVE, All_AffectedPojModule_UsedVulnerGAV_RelatedCVE_path)
JSONFIle_processing.write( All_related_CVE,All_related_CVE_path)
JSONFIle_processing.write( MetaData,"Wangying_FSEData/MetaData.json")
def tiaozhengshuju():
proj_vulne_lib = JSONFIle_processing.read( "Wangying_FSEData/proj_vulne_lib.json" )
vulne_lib_poj = {}
for poj in proj_vulne_lib.keys():
for GA in proj_vulne_lib[poj]:
if GA not in vulne_lib_poj.keys():
vulne_lib_poj[GA] = {}
for V in proj_vulne_lib[poj][GA]:
if V not in vulne_lib_poj[GA].keys():
vulne_lib_poj[GA][V] = []
else:
if poj not in vulne_lib_poj[GA][V]:
vulne_lib_poj[GA][V].append( poj )
JSONFIle_processing.write(vulne_lib_poj, "Wangying_FSEData/vulne_lib_poj.json")
# tiaozhengshuju()
if __name__ == '__main__':
read()
collectData()
write() | [
"1084729816@qq.com"
] | 1084729816@qq.com |
e96ab559085b5370e1cee8a9eebe298fe0b46529 | 5dd139ac9d1f849be31d187d373e0db262d73131 | /person.py | 616e32c839f908bc6e197af2fca8b20da4943455 | [] | no_license | Ashanthe/van-input-naar-output | 774f7ff76f914d6775e19213c821ff5f9e285732 | 3c9eef7c2760149dc79261a85bd6de19b86f688f | refs/heads/main | 2023-08-22T22:42:06.630795 | 2021-09-15T09:02:40 | 2021-09-15T09:02:40 | 404,260,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | print("----------------------------")
naam = input("|Naam : ")
adres = input("|Adres : ")
postcode = input("|Postcode : ")
woonplaats = input("|Woonplaats : ")
print("----------------------------- ")
| [
"99069097@mydavinci.nl"
] | 99069097@mydavinci.nl |
f757d1832d1fc67dd4e6c72dc50225804a3b7819 | 1061c149ac193631c2c80cb50bc27a7c5b7a5af2 | /Craps.py | 42ab57aa4368ad276d061992a86d76d8ad18982d | [] | no_license | waidei/Craps | 94f3f7bbba499a1bfb75b0eab1e1c955676723be | d8d26da6850fe1d02f42f7f9567604083e8eeb56 | refs/heads/master | 2020-08-26T15:45:54.992673 | 2019-10-28T15:39:08 | 2019-10-28T15:39:08 | 217,060,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # Isaac Waide
# October 23, 2019
# Craps Due Friday Oct 25,2019
import random
bank = account
roll = roll_dice
bet = bank
def account():
print("Welcome to Hartwick College roll of chance.")
print("How much money will you be betting today?")
bet = int(input)
while bet <= 0:
print("Sorry boss, you cannot bet nothing, please try again.")
bet = int(input)
while bank > 0 and bet > 0:
def roll_dice():
return random.randint(2, 12)
print(f"No more bets, time to roll the two dice, you rolled a {roll}.")
if roll == 7 or roll == 11:
print("Congrats Champ! You won!")
choice = input()
| [
"waidei@hartwick.edu"
] | waidei@hartwick.edu |
c5b5216e50a35624832cb3c83ef89b17bad936c6 | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20상반기 코딩테스트/보급로/1249.py | f8cb979771655a3bd22b8164a902086c5eea5c12 | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import sys
sys.stdin = open('1249.txt','r')
from collections import deque
dx=[1,0,-1,0]
dy=[0,1,0,-1]
def IS(y,x):
return -1<y<N and -1<x<N
for t in range(int(input())):
N=int(input())
A=[list(map(int,input()))for y in range(N)]
Map=[[10**9]*N for _ in range(N)]
Q=deque([(0,0,0)])
while Q:
c,y,x=Q.popleft()
if Map[y][x]<c:continue
for d in range(4):
Y,X=y+dy[d],x+dx[d]
if not IS(Y,X) or Map[Y][X]<=c+A[Y][X]:continue
Map[Y][X]=c+A[Y][X]
Q.append((c+A[Y][X],Y,X))
print('#%d %d'%(t+1,Map[N-1][N-1])) | [
"choo0618@naver.com"
] | choo0618@naver.com |
9655f6ea3d766232c5547a27623614157acc8830 | ca192c1d7939a8e32cb0b6fda5d6fba614893e25 | /SECUNIA_RESEARCH/SECUNIA_RESEARCH -Yokogawa/SECUNIA_RESEARCH/settings.py | 64742f68b6313cd41d4a9e58b0d3c00c0e802e3e | [] | no_license | Bruba/Webscraper | 9c59e33f6c7e96f43c79777988fbde00824a16be | 093b2350371e0cb3aafd1709ff2d2435be93a0d3 | refs/heads/master | 2020-03-19T00:08:36.711987 | 2018-05-30T14:47:00 | 2018-05-30T14:47:00 | 135,456,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | # -*- coding: utf-8 -*-
# Scrapy settings for SECUNIA_RESEARCH project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'SECUNIA_RESEARCH'
SPIDER_MODULES = ['SECUNIA_RESEARCH.spiders']
NEWSPIDER_MODULE = 'SECUNIA_RESEARCH.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'SECUNIA_RESEARCH (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'SECUNIA_RESEARCH.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'SECUNIA_RESEARCH.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'SECUNIA_RESEARCH.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"bizougreat@gmail.com"
] | bizougreat@gmail.com |
2582a401d6b72c269a71b87bbd3c57b88dbe66c6 | d723d8d5d32e6a3bdb43ee78efca1280949741f4 | /CycleGAN_DRPAN/proposal.py | f1c27d3fd77e12db974dafd42a4f4bf0bddb1345 | [] | no_license | godisboy/DRPAN | 8a224f8b8c64038f9fdcb683ba3d1507b87c9c54 | c4d62a15d1f6379f4ef94528851fed92a02ea889 | refs/heads/master | 2021-06-09T03:21:29.277681 | 2020-01-06T08:35:26 | 2020-01-06T08:35:26 | 142,092,208 | 52 | 13 | null | null | null | null | UTF-8 | Python | false | false | 7,340 | py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from roi_align.roi_align import RoIAlign
def to_varabile(arr, requires_grad=False, is_cuda=True):
tensor = torch.from_numpy(arr)
if is_cuda:
tensor = tensor.cuda()
var = Variable(tensor, requires_grad=requires_grad)
return var
class Proposal(nn.Module):
def __init__(self):
super(Proposal, self).__init__()
self.width = 1
self.height = 1
self.region_width = 70
self.region_height = 70
self.stride = 1
# using 5 layers PatchGAN
self.receptive_field = 70.
self.roialign = RoIAlign(self.region_height, self.region_width, transform_fpcoor=True)
# use mask operation or not
def _localize(self, score_map, input):
"""
width range: (feature_width - w_width) / stride + 1
:param score_map:
:param input:
:return:
"""
batch_size = score_map.size(0)
ax_tmp_fake = np.ones((batch_size, 3))
ax_tmp_real = np.zeros((batch_size, 3))
pro_height = (score_map.size(2) - self.height) / self.stride + 1
pro_width = (score_map.size(3) - self.width) / self.stride + 1
for n in range(batch_size):
for i in range(pro_width):
for j in range(pro_height):
_x, _y = i * self.stride, j * self.stride
region_score = score_map[n, :, _x:_x + self.stride, _y:_y + self.stride].mean()
if ax_tmp_real[n][2] < region_score.cpu().data.numpy():
ax_tmp_real[n] = _x, _y, region_score.cpu().data.numpy()
if ax_tmp_fake[n][2] > region_score.cpu().data.numpy():
ax_tmp_fake[n] = _x, _y, region_score.cpu().data.numpy()
_img_stride = (input.size(2) - self.receptive_field) // score_map.size(2)
ax_transform_fake = ax_tmp_fake[:, :2] * _img_stride + self.receptive_field
ax_transform_real = ax_tmp_real[:, :2] * _img_stride + self.receptive_field
return ax_transform_fake, ax_transform_real
def forward_A(self, real_B, fake_B, real_A, score_map):
ax_fake, ax_real = self._localize(score_map, real_B)
fake_Br, real_Ar, fake_Bf, real_Af= [], [], [], []
for i in range(real_B.size(0)):
x, y = ax_fake[i, :]
# Takes all the image
boxes = np.asarray([[y, x, y + self.region_height, x + self.region_width]], dtype=np.float32)
box_index_data = np.asarray([0], dtype=np.int32)
boxes = to_varabile(boxes, requires_grad=False, is_cuda=True)
box_index = to_varabile(box_index_data, requires_grad=False, is_cuda=True)
fake_Bf.append(self.roialign(fake_B[i].view(-1, 3, real_B.size(2), real_B.size(3)), boxes, box_index))
real_Af.append(self.roialign(real_A[i].view(-1, 3, real_A.size(2), real_A.size(3)), boxes, box_index))
fake_Bf, real_Af = torch.cat(fake_Bf, dim=0), torch.cat(real_Af, dim=0)
fake_ABf = torch.cat((real_Af, fake_Bf), 1)
for i in range(real_B.size(0)):
x, y = ax_real[i, :]
# Takes all the image
boxes = np.asarray([[y, x, y + self.region_height, x + self.region_width]], dtype=np.float32)
box_index_data = np.asarray([0], dtype=np.int32)
boxes = to_varabile(boxes, requires_grad=False, is_cuda=True)
box_index = to_varabile(box_index_data, requires_grad=False, is_cuda=True)
fake_Br.append(self.roialign(fake_B[i].view(-1, 3, real_B.size(2), real_B.size(3)), boxes, box_index))
real_Ar.append(self.roialign(real_A[i].view(-1, 3, real_A.size(2), real_A.size(3)), boxes, box_index))
fake_Br, real_Ar = torch.cat(fake_Br, dim=0), torch.cat(real_Ar, dim=0)
real_ABr = torch.cat((real_Ar, fake_Br), 1)
return fake_Br, real_Ar, fake_Bf, real_Af, fake_ABf, real_ABr
def forward_B(self, real_A, fake_A, real_B, score_map):
ax_fake, ax_real = self._localize(score_map, real_A)
fake_Ar, real_Br, fake_Af, real_Bf = [], [], [], []
for i in range(real_A.size(0)):
x, y = ax_fake[i, :]
# Takes all the image
boxes = np.asarray([[y, x, y + self.region_height, x + self.region_width]], dtype=np.float32)
box_index_data = np.asarray([0], dtype=np.int32)
boxes = to_varabile(boxes, requires_grad=False, is_cuda=True)
box_index = to_varabile(box_index_data, requires_grad=False, is_cuda=True)
fake_Af.append(self.roialign(fake_A[i].view(-1, 3, real_A.size(2), real_A.size(3)), boxes, box_index))
real_Bf.append(self.roialign(real_B[i].view(-1, 3, real_B.size(2), real_B.size(3)), boxes, box_index))
fake_Af, real_Bf = torch.cat(fake_Af, dim=0), torch.cat(real_Bf, dim=0)
fake_BAf = torch.cat((real_Bf, fake_Af), 1)
for i in range(real_A.size(0)):
x, y = ax_real[i, :]
# Takes all the image
boxes = np.asarray([[y, x, y + self.region_height, x + self.region_width]], dtype=np.float32)
box_index_data = np.asarray([0], dtype=np.int32)
boxes = to_varabile(boxes, requires_grad=False, is_cuda=True)
box_index = to_varabile(box_index_data, requires_grad=False, is_cuda=True)
fake_Ar.append(self.roialign(fake_A[i].view(-1, 3, real_A.size(2), real_A.size(3)), boxes, box_index))
real_Br.append(self.roialign(real_B[i].view(-1, 3, real_B.size(2), real_B.size(3)), boxes, box_index))
fake_Ar, real_Br = torch.cat(fake_Ar, dim=0), torch.cat(real_Br, dim=0)
real_BAr = torch.cat((real_Br, fake_Ar), 1)
return fake_Ar, real_Br, fake_Af, real_Bf, fake_BAf, real_BAr
# def _mask_operation_R_B(self, real_A, fake_A, real_B, rec_B, ax):
# # _ax = np.expand_dims(ax, axis=1)
# # _ax = np.repeat(_ax, real_AB.size(1), axis=1)
# mask = Variable(torch.zeros(real_A.size(0), 3, real_A.size(2), real_A.size(3)).cuda())
# for i in range(real_A.size(0)):
# x, y = ax[i, :].astype(int)
# mask[i, :, x:x + int(self.receptive_field), y:y + int(self.receptive_field)] = 1.
# fake_mA = fake_A * mask + real_A * (1 - mask)
# real_Bl = real_B * mask
# rec_Bl = rec_B * mask
# return fake_mA, real_Bl, rec_Bl
#
# def _mask_operation_R_A(self, real_B, fake_B, real_A, rec_A, ax_fake, ax_real):
# # _ax = np.expand_dims(ax, axis=1)
# # _ax = np.repeat(_ax, real_AB.size(1), axis=1)
# mask_fake = Variable(torch.zeros(real_B.size(0), 3, real_B.size(2), real_B.size(3)).cuda())
# mask_real = Variable(torch.zeros(real_B.size(0), 3, real_B.size(2), real_B.size(3)).cuda())
# for i in range(real_B.size(0)):
# x, y = ax_fake[i, :].astype(int)
# mask_fake[i, :, x:x + int(self.receptive_field), y:y + int(self.receptive_field)] = 1.
# for i in range(real_B.size(0)):
# x, y = ax_real[i, :].astype(int)
# mask_real[i, :, x:x + int(self.receptive_field), y:y + int(self.receptive_field)] = 1.
# real_Al = real_A * mask
# rec_Al = rec_A * mask
# return real_Al, rec_Al
| [
"jiangyufeng77@163.com"
] | jiangyufeng77@163.com |
8e2285e97c33aaae42dc1d4463e35d6f6d1a9b56 | dffee54c9c40b495e56cd56d191aef0e4ebe6064 | /composer/core/algorithm.py | 25317300f7dca6dce28ebd33f352a1721d4460c4 | [
"Apache-2.0"
] | permissive | zeeroocooll/composer | 3afb0427e713c3e19197c780f03b510fbf6c936b | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | refs/heads/main | 2023-08-20T04:21:51.536149 | 2021-10-13T20:34:29 | 2021-10-13T20:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | # Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from composer.core.serializable import Serializable
if TYPE_CHECKING:
from composer.core import Event, Logger, State
class Algorithm(Serializable, ABC):
"""Base class for algorithms.
Algorithms are pieces of code which run at specific events in the training
loop. Algorithms modify the trainer's state, generally with the effect of
improving the model's quality, or
increasing the efficiency and throughput of the training loop.
Algorithms must implement two methods:
:func:`match`, which returns whether the algorithm should be run given
the current event and state, and :func:`apply`, which makes an in-place
change to the State.
"""
@property
def find_unused_parameters(self) -> bool:
"""Indicates that the effect of this algorithm may cause some model
parameters to be unused.
Used to tell DDP that some parameters will be frozen during
training and hence it should not expect gradients from them.
All algorithms which do any kind of parameter freezing should
override this function to return True.
"""
return False
@abstractmethod
def match(self, event: Event, state: State) -> bool:
"""Determines whether this algorithm should run, given the current
:class:`Event` and :class:`State`.
Examples:
To only run on a specific event:
>>> return event == Event.BEFORE_LOSS
Switching based on state attributes:
>>> return state.epoch > 30 && state.world_size == 1
See :class:`State` for accessible attributes.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now.
"""
raise NotImplementedError(f'implement match() required for {self.__class__.__name__}')
@abstractmethod
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Applies the algorithm to make an in-place change to the State
Can optionally return an exit code to be stored in a :class:`Trace`.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
logger (:class:`Logger`): A logger to use for
logging algorithm-specific metrics.
Returns:
``int`` or ``None``: exit code that is stored in :class:`Trace`
and made accessible for debugging.
"""
raise NotImplementedError(f'implement apply() required for {self.__class__.__name__}')
def __str__(self) -> str:
"""Returns the class name."""
return self.__class__.__name__
| [
"averylamp@gmail.com"
] | averylamp@gmail.com |
babcd86669606969ca94181114c3944258ecfa56 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000_13TeV-madgraph_cff.py | aef83982aeb269928c449b90de344527b31a631c | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/narrow/v2/Wprime_WZ_WhadZlep_narrow_M2000_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"syu@cern.ch"
] | syu@cern.ch |
ff0f808215c2519b32558f33049af40cc7bae534 | fcd927827816696d56502979f9c02e4f71695ce9 | /getNews/getNews/pipelines.py | 65604560b3b21cf038df05b1bcdd41baa840fd6d | [
"MIT"
] | permissive | loserking/MLB-Search-Engine | 3b628272859b7cb595a29c1e6d8b62cdf2334005 | bcaccf403efdbb6b229e323906624c8f0f2c671b | refs/heads/master | 2020-12-08T05:24:22.773039 | 2019-09-20T09:32:33 | 2019-09-20T09:32:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
class GetnewsPipeline(object):
def __init__(self):
#打开文件
self.file = open('data_new.json', 'w', encoding='utf-8')
#该方法用于处理数据
def process_item(self, item, spider):
#读取item中的数据
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
#写入文件
self.file.write(line)
#返回item
return item
#该方法在spider被开启时被调用。
"""
class TeamPipeline(object):
def __init__(self):
#打开文件
self.file = open('team.json', 'w', encoding='utf-8')
#该方法用于处理数据
def process_item(self, item, spider):
#读取item中的数据
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
#写入文件
self.file.write(line)
#返回item
return item
"""
| [
"zex18@mails.tsinghua.edu.cn"
] | zex18@mails.tsinghua.edu.cn |
3999edfc7af1cd19427c2363a2b1ecbaa9faaafa | 522b300449b87cd4554492ccb66b0f4232379be1 | /Dev_Aprender/03_Colecoes/aula01.py | c05b59fca82e68c37c4ed912666a22d07035050d | [] | no_license | victor1cg/Python | 6e569ec95fa6553918c34b0fba08c614f24a138f | 3ae6c2da34f6af4f79e79b952fd15ba5acca46c6 | refs/heads/master | 2023-01-08T09:40:32.590723 | 2023-01-05T20:32:54 | 2023-01-05T20:32:54 | 254,467,366 | 2 | 1 | null | 2022-10-31T12:59:56 | 2020-04-09T20:04:01 | Python | UTF-8 | Python | false | false | 1,152 | py |
#! LISTAS
precos = [10,20,30,40,50,60,80,55,69,25]
print(precos[1]) #Indice
print(precos.index(25)) #acha o valor, retorna o indice
#* Multiplicação de valores
lista_de_noves = [9]*10
print(lista_de_noves)
#* Usando gerador range
faixa_numeros = list(range(20))
print(list('Cavalo'))
#* Lista de Lista
matriz_nomes = [['Carol',30],['Marcos',28]]
#* Adicionando valores
valores = [1,2,4]
anos = [2020,2021,2022]
#Adicionar ao final da lista
valores.append(11)
#Unir listas /porem não cria uma nova, modifica a existente
valores.extend()
#Juntar duas lista, e criar uma nova:
nova_lista = valores + anos
#Inserir um novo valor. (indice,valor)
anos.insert(2,2031)
#Deletar com base no indice
anos.pop(0)
del anos[0] #Aqui podemos passar uma faixa de valores [1:3]
#Deletar com base no valor
anos.remove(2020)
#Resetar os valores de uma lista
anos.clear()
#Contar a qtde de ocorrencias
anos.count(2) #qtde de vezes que aparece o numero 2
#! Listas - ENUMERATE - percorrer a lista, onde estamos atualmente
#sempre retorna um indice e o valor real. Indice começa em 1
""" for i,v in enumerate(pessoa):
print(i,v) """ | [
"victor1cg@hotmail.com"
] | victor1cg@hotmail.com |
5531e802e6e0131bfab313bbb6fe0f400f8fc8d2 | 698cb8d24879fe75669af6f2667c3f88660a0a1e | /FM/deepfm/deepfm_movielens_sample.py | 4d5736c139d3a64e02b438bc0dbd2fbacb19ae68 | [] | no_license | HuichuanLI/Recommand-Algorithme | c83c5d34d75eebd127e2aef7abc8b7152fc54f96 | 302e14a3f7e5d72ded73b72a538596b6dc1233ff | refs/heads/master | 2023-05-11T03:01:30.940242 | 2023-04-30T08:03:19 | 2023-04-30T08:03:19 | 187,097,782 | 71 | 19 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from deepctr.models import DeepFM
from deepctr.inputs import SparseFeat,get_feature_names
#数据加载
data = pd.read_csv("movielens_sample.txt")
sparse_features = ["movie_id", "user_id", "gender", "age", "occupation", "zip"]
target = ['rating']
# 对特征标签进行编码
for feature in sparse_features:
lbe = LabelEncoder()
data[feature] = lbe.fit_transform(data[feature])
# 计算每个特征中的 不同特征值的个数
fixlen_feature_columns = [SparseFeat(feature, data[feature].nunique()) for feature in sparse_features]
print(fixlen_feature_columns)
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# 将数据集切分成训练集和测试集
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name:train[name].values for name in feature_names}
test_model_input = {name:test[name].values for name in feature_names}
# 使用DeepFM进行训练
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=1, verbose=True, validation_split=0.2, )
# 使用DeepFM进行预测
pred_ans = model.predict(test_model_input, batch_size=256)
# 输出RMSE或MSE
mse = round(mean_squared_error(test[target].values, pred_ans), 4)
rmse = mse ** 0.5
print("test RMSE", rmse) | [
"lhc14124908@163.com"
] | lhc14124908@163.com |
7e085178d4d5b0eff678f9e232ffb896608bd78f | 508aa3493b65812b418a2b73ae4313f07f53e928 | /alunop.py | 1056c0a88c4b9d409a936420d7b575c095640ea4 | [] | no_license | robinhosz/ataDePresen-a | 6834ff18ef8edf9ac3a6c8b663015fcded475cfc | 969495972bf1485a8a182908f5cff610994dec28 | refs/heads/main | 2023-09-01T06:17:30.650675 | 2021-10-31T02:36:21 | 2021-10-31T02:36:21 | 423,007,918 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | from tkinter import *
import os
import banco
def gravarDados():
if tb_nmatricula.get() != "":
vnmatricula=tb_nmatricula.get()
vnome=tb_nome.get()
vdata=tb_data.get()
vturma=tb_turma.get()
vobs=tb_obs.get("1.0",END)
vquery="INSERT INTO P_alunos (N_Matricula, Nome, Data, Turma, Obs) VALUES ('"+vnmatricula+"','"+vnome+"','"+vdata+"','"+vturma+"','"+vobs+"')"
banco.dml(vquery)
tb_nmatricula.delete(0,END)
tb_nome.delete(0,END)
tb_data.delete(0,END)
tb_turma.delete(0,END)
tb_obs.delete("1.0",END)
print("Dados Gravados")
else:
print("ERRO")
app=Tk()
app.title("Ata de Presenca")
app.geometry("270x400")
app.configure(background="#1C1C1C")
Label(app,text="N-Matricula",background="#1C1C1C",foreground="#FFFAFA",anchor=W).place(x=32,y=10,width=100,height=30)
tb_nmatricula=Entry(app)
tb_nmatricula.place(x=32,y=30,width=200,height=20)
Label(app,text="Nome",background="#1C1C1C",foreground="#FFFAFA",anchor=W).place(x=32,y=60,width=100,height=30)
tb_nome=Entry(app)
tb_nome.place(x=32,y=80,width=200,height=20)
Label(app,text="Data",background="#1C1C1C",foreground="#FFFAFA",anchor=W).place(x=32,y=110,width=120,height=30)
tb_data=Entry(app)
tb_data.place(x=32,y=130,width=200,height=20)
Label(app,text="Turma",background="#1C1C1C",foreground="#FFFAFA",anchor=W).place(x=32,y=160,width=100,height=30)
tb_turma=Entry(app)
tb_turma.place(x=32,y=180,width=200,height=20)
Label(app,text="OBS",background="#1C1C1C",foreground="#FFFAFA",anchor=W).place(x=32,y=209,width=100,height=20)
tb_obs=Text(app)
tb_obs.place(x=32,y=230,width=200,height=100)
Button(app,text="Marcar Presenca",background="#00008B",foreground="#FFFAFA",command=gravarDados).place(x=80,y=350,width=100,height=20)
app.mainloop() | [
"joserobsonsiqueira23@hotmail.com"
] | joserobsonsiqueira23@hotmail.com |
f82d94ad5533aa17f9c433b5546780f562802e2a | d1507ee333bf9453a197fe997b58871b527811bf | /venv/bin/automat-visualize | 51f0d1222abf19fd9b8ca755d742738686858191 | [] | no_license | hirossan4049/screenshare | a336f2cf0e0584866356a82f13683480d9d039f6 | 004f0e649116a6059af19d6489aeb13aed1741f3 | refs/heads/master | 2021-01-27T09:21:48.891153 | 2020-04-12T04:55:40 | 2020-04-12T04:55:40 | 243,476,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/Users/linear/Documents/pg/pythonnnnn/screenshare/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(tool())
| [
"haruto405329@gmail.com"
] | haruto405329@gmail.com | |
539b7ee6d20575f474c0cc7e6c7c3bbaf0bd818b | cbfcf61131bff227d01550e197e55f42e2b2437e | /mons/mons/wsgi.py | b961cc038d4503864ee4daf23b3b2f6f1ef5a113 | [] | no_license | thomblr/mons-challenge | e23ec4a9b3b0bc631f6dbf5bb6903bd487c6a044 | e3f5ee1d4724e79eaeef3d723ccce12b8822a75d | refs/heads/master | 2023-03-03T05:04:44.668170 | 2021-02-05T10:06:08 | 2021-02-05T10:06:08 | 335,672,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for mons project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mons.settings')
application = get_wsgi_application()
| [
"thomas.1111@live.be"
] | thomas.1111@live.be |
c6fad8cd2fd91208b4855b48d7b68e779e4b475d | c5188ce9a28532d594578972bd4c7f2087f8e200 | /test/test_builder.py | 81975e2810bc5f82a65fa7d7f9766cb5a084cacd | [] | no_license | allnightlight/BanditProblemReinforcementLearningPractice | f111fc47341111c562e582ac51658b1e49bb4f0d | e4b546ca76c4ea31157760b390108dff908dced6 | refs/heads/master | 2022-07-23T16:12:12.217071 | 2020-05-10T13:04:00 | 2020-05-10T13:04:00 | 260,108,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | '''
Created on 2020/05/05
@author: ukai
'''
import os
import shutil
import unittest
from ConcAgent import ConcAgent
from ConcAgentFactory import ConcAgentFactory
from ConcBuildOrder import ConcBuildOrder
from ConcEnvrionmentFactory import ConcEnvironmentFactory
from ConcMyLogger import ConcMyLogger
from ConcRewardGiverFactory import ConcRewardGiverFactory
from ConcStore import ConcStore
from ConcTrainerFactory import ConcTrainerFactory
from ConcValueFunctionApproximatorFactory import ConcValueFunctionApproximatorFactory
from MyArray import MyArray
from framework import Builder
class Test(unittest.TestCase):
def setUp(self):
builder = Builder(ConcStore()
, ConcAgentFactory()
, ConcEnvironmentFactory()
, ConcTrainerFactory()
, ConcValueFunctionApproximatorFactory()
, ConcRewardGiverFactory()
, ConcMyLogger())
self.builder = builder
ConcAgent.checkpointFolderPath = "testCheckPointFolder"
ConcStore.saveFolderPath = "testSaveFolder"
def tearDown(self):
for path in [ConcAgent.checkpointFolderPath, ConcStore.saveFolderPath]:
if os.path.exists(path):
shutil.rmtree(path)
def test001(self):
builder = self.builder
assert isinstance(builder, Builder)
def test002(self):
builder = self.builder
buildOrders = MyArray()
for k1 in range(3):
buildOrder = ConcBuildOrder(nIteration=100
, nSeq=2
, nHorizonValueOptimization=1
, nIntervalPolicyOptimization=10
, nBatchPolicyOptimization=2**5
, nSaveInterval=2**5
, description="test %d/3" % (k1+1)
, nLevers=3)
buildOrders.add(buildOrder)
builder.build(buildOrders)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test001']
unittest.main() | [
"shouta.ukai@gmail.com"
] | shouta.ukai@gmail.com |
b67bdc2778d6912fd32a3f2f82c23d26caaaadec | a273c33036b697eaa90b01a22e5f01a31c61fda5 | /edx/ProblemSet7/NewStory.py | e25f77c0fba45482c4914593f5b08683b955e539 | [] | no_license | allaok/codestores | 1a55ed8798f6c99476fe24f27fda9a3c3fa03116 | f000bbb2518a8202875cbbcf6cc3a11e57db5792 | refs/heads/master | 2021-01-19T05:44:06.981591 | 2015-07-29T22:56:16 | 2015-07-29T22:56:16 | 39,902,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from ProblemSet7 import ps7
__author__ = 'root'
from ps7 import *
class NewsStory(object):
def __init__(self,guid, title, subject, summary, link):
self.guid=guid
self.title=title
self.subject=subject
self.summary=summary
self.link=link
def getGuid(self):
return self.guid
def getTitle(self):
return self.title
def getSubject(self):
return self.subject
def getSummary(self):
return self.summary
def getLink(self): return self.link
test = NewsStory('foo', 'myTitle', 'mySubject', 'some long summary', 'www.example.com')
print test.getGuid()
test = NewsStory('foo', 'myTitle', 'mySubject', 'some long summary', 'www.example.com')
| [
"alexis.koalla@orange.com"
] | alexis.koalla@orange.com |
723e50bc489b2f7410ac7e7cd702faa4037c038e | 6dbc2a8a88bf7b42d6964c3aa8ffee96dcc1e92e | /articles/views.py | d2bd8ed4b901a2936fbb6ce02c10731954235dc0 | [] | no_license | Lynextion/Expery_Share | 82c2473ab3a415737b886239ce8bf6e1cc1ec7ab | b4c396cff29006ae27001000b33c9f131d7ad346 | refs/heads/master | 2023-01-23T14:46:32.089315 | 2020-11-24T13:17:58 | 2020-11-24T13:17:58 | 315,044,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | from django.shortcuts import render,redirect
from .models import Article
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from . import form
import time
from account.models import Profile
def article_list(request):
articles = Article.objects.all().order_by('date')
return render(request,'articles/article_list.html',{'articles':articles})
def article_details(request,slug):
    """Render the detail page for the article identified by *slug*.

    NOTE(review): ``Article.objects.get`` raises if no article matches;
    consider ``get_object_or_404`` to return a 404 instead.
    """
    #return HttpResponse(slug)
    article =Article.objects.get(slug=slug)
    return render(request,'articles/article_detail.html',{'article':article})
@login_required(login_url="/account/selection/")
def article_create(request):
    """Create an article from a POSTed form, rate-limiting rapid posters.

    GET renders an empty form.  On a valid POST the article is saved with
    the current user as its author; when the article's date is within
    300 seconds of the profile's ``last_update``, a per-profile counter
    is incremented and the third rapid post renders a warning page
    instead of redirecting to the list.
    """
    if request.method == 'POST':
        forms = form.CreateArticle(request.POST,request.FILES)
        if forms.is_valid():
            instance = forms.save(commit=False)
            instance.author = request.user
            instance.save()
            update = Profile.objects.get(user=instance.author)
            # Time since this profile's last update.  NOTE(review): the
            # semantics of Profile.last_update are not visible here, and
            # `update` is never saved, so changes to `artilce_num` may
            # not persist -- confirm against the Profile model.
            last = instance.date - update.last_update
            if last.total_seconds() <= 300:
                update.artilce_num = update.artilce_num + 1
                if update.artilce_num == 3:
                    print("stfp")
                    update.artilce_num = 0
                    return render(request,"articles/warning.html")
            else:
                update.artilce_num = 0
            return redirect("articles:list")
    else:
        forms = form.CreateArticle()
    return render(request,"articles/article_create.html",{'form':forms})
"47065577+Lynextion@users.noreply.github.com"
] | 47065577+Lynextion@users.noreply.github.com |
8f466c4ee1cf92fbffc72bc1988d40b2eb3d0412 | 31155acd49915b9d0ce0731670c2b2e86d087953 | /afheuristics/comparework.py | 2851093b8c1cf402f789e95287683efe7fa65899 | [] | no_license | rudi-c/computational-photography-research | 2660d507ba2329d819f3eb5850b066d8b1f9c289 | 24ac27f6686afea7e1396f4caa5507ac59f42240 | refs/heads/master | 2020-04-28T21:28:28.273609 | 2014-11-11T07:35:42 | 2014-11-11T07:35:42 | 13,477,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,996 | py | #!/usr/bin/python
"""Runs a set of peak search algorithms on our scenes.
"""
import getopt
import sys
import coarsefine
import random
from cameramodel import CameraModel
from direction import Direction
from scene import load_scenes
# RNG seed so simulated noise/backlash is reproducible across runs.
seed = 1
# Toggles for the camera model: simulate focus-motor backlash and
# focus-measure noise during the benchmark searches.
simulate_backlash = True
simulate_noise = True
def print_aligned_data_rows(rows):
    """Print rows of string data with right-aligned, '|'-separated columns.

    Each column is padded to the width of its widest cell.  Uses the
    parenthesized ``print(...)`` form (instead of the Python-2-only
    print statement) so the helper also runs under Python 3.
    """
    # Width of each column == length of its longest cell.
    column_lengths = [max(len(cell) for cell in column)
                      for column in zip(*rows)]
    for row in rows:
        print("|".join(cell.rjust(length)
                       for length, cell in zip(column_lengths, row)))
def search_perfect(scenes):
    """Benchmark an oracle ("perfect information") local search per scene.

    For each scene the search is run from every initial lens position,
    and the mean number of lens movements is printed as a table.
    """
    print (">>> Perfect local search\n"
           "Assumes perfect information. Start at any lens position and take\n"
           "coarse steps in the direction of the closest peak until the lens\n"
           "is within 10 lens positions of a peak. Then, switch to fine steps.\n"
           "When the lens has passed the peak four lens positions ago, turn\n"
           "around and go to the peak.\n")
    data_rows = [("filename", "steps")]
    for scene in scenes:
        total_count = 0
        initial_positions = range(2, scene.step_count)
        # Perform a search for each initial starting position.
        for ini_pos in initial_positions:
            # Two steps are already "spent" deciding the sweep direction.
            step_count = 2
            lens_pos = ini_pos
            passed_peak = False
            if (scene.distance_to_closest_left_peak(ini_pos) <
                scene.distance_to_closest_right_peak(ini_pos)):
                # Sweep left
                lens_pos -= 2
                while lens_pos > 0:
                    diff = scene.fvalues[lens_pos] - scene.fvalues[lens_pos+1]
                    if scene.distance_to_closest_right_peak(lens_pos) < 4:
                        lens_pos -= 1 #fine
                    elif passed_peak:
                        # Overshot by four positions; back up onto the peak.
                        lens_pos += 4
                        break
                    elif (scene.distance_to_closest_left_peak(lens_pos) <= 10
                          or diff > 0.01):
                        lens_pos -= 1 #fine
                    else:
                        lens_pos -= 8 #coarse
                    if lens_pos in scene.maxima:
                        passed_peak = True
                    step_count += 1
            else:
                # Sweep right
                while lens_pos < scene.step_count - 1:
                    diff = scene.fvalues[lens_pos] - scene.fvalues[lens_pos-1]
                    if scene.distance_to_closest_left_peak(lens_pos) < 4:
                        lens_pos += 1 #fine
                    elif passed_peak:
                        lens_pos += 4
                        break
                    elif (scene.distance_to_closest_right_peak(lens_pos) <= 10
                          or diff > 0.01):
                        lens_pos += 1 #fine
                    else:
                        lens_pos += 8 #coarse
                    if lens_pos in scene.maxima:
                        passed_peak = True
                    step_count += 1
            total_count += step_count
        average = float(total_count) / len(initial_positions)
        data_rows.append((scene.filename, "%.1f" % average))
    print_aligned_data_rows(data_rows)
def search_standard(scenes, scene_to_print):
    """Benchmark the classic coarse-then-fine hill-climbing search.

    Runs the search from every valid initial position of every scene and
    prints per-scene success rate and mean step count.  While the
    simulations run, stdout is redirected into ``comparison.R`` so that
    ``camera.print_script`` output for *scene_to_print* lands in that
    R file; stdout is restored before the summary table is printed.
    """
    print ("Perform a standard hill-climbing search, where coarse steps are\n"
           "taken until some stopping condition occurs, at which point the\n"
           "movement is reversed, at which point fine steps are taken to\n"
           "maximize the focus value. This is the method described in\n"
           "[He2003] and [Li2005].\n\n"
           "To visualize the steps taken for simulation of a specific scene,\n"
           "use the command-line argument --scene-to-print=something.txt")
    step_size = 8
    data_rows = [("filename", "success %", "steps")]
    # Redirect stdout to a file for printing R script.
    orig_stdout = sys.stdout
    file_to_print = open("comparison.R", "w+")
    sys.stdout = file_to_print
    total_success = 0
    for scene in scenes:
        success_count = 0
        total_step_count = 0
        initial_positions = range(0, scene.step_count - step_size)
        for initial_position in initial_positions:
            camera = CameraModel(scene, initial_position,
                                 simulate_backlash=simulate_backlash,
                                 simulate_noise=simulate_noise)
            first_measure = camera.last_fmeasure()
            camera.move_coarse(Direction("right"))
            # Determine whether to start moving left or right.
            if camera.last_fmeasure() < first_measure:
                direction = Direction("left")
            else:
                direction = Direction("right")
            # If the first step decreases focus value, switch direction.
            # This is a simple backtracking, basically.
            first_measure = camera.last_fmeasure()
            camera.move_coarse(direction)
            if camera.last_fmeasure() < first_measure:
                direction = direction.reverse()
            # Sweep
            max_value = camera.last_fmeasure()
            while not camera.will_hit_edge(direction):
                camera.move_coarse(direction)
                max_value = max(max_value, camera.last_fmeasure())
                # Have we found a peak?
                if camera.last_fmeasure() < max_value * 0.9:
                    # Stop searching
                    break
            # Hillclimb until we're back at the peak.
            while not camera.will_hit_edge(direction.reverse()):
                prev_measure = camera.last_fmeasure()
                camera.move_fine(direction.reverse())
                if prev_measure > camera.last_fmeasure():
                    camera.move_fine(direction)
                    break
            # Record if we succeeded.
            if scene.distance_to_closest_peak(camera.last_position()) <= 1:
                success_count += 1
                evaluation = "succeeded"
            else:
                evaluation = "failed"
            if scene.filename == scene_to_print:
                camera.print_script(evaluation)
            total_step_count += camera.steps_taken
        success = float(success_count) / len(initial_positions) * 100
        line = (scene.name,
                "%.1f" % success,
                "%.1f" % (float(total_step_count) / len(initial_positions)))
        data_rows.append(line)
        total_success += success
    # Restore original stdout
    sys.stdout = orig_stdout
    file_to_print.close()
    print_aligned_data_rows(data_rows)
    print "average success : %.1f" % (total_success / len(scenes))
def search_sweep(scenes, always_coarse):
    """Benchmark a left-to-right sweep that stops at the first peak.

    When *always_coarse* is True the sweep uses 8-position coarse steps
    only; otherwise step size is chosen per step by the decision trees in
    the ``coarsefine`` module.  After the sweep detects a drop below 70%
    of the running maximum, a fine-step hill climb walks back to the
    peak.  Prints one status row per scene.
    """
    print ("Search for a peak by sweeping from the first lens position, stop\n"
           "when a peak is found.")
    if always_coarse:
        print "Sweeping is done with coarse steps only."
    else:
        print "Sweeping is done using ml-based heuristics."
    data_rows = [("filename", "status", "steps")]
    for scene in scenes:
        last_step_coarse = True
        max_val = scene.fvalues[0]
        f_cur, f_prev, f_prev2 = scene.get_focus_values([0, 0, 0])
        current_pos = 1
        step_count = 1
        # Sweep in search of a maxima.
        while current_pos < scene.step_count - 1:
            # Size of the next step.
            if always_coarse:
                step_coarse = True
            else:
                f_prev2, f_prev, f_cur = \
                    f_prev, f_cur, scene.fvalues[current_pos]
                # Decide on size of the next step using the right decision tree
                if last_step_coarse:
                    step_coarse = coarsefine.coarse_if_previously_coarse(
                        f_prev2, f_prev, f_cur)
                else:
                    step_coarse = coarsefine.coarse_if_previously_fine(
                        f_prev2, f_prev, f_cur)
            if step_coarse:
                current_pos = min(scene.step_count - 1, current_pos + 8)
            else:
                current_pos = min(scene.step_count - 1, current_pos + 1)
            step_count += 1
            max_val = max(max_val, scene.fvalues[current_pos])
            # Falling well below the running maximum means a peak was passed.
            if scene.fvalues[current_pos] < 0.7 * max_val:
                break
            last_step_coarse = step_coarse
        # Go back to peak using local search hillclimbing.
        while current_pos > 0:
            if scene.fvalues[current_pos] < scene.fvalues[current_pos - 1]:
                current_pos -= 1
                step_count += 1
            elif (current_pos > 1 and
                  scene.fvalues[current_pos] < scene.fvalues[current_pos - 2]):
                # Tolerance of two fine steps.
                current_pos -= 2
                step_count += 2
            else:
                # Number of steps to move forward and back,
                # due to two step tolerance
                step_count += 4
                break
        first_column = "%s (%d)" % (scene.filename, len(scene.maxima))
        if scene.distance_to_closest_peak(current_pos) < 1:
            if scene.distance_to_highest_peak(current_pos) <= 1:
                line = (first_column, "found highest", str(step_count))
            else:
                line = (first_column, "found a peak", str(step_count))
        else:
            line = (first_column, "failed", str(step_count))
        data_rows.append(line)
    print_aligned_data_rows(data_rows)
def search_full(scenes):
    """Benchmark an exhaustive coarse sweep followed by a local search.

    The camera sweeps every 8th lens position (counted as a flat
    ``sweep_steps`` cost), returns to the coarse position with the best
    focus value, then fine-steps to the local peak.  Prints one status
    row per scene.
    """
    print ("Perform a full sweep of coarse steps accross all the lens\n"
           "positions, the go to the position where the focus value was\n"
           "highest and do a local search.\n")
    sweep_steps = 19
    data_rows = [("filename", "status", "steps")]
    for scene in scenes:
        # The camera does a full sweep.
        highest_pos = 0
        for pos in range(0, scene.step_count, 8):
            if scene.fvalues[pos] > scene.fvalues[highest_pos]:
                highest_pos = pos
        # Number of large steps needed to go back to the highest position.
        large_steps = (scene.step_count - 1 - highest_pos) / 8
        current_pos = (scene.step_count - 1) - (large_steps * 8)
        fine_steps = 0
        # Local search.
        while current_pos > 0:
            if scene.fvalues[current_pos] < scene.fvalues[current_pos - 1]:
                current_pos -= 1
                fine_steps += 1
            elif (current_pos > 1 and
                  scene.fvalues[current_pos] < scene.fvalues[current_pos - 2]):
                # Tolerance of two fine steps.
                current_pos -= 2
                fine_steps += 2
            else:
                # Number of steps to move forward and back,
                # due to two step tolerance
                fine_steps += 4
                break
        step_count = sweep_steps + large_steps + fine_steps
        first_column = "%s (%d)" % (scene.filename, len(scene.maxima))
        if scene.distance_to_closest_peak(current_pos) <= 1:
            if scene.distance_to_highest_peak(current_pos) <= 1:
                line = (first_column, "found highest", str(step_count))
            else:
                line = (first_column, "found a peak", str(step_count))
        else:
            line = (first_column, "failed", str(step_count))
        data_rows.append(line)
    print_aligned_data_rows(data_rows)
def print_script_usage():
    """Print this benchmark script's command-line usage on stderr.

    Uses ``sys.stderr.write`` instead of the Python-2-only ``print >>``
    statement, so the helper is portable across Python 2 and 3 and
    unit-testable; the message text is unchanged.
    """
    sys.stderr.write(
        """Script usage : ./benchmark.py
           [--low-light <evaluate low light benchmarks>]
           [--specific-scene=<a scene's filename, will print R script,
                              but only for "search simple"> ]
        """ + "\n")
def main(argv):
    """Parse CLI options, load the scene set, and run every benchmark.

    Options select the scene folder (normal / low-light / low-light
    gaussian) and optionally a scene whose per-step R script should be
    emitted by ``search_standard``.
    """
    # Parse script arguments
    try:
        opts, _ = getopt.getopt(argv, "", [ "lowlight", "low-light",
                                            "lowlightgauss", "low-light-gauss",
                                            "scene-to-print=" ])
    except getopt.GetoptError:
        print_script_usage()
        sys.exit(2)
    scene_to_print = None
    scenes_folder = "focusraw/"
    for opt, arg in opts:
        if opt in ("--lowlight", "--low-light"):
            scenes_folder = "lowlightraw/"
        elif opt in ("--lowlightgauss", "--low-light-gauss"):
            scenes_folder = "lowlightgaussraw/"
        elif opt == "--scene-to-print":
            scene_to_print = arg
        else:
            print_script_usage()
            sys.exit(2)
    # Seed once so noise/backlash simulation is identical across runs.
    random.seed(seed)
    scenes = load_scenes(folder=scenes_folder,
                         excluded_scenes=["cat.txt", "moon.txt",
                                          "projector2.txt", "projector3.txt"])
    search_perfect(scenes)
    print "\n"
    search_standard(scenes, scene_to_print)
    print "\n"
    search_sweep(scenes, False)
    print "\n"
    search_sweep(scenes, True)
    print "\n"
    search_full(scenes)
main(sys.argv[1:]) | [
"rudichen@gmail.com"
] | rudichen@gmail.com |
f9b029536257b33e2c5061ee0df46395eef5ae31 | 9a3f201f5acc2941b0f74b1f1946aeb040b7aec7 | /3Drefine_parser.py | 1313b9377d1b28bfcd7f0a1030e96b4c625b4c4f | [] | no_license | DavisOwen/cvcRunAll | bf678d45945ae4798e549a36b55d51864950dfbb | 271d62368d865eb3490c8a045336f213973a14e7 | refs/heads/master | 2021-01-01T06:48:01.644761 | 2017-07-24T18:30:24 | 2017-07-24T18:30:24 | 97,517,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | #!/usr/bin/env python
import os
import sys
import subprocess
import shutil
# Parse a 3Drefine batch log (sys.argv[2]); sys.argv[1] is the common
# path prefix whose length is used to slice the 5-character PDB id out
# of each "Starting Model" line.
f = open(sys.argv[2],'r')
length = len(sys.argv[1])
# Scan the log: remember each job's directory, move refined results out
# of its RESULT folder (renamed to <prefix><pdbid>.pdb), delete the job
# directory, and rename failed jobs to <pdb><chain>_FAILED using data
# pulled from the job's DSSP/LOG files.
for line in f:
	string = line.split()
	if len(string) >= 5:
		if string[1] == 'SUMMARY' and string[2] == 'OF' and string[3] == 'JOB':
			direct = string[4]
	if len(string) >= 4:
		if string[0] == 'Starting' and string[1] == 'Model':
			pdb = string[3]
			pdb = pdb[length:length+5]
			Results = subprocess.check_output('ls '+direct+'/RESULT', shell = True)
			Results = Results.split()
			for i in range(len(Results)):
				os.rename(direct+'/RESULT/'+Results[i],sys.argv[1]+pdb+'.pdb')
			shutil.rmtree(direct)
	if string[0] == 'Job' and string[1] == 'ID':
		jobid = string[3]
	if string[0] == 'Refining' and string[1] == 'model...Exception':
		a = open(jobid+'/LOG/DSSP_1.txt','r')
		for foo in a:
			st = foo.split()
			if st[0] == 'HEADER':
				fail = st[-2]
				break
		b = open(jobid+'/LOG/LOG_1.txt','r')
		for foo in b:
			st = foo.split()
			if st[0] == 'assignRandomCaCoordinates':
				chain = st[2][-1]
				break
		os.rename(jobid,fail+chain+'_FAILED')
# The processed log is consumed: remove it so a rerun starts clean.
os.remove(sys.argv[2])
| [
"sdowen12@gmail.com"
] | sdowen12@gmail.com |
6d79d1bdfaa99f326c1686dd06a25a83dd5f4f71 | 7a066aec96ae67e0177808c347ad296b0d4c17a2 | /node.py | cd6c80bb730a031591a9040fca1839538855a53b | [] | no_license | varun-sundar-rabindranath/automatic-differentiation | c4e9dddb6aa4ed2a7a88af01bf2f8ad6b42dceac | 805521474240a4d768e755a45975bc412766b287 | refs/heads/master | 2020-12-28T16:50:32.713518 | 2020-02-08T01:32:42 | 2020-02-08T01:32:42 | 238,412,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # Node class for graph
from ad_numpy import ndarray_
from decorators import primitive
from grad_fns import grad_fn_mapping
class Node:
    """A node in the computation graph used for reverse-mode autodiff.

    Records an operation, its raw inputs/outputs, and the gradient
    function looked up from ``grad_fn_mapping`` so that gradients can be
    propagated backwards through the graph.
    """

    def __init__(self):
        # Raw positional / keyword inputs of the recorded operation.
        self.inputs = {"args" : None, "kwargs" : None}
        self.outputs = None
        self.op = None
        self.name = None
        self.grad_fn = None
        # Accumulated gradient of the final output w.r.t. this node.
        self.grad = 0.0
        # Gradients w.r.t. each positional / keyword input.
        self.grad_wrt_args = {}
        self.grad_wrt_kwargs = {}
        self.inputs_order = {}

    def make_node(self, *, args, kwargs, outputs, op, name):
        """Populate this node for operation *op* and attach its grad fn.

        Raises ``AssertionError`` when ``grad_fn_mapping`` has no
        gradient function registered for *op*.
        """
        self.inputs = {"args" : args, "kwargs" : kwargs}
        self.outputs = outputs
        self.op = op
        self.name = name
        # assign the grad function mapping
        if grad_fn_mapping.get(self.op) is None:
            print ("Grad function not implemented for ", self.op)
            # Fixed: `assert False and msg` evaluated to a plain False and
            # dropped the message; `assert False, msg` reports it.
            assert False, "You are a failure"
        self.grad_fn = grad_fn_mapping[self.op]

    def __str__(self):
        """Return a readable multi-line dump of the node's state."""
        s = ""
        # str() guards against a node that was never named via make_node().
        s = s + "--- Node : " + str(self.name) + " --- \n"
        if (self.inputs["args"] is not None):
            args_lst = list(self.inputs["args"])
            for arg in args_lst:
                s = s + "  Arg : " + str(arg) + "\n"
        if (self.inputs["kwargs"] is not None):
            for kw in self.inputs["kwargs"].keys():
                s = s + "  KW : " + kw + str(self.inputs["kwargs"][kw]) + "\n"
        s = s + "  Outputs : " + str(self.outputs) + "\n"
        # Fixed "Opeeration" typo in the dump label.
        s = s + "  Operation : " + str(self.op) + "\n"
        s = s + "  Grad function : " + str(self.grad_fn) + "\n"
        s = s + "  Grad : " + str(self.grad) + "\n"
        s = s + "  Grad wrt args : " + str(self.grad_wrt_args) + "\n"
        return s
| [
"varunsundar08@gmail.com"
] | varunsundar08@gmail.com |
9922f2132d7a55e28ab30681e4779b4cd437e51a | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/easy-money_20210201120223.py | e793aeaa52c922c7f1eb6842bef7196a3a28ad87 | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,707 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# 东方财富网 首发申报
import re
import pickle
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
from bs4 import BeautifulSoup
import configparser
# Module-wide configuration: HTTP headers and the eastmoney API base URL
# are read once from Config.ini at import time.
config = configparser.ConfigParser()
config.read('Config.ini')

headers = config['eastmoney']['headers']
# Fixed NameError: `base_url` previously read from `onfig`, a typo for
# the `config` parser created above.
base_url = config['eastmoney']['base_url']
def date_gen():
    """Fetch the list of disclosure dates from eastmoney's sbqy page.

    Returns a list of date strings, one per ``<option>`` element.
    Callers iterate the result expecting individual dates, so the list
    is *returned* rather than yielded: the previous ``yield dateList``
    made this a generator whose single item was the whole list, which
    broke ``for date in dateList`` loops downstream.
    """
    r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
                     headers=headers)
    # The page is GBK-encoded; tell requests before reading .text.
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, 'html.parser')
    dateList = [i.text for i in soup.findAll('option')]
    return dateList
def update_date():
    """Return the most recent disclosure date (first <option> on the page)."""
    response = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
                            headers=headers)
    # The page is served as GBK; set the encoding before decoding .text.
    response.encoding = 'gbk'
    page = BeautifulSoup(response.text, 'html.parser')
    return page.find('option').get_text()
from pathlib import Path
def update_eastmoneyData(newDate):
    """Append freshly published filing rows for *newDate* to the raw CSV.

    If the raw CSV does not exist yet, falls back to a full fetch via
    ``date_gen`` + ``get_eastmoneyData``.  NOTE(review): in the no-update
    and fallback branches ``df`` is never assigned, so ``return df``
    raises NameError there -- confirm intended control flow.
    """
    eastmoney_raw_data = Path(config['eastmoney']['raw_data'])
    # If the raw-data file already exists, do an incremental update.
    if eastmoney_raw_data.is_file():
        # newDate = update_date()
        # Only fetch when the site shows a date newer than our last run.
        if newDate != config['eastmoney']['lastDate']:
            query = {
                'type': 'NS',
                'sty': 'NSFR',
                'st': '1',
                'sr': '-1',
                'p': '1',
                'ps': '5000',
                'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
                'mkt': '1',
                'fd': newDate,
                'rt': '53721774'
            }
            url = base_url + urlencode(query)
            rs = requests.get(url, headers=headers)
            js = rs.text.split('var IBhynDx={pages:1,data:')[1]
            # HACK: eval() on remote response text -- trusted-source only.
            data = eval(js[:-1])
            temp = [i.split(',') for i in data]
            columns = [
                '会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
                '是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
            ]
            df = pd.DataFrame(temp, columns=columns)
            df['文件链接'] = df['时间戳'].apply(
                lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
            )
            df = df[[
                '机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
                '签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
            ]]
            df = df[df['板块'] != '创业板']
            # NOTE(review): DataFrame.replace is not in-place and the
            # results are discarded on the next two lines -- confirm.
            df.replace({'是否提交财务自查报告': ' '}, '是')
            df.replace({'是否提交财务自查报告': '不适用'}, '是')
            df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
            df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
            # Append (mode='a') without header to extend the existing CSV.
            df.to_csv(
                'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',mode='a',
                index=False, header=False, encoding='utf-8-sig')
    else:
        dateList = date_gen()
        get_eastmoneyData(dateList)
    return df
def get_eastmoneyData(dateList):
    """Fetch IPO filing rows for every date in *dateList* and write the CSV.

    Queries the eastmoney JSONP endpoint once per date (2 s apart),
    builds a DataFrame, drops ChiNext (创业板) rows, normalizes company
    names, writes the raw CSV, and returns the DataFrame.
    """
    query = {
        'type': 'NS',
        'sty': 'NSFR',
        'st': '1',
        'sr': '-1',
        'p': '1',
        'ps': '5000',
        'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
        'mkt': '1',
        'rt': '53721774'
    }
    main_data = []
    for date in dateList:
        print('fetching date: ',date)
        query['fd'] = date
        # start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
        # while start < datetime.today().date():
        #     query['fd'] = start
        url = base_url + urlencode(query)
        # yield url
        # start += timedelta(days=7)
        rs = requests.get(url, headers=headers)
        if rs.text == '':
            continue
        js = rs.text.split('var IBhynDx={pages:1,data:')[1]
        # HACK: eval() on remote response text -- trusted-source only.
        data = eval(js[:-1])
        main_data.extend(data)
        # Be polite to the endpoint between date queries.
        time.sleep(2)
    temp = [i.split(',') for i in main_data]
    columns = [
        '会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
        '是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
    ]
    df = pd.DataFrame(temp, columns=columns)
    df['文件链接'] = df['时间戳'].apply(
        lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
    )
    df = df[[
        '机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
        '签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
    ]]
    df = df[df['板块'] != '创业板']
    # NOTE(review): replace() is not in-place; these two results are
    # discarded -- confirm.
    df.replace({'是否提交财务自查报告': ' '}, '是')
    df.replace({'是否提交财务自查报告': '不适用'}, '是')
    df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
    df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
    df = df[df['板块'] != '创业板']
    df.to_csv(
        'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',
        index=False,
        encoding='utf-8-sig')
    return df
def get_meetingData():
    """Fetch review-meeting results for both markets and write the CSV.

    Queries the NSSH endpoint for market types '2' and '4', derives the
    PDF and detail-page links, writes the meeting CSV, and returns the
    DataFrame.
    """
    meetingInfo = []
    for marketType in ['2', '4']:  # 2 = main board, 4 = SME board
        query = {
            'type': 'NS',
            'sty': 'NSSH',
            'st': '1',
            'sr': '-1',
            'p': '1',
            'ps': '5000',
            'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
            'mkt': marketType,
            'rt': '53723990'
        }
        url = base_url + urlencode(query)
        rss = requests.get(url, headers=headers)
        jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
        # HACK: eval() on remote response text -- trusted-source only.
        data = eval(jss[:-1])
        meetingInfo.extend(data)
    temp = [j.split(',') for j in meetingInfo]
    columns = [
        '时间戳', 'yyy', '公司代码', '机构名称', '详情链接', '申报日期', '上会日期', '申购日期', '上市日期',
        '9', '拟发行数量', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '当前状态', '上市地点',
        '主承销商', '承销方式', '发审委委员', '网站', '简称'
    ]
    df = pd.DataFrame(temp, columns=columns)
    df['文件链接'] = df['时间戳'].apply(
        lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
    )
    df['详情链接'] = df['公司代码'].apply(
        lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
    df = df[[
        '机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期', '上会日期', '申购日期', '上市日期',
        '主承销商', '承销方式', '9', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '发审委委员',
        '网站', '公司代码', 'yyy', '时间戳', '简称', '详情链接', '文件链接'
    ]]
    df.to_csv(
        'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',
        index=False,
        encoding='utf-8-sig')
    return df
def update_zzscDate(newDate):
    """Incrementally update the terminated-review (zzsc) pickle for *newDate*.

    NOTE(review): this function looks unfinished.  ``.is_file`` is
    missing ``()`` so the condition is always truthy; ``pickle.load``
    is given a path string but expects an open file object; and the
    ``else`` branch ends in ``date = g`` where ``g`` is undefined
    (NameError) -- confirm before relying on this function.
    """
    if Path(config['eastmoney']['zzsc_pkl']).is_file:
        if newDate != config['eastmoney']['lastDate']:
            zzsc_dict = pickle.load(config['eastmoney']['zzsc_pkl'])
            query = {
                'type': 'NS',
                'sty': 'NSSE',
                'st': '1',
                'sr': '-1',
                'p': '1',
                'ps': '500',
                'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
                'mkt': '4',
                'stat': 'zzsc',
                'fd': newDate,
                'rt': '53727636'
            }
            url = base_url + urlencode(query)
            rss = requests.get(url, headers=headers)
            # Empty result marker from the endpoint: nothing to merge.
            if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
                return
            jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
            # HACK: eval() on remote response text -- trusted-source only.
            data = eval(jss[:-1])
            for i in data:
                name = i.split(',')[1]
                if name not in zzsc_dict:
                    zzsc_dict[name] = i.split(',')[2]
                else:
                    continue
    else:
        date = g
def get_zzscData(dateList):
    """Fetch terminated-review (zzsc) records for every date in *dateList*.

    Builds a name -> termination-date dict (first occurrence wins),
    writes it to the zzsc CSV, and returns the resulting DataFrame.
    """
    zzsc_dict = {}
    for date in dateList:
        query = {
            'type': 'NS',
            'sty': 'NSSE',
            'st': '1',
            'sr': '-1',
            'p': '1',
            'ps': '500',
            'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
            'mkt': '4',
            'stat': 'zzsc',
            'fd': date,
            'rt': '53727636'
        }
        url = base_url + urlencode(query)
        rss = requests.get(url, headers=headers)
        # Empty result marker from the endpoint: skip this date.
        if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
            continue
        jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
        # HACK: eval() on remote response text -- trusted-source only.
        data = eval(jss[:-1])
        for i in data:
            name = i.split(',')[1]
            if name not in zzsc_dict:
                zzsc_dict[name] = i.split(',')[2]
            else:
                continue
        # Be polite to the endpoint between date queries.
        time.sleep(2)
    zzsc = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
    zzsc.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_zzsc.csv',
                encoding='utf-8-sig',
                index=False)
    return zzsc
def eastmoney_cleanUP():
    """Clean the raw filing CSV and write the deduplicated result.

    Normalizes company names (drops '*' and the 股份有限公司 suffix,
    converts ASCII parentheses to full-width ones), removes ChiNext
    rows, deduplicates on (name, type), writes the cleaned CSV, and
    returns the DataFrame.
    """
    east_money = pd.read_csv(
        'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv')
    # NOTE(review): replace() is not in-place; these two results are
    # discarded -- confirm.
    east_money.replace({'是否提交财务自查报告': ' '}, '是')
    east_money.replace({'是否提交财务自查报告': '不适用'}, '是')
    east_money['机构名称'] = east_money['机构名称'].replace(r'\*', '', regex=True)
    east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司', '', regex=True)
    # Normalize ASCII parentheses to the full-width forms used elsewhere.
    east_money['机构名称'] = east_money['机构名称'].replace(r'\(', '(', regex=True)
    east_money['机构名称'] = east_money['机构名称'].replace(r'\)', ')', regex=True)
    east_money = east_money[east_money['板块'] != '创业板']
    # east_money.sort_values(['机构名称','类型','受理日期'],ascending=[True, True,True],inplace=True)
    # east_money.to_csv('C:/Users/chen/Desktop/IPO_info/pre_cleab.csv',encoding='utf-8-sig',index=False)
    east_money.drop_duplicates(subset=['机构名称', '类型'],
                               keep='first',
                               inplace=True)
    east_money.to_csv(
        'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_cleaned.csv',
        encoding='utf-8-sig',
        index=False)
    return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
    '''
    Merge the cleaned filing rows, review-meeting rows and
    terminated-review rows into one dict keyed by (shortened) company
    name.  Returns ``(all_data, error_set)`` where ``error_set`` maps
    names present in the meeting / terminated data but missing from the
    filing data.  Each ``all_data`` value has this shape (keys are the
    literal Chinese labels used as dict keys below):

    主板、中小板 = {'机构名称':'',
                '简称':'',
                'Wind代码':'',
                '统一社会信用代码':'',
                '板块':'',
                '注册地':'',
                '所属行业':'',
                '经营范围':'',
                '预先披露':'[日期]',
                '已反馈':'[日期]',
                '预先披露更新':'[日期]',
                '发审会':{'中止审查':'[日期]',
                        '已上发审会,暂缓表决':'[日期]',
                        '已提交发审会讨论,暂缓表决:'[日期]',
                        '已通过发审会':'[日期]'},
                '终止审查':'[日期]',
                '上市日期':'[日期]',
                '保荐机构':'',
                '律师事务所':,
                '会计师事务所':'',
                '发行信息':{'拟发行数量':'',
                          '发行前总股本':'',
                          '发行后总股本':''},
                '反馈文件':'[链接]'
                }
    '''
    shzb = {}  # Shanghai main board (currently unused; see trailing comment)
    szzxb = {}  # Shenzhen SME board (currently unused)
    all_data = {}  # merged result, keyed by company name
    ekk = cleaned_easymoney_df.values.tolist()
    # Pass 1: one entry per company from the filing data; status rows
    # fill in the per-stage dates.
    for i in ekk:
        if i[0] not in all_data:
            all_data[i[0]] = {
                '机构名称': i[0] + '股份有限公司',
                '简称': i[15],
                'Wind代码': '',
                '统一社会信用代码': '',
                '板块': i[2],
                '注册地': '',
                '所属行业': '',
                '经营范围': '',
                '预先披露': '',
                '已反馈': '',
                '预先披露更新': '',
                '发审会': {
                    '中止审查': '',
                    '已上发审会,暂缓表决': '',
                    '已提交发审会讨论,暂缓表决': '',
                    '已通过发审会': ''
                },
                '终止审查': '',
                '上市日期': '',
                '保荐机构': i[4],
                '保荐代表人': '',
                '律师事务所': i[6],
                '签字律师': '',
                '会计师事务所': i[8],
                '签字会计师': '',
                '发行信息': {
                    '拟发行数量(万)': '',
                    '发行前总股本(万)': '',
                    '发行后总股本(万)': ''
                },
                '反馈文件': ''
            }
        if i[1] == '已受理':
            all_data[i[0]]['预先披露'] = i[12]
        elif i[1] == '已反馈':
            all_data[i[0]]['已反馈'] = i[12]
        elif i[1] == '预先披露更新':
            all_data[i[0]]['预先披露更新'] = i[12]
        elif i[1] == '已通过发审会':
            all_data[i[0]]['发审会']['已通过发审会'] = i[12]
        elif i[1] == '已提交发审会讨论,暂缓表决':
            all_data[i[0]]['发审会']['已提交发审会讨论,暂缓表决'] = i[12]
        elif i[1] == '已上发审会,暂缓表决':
            all_data[i[0]]['发审会']['已上发审会,暂缓表决'] = i[12]
        elif i[1] == '中止审查':
            all_data[i[0]]['发审会']['中止审查'] = i[12]
        # Back-fill fields that may only be present on some status rows.
        if all_data[i[0]]['注册地'] == '' and i[3] != '':
            all_data[i[0]]['注册地'] = i[3]
        if all_data[i[0]]['所属行业'] == '' and i[11] != '':
            all_data[i[0]]['所属行业'] = i[11]
        if all_data[i[0]]['保荐代表人'] == '' and i[5] != '':
            all_data[i[0]]['保荐代表人'] = i[5]
        if all_data[i[0]]['签字律师'] == '' and i[7] != '':
            all_data[i[0]]['签字律师'] = i[7]
        if all_data[i[0]]['签字会计师'] == '' and i[9] != '':
            all_data[i[0]]['签字会计师'] = i[9]
    ekk2 = meetingInfo_df.values.tolist()
    error_set = {}
    # Pass 2: merge meeting outcomes and issuance numbers.
    # NOTE(review): '上会未通过'/'取消审核' keys are not pre-initialized in
    # 发审会, '上市时间' does not match the initialized '上市日期' key, and
    # the 发行信息 keys below lack the '(万)' suffix used at init time, so
    # both variants end up in the dict -- confirm intended.
    for i in ekk2:
        i[0] = i[0].replace(r'股份有限公司', '')
        if i[0] not in all_data:
            print("Error: Cannot find ", i[0])
            error_set.update({i[0]: i[5]})
            continue
        if i[1] == '上会未通过':
            all_data[i[0]]['发审会']['上会未通过'] = i[5]
        elif i[1] == '取消审核':
            all_data[i[0]]['发审会']['取消审核'] = i[5]
        elif i[1] == '上会通过':
            all_data[i[0]]['发审会']['已通过发审会'] = i[5]
        if i[7] != '':
            all_data[i[0]]['上市时间'] = i[7]
        all_data[i[0]]['发行信息']['拟发行数量'] = "{:.2f}".format(int(i[3]) / 10000)
        all_data[i[0]]['发行信息']['发行前总股本'] = "{:.2f}".format(int(i[11]) / 10000)
        all_data[i[0]]['发行信息']['发行后总股本'] = "{:.2f}".format(int(i[12]) / 10000)
    ekk3 = zzsc_df.values.tolist()
    # Pass 3: stamp the terminated-review date onto matching companies.
    for i in ekk3:
        name = i[0].replace(r'股份有限公司', '')
        if name not in all_data:
            print("Error: Cannot find in zzsc", i[0])
            error_set.update({name: i[1]})
            continue
        all_data[name]['终止审查'] = i[1]
    # for key, value in all_data.items():
    #     if value['板块'] == '中小板' and value['终止审查'] == '' and value['上市日期'] == '':
    #         szzxb.update({key: value})
    #     if value['板块'] == '主板企业' and value['终止审查'] == '' and value['上市日期'] == '':
    #         shzb.update({key: value})
    return all_data, error_set
if __name__ == '__main__':
    # Pipeline: clean the raw filing CSV, load the meeting and zzsc CSVs,
    # merge everything, and pickle the result for the downstream app.
    # dateList = date_gen()
    # get_eastmoneyData(dateList)
    east_money_df = eastmoney_cleanUP()
    # east_money_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/easymoney_data_new.csv',keep_default_na=False)
    meetingInfo_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',keep_default_na=False)
    # meetingInfo_df = get_meetingData()
    zzsc_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/zzsc.csv')
    all_data,_ = gen_finalData(east_money_df,meetingInfo_df,zzsc_df)
    print('Complete!')
    with open('C:/Users/chen/Desktop/IPO_info/zb_zxb_info.pkl','wb') as f:
        pickle.dump(all_data, f, pickle.HIGHEST_PROTOCOL)
"chenjiajun.jason@outlook.com"
] | chenjiajun.jason@outlook.com |
c10038b3362c2b7e3e9c1956fd39e8f43a1d2c38 | 72e42be7ad8cea3a55ffb67cf71a37502f63373a | /samples/migrations/0010_patient_consent.py | 10cdfa6bcf24e0ae319256700b8193dafdf5b5d5 | [] | no_license | joshv2/biomarker2 | 117cee7b693529df0090626aa77b3346559a31a5 | 71ed1296c9231b56d44a8e84afbf78f573277dc1 | refs/heads/master | 2020-04-22T13:27:43.322480 | 2020-02-19T06:41:32 | 2020-02-19T06:41:32 | 170,410,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Generated by Django 2.0.1 on 2018-05-24 00:39
from django.db import migrations, models
import samples.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: re-add ``Patient.consent``.

    Adds a FileField whose upload path is computed per-user by
    ``samples.models.user_directory_path`` (reverses 0009, which
    removed the field).
    """

    dependencies = [
        ('samples', '0009_remove_patient_consent'),
    ]
    operations = [
        migrations.AddField(
            model_name='patient',
            name='consent',
            field=models.FileField(default='', upload_to=samples.models.user_directory_path),
        ),
    ]
| [
"joshv2@gmail.com"
] | joshv2@gmail.com |
5649179f8c1bb20ed44f3c4504259fd0c3f51967 | 3c868540c8f5b0b9b46440e9b8e9160de9e8988f | /ch06/handle_with_condition.py | fe8d59c97207d94fc31608b8c1b50584d2ba69ac | [] | no_license | sarte3/python | cc8f41b8b22b0a980252d6546358dd212324e2cd | 15d984e5df03387950692092b6b5569adab845bb | refs/heads/master | 2023-01-18T18:37:40.720326 | 2020-11-17T08:43:27 | 2020-11-17T08:43:27 | 304,824,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | user_input_a = input('정수 입력 > ')
if user_input_a.isdigit():
number_input_a = int(user_input_a)
print('원의 반지름 : ', number_input_a)
print('원의 둘레 : ', 2 * 3.14 * number_input_a)
print('원의 넓이 : ', 3.14 * number_input_a * number_input_a)
else:
print('정수를 입력하지 않았습니다') | [
"sarte@outlook.kr"
] | sarte@outlook.kr |
6dc1a9ada1602097156dbad46de7e233470ba7bf | 53912aab38b4f155db2642b7b62bede9df9edc0a | /my_script.py | e28271662bce4a1c463aa2f9a43bb72ee382fe70 | [] | no_license | rbeyhum/my-own-repo-2021 | b2b69e2d550ce828e5a93be055042a3b5231b14c | e3e0fec9aab05b9c91916b9da3208cc569b9273b | refs/heads/main | 2023-03-17T13:40:48.458186 | 2021-03-15T21:47:13 | 2021-03-15T21:47:13 | 348,128,248 | 0 | 0 | null | 2021-03-15T21:47:14 | 2021-03-15T21:30:36 | null | UTF-8 | Python | false | false | 19 | py |
print("Hello.") | [
"noreply@github.com"
] | noreply@github.com |
d4bec57822cd7f1cce6d006733f6096f23077dcf | ea0db4285f76da55e48be1b718afebea2f0d6b87 | /src/models/stores/store.py | de3907bc841eb0207722d77073385640b50cbf61 | [] | no_license | isabelitagr/price_of_chair_web | dc160873248f652e95cfa7fa9d236f44a78c72ef | 9fdebefe1046c892b6bc3b1ef56da6aad59004f4 | refs/heads/master | 2021-01-20T18:09:56.394387 | 2016-08-10T19:26:37 | 2016-08-10T19:26:37 | 65,105,016 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | import uuid
import src.models.stores.constants as StoreConstants
from src.common.database import Database
import src.models.stores.errors as StoreErrors
class Store(object):
    """A price-tracked store persisted in MongoDB.

    Each store records the URL prefix its item pages share, plus the
    HTML tag name and attribute query used to locate a price on those
    pages.
    """

    def __init__(self, name, url_prefix, tag_name, query, _id=None):
        self.name = name
        self.url_prefix = url_prefix
        self.tag_name = tag_name
        self.query = query
        # `is None` identity test (was `== None`); generate an id only
        # when the caller did not supply one.
        self._id = uuid.uuid4().hex if _id is None else _id

    def __repr__(self):
        return "<Store {}>".format(self.name)

    def json(self):
        """Return the document representation stored in MongoDB."""
        return {
            '_id': self._id,
            'name': self.name,
            'url_prefix': self.url_prefix,
            'tag_name': self.tag_name,
            'query': self.query
        }

    @classmethod
    def get_by_id(cls, id):
        """Load a store by its ``_id``."""
        return cls(**Database.find_one(StoreConstants.COLLECTION, {'_id': id}))

    def save_to_mongo(self):
        """Write this store's current state back to its document."""
        Database.update(StoreConstants.COLLECTION, {'_id': self._id}, self.json())

    @classmethod
    def get_by_name(cls, store_name):
        """Load a store by its name."""
        return cls(**Database.find_one(StoreConstants.COLLECTION, {'name': store_name}))

    @classmethod
    def get_by_url_prefix(cls, url_prefix):
        # Anchored regex: match a stored document whose url_prefix field
        # starts with the given string.
        return cls(**Database.find_one(StoreConstants.COLLECTION, {'url_prefix': {"$regex": '^{}'.format(url_prefix)}}))

    @classmethod
    def find_by_url(cls, url):
        '''
        Return a store from a url like "http://www.johnlewis.com/item/ndcbbckjebceui"
        :param url: item's url
        :return: a store, or raises a StoreNotFoundException if no Store matches the url
        '''
        # Try successively longer prefixes of the url.  The previous code
        # re-raised on the FIRST prefix that failed, so later (longer)
        # prefixes were never tried; now we keep going and only give up
        # once every prefix has been exhausted.
        for i in range(0, len(url) + 1):
            try:
                return cls.get_by_url_prefix(url[:i])
            except Exception:
                continue
        raise StoreErrors.StoreNotFoundException("The URL Prefix used to find the stores didn't give us any result!")

    @classmethod
    def all(cls):
        """Return every store in the collection."""
        return [cls(**elem) for elem in Database.find(StoreConstants.COLLECTION, {})]

    def delete(self):
        """Remove this store's document from the collection."""
        Database.remove(StoreConstants.COLLECTION, {'_id': self._id})
"isabelitagr"
] | isabelitagr |
7069d8dae75b1aa649b24c927694adb46dc57f3c | 732e1285934470ae04b20d64921a8cba20932875 | /neuedu_cnblogs_spider/pipelines.py | d19805a40bcea08c1a72fa65eb9c955cfba04a39 | [] | no_license | infant01han/neuedu_django_scrapy_es_cnblogs | 69ee11c7840b25b8ae6d37b21324389dfdacf371 | d293bae6ab5a7a360289afe35b7c3320dbce2dc8 | refs/heads/master | 2021-04-19T05:43:49.618157 | 2020-03-24T07:51:20 | 2020-03-24T07:51:20 | 249,584,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class NeueduCnblogsSpiderPipeline(object):
    """Scrapy item pipeline that persists each scraped item."""

    def process_item(self, item, spider):
        # save_to_es() is presumably defined on the item class (see the
        # project's items module) and indexes the item in Elasticsearch
        # -- confirm.  Returning the item keeps it flowing to any later
        # pipelines.
        item.save_to_es()
        return item
| [
"you@example.com"
] | you@example.com |
6babdc36ffef1bb282e9e6628a2ecb4feb57f075 | fdbb86a474ca935a68882ec5630c4b0e35b24c1a | /quqs/front/migrations/0001_initial.py | 7c6b0ba020d3ecee19b72e03fc8f2daf003372fd | [] | no_license | Ravall/quqs.ru | 24d565614b3f17af6102b1fd538fa7b8b18cf2cb | cc4a3ff2f5a1fe3251d27d2aa56741dbe068f734 | refs/heads/master | 2021-05-04T10:36:10.156850 | 2017-05-02T19:31:27 | 2017-05-02T19:31:27 | 51,449,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Autor'
db.create_table(u'front_autor', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('public_name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('comments', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'front', ['Autor'])
# Adding model 'Postcard'
db.create_table(u'front_postcard', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('art_number', self.gf('django.db.models.fields.IntegerField')(unique=True, db_index=True)),
('autor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['front.Autor'])),
('pc_image', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal(u'front', ['Postcard'])
def backwards(self, orm):
# Deleting model 'Autor'
db.delete_table(u'front_autor')
# Deleting model 'Postcard'
db.delete_table(u'front_postcard')
models = {
u'front.autor': {
'Meta': {'object_name': 'Autor'},
'comments': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'front.postcard': {
'Meta': {'object_name': 'Postcard'},
'art_number': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'autor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['front.Autor']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pc_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['front'] | [
"valery.ravall@gmail.com"
] | valery.ravall@gmail.com |
b5719efc41c1787dbdbf3f5fd14e1e331769b2cf | 55a4d7ed3ad3bdf89e995eef2705719ecd989f25 | /main/law/spark_short/spark_short_limai_and_wenshu_origin/lawlist_to_lawid_2018-05-10_imp_other_etl_online.py | e9734a7e27e63e8f7b1081c614d979c3b4078dbe | [] | no_license | ichoukou/Bigdata | 31c1169ca742de5ab8c5671d88198338b79ab901 | 537d90ad24eff4742689eeaeabe48c6ffd9fae16 | refs/heads/master | 2020-04-17T04:58:15.532811 | 2018-12-11T08:56:42 | 2018-12-11T08:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | # -*- coding: utf-8 -*-
from pyspark import SparkContext,SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import re
def p(x):
if x[1]:
print type(x)
print x
# print x[1]
# exit(0)
def filter_(x):
if x[1] and x[1] != '': #过滤掉数据库中,lawlist为Null或''的行。
return True
return False
def get_uuids(uuids):
l = []
for x in uuids:
l.append(x) #将分组结果ResultIterable转换为List
return "||".join(l) #列表不能直接存入Mysql
def get_lawlist_ids(uuid_ids):
uuid,ids = uuid_ids[0],uuid_ids[1]
lawlist_id = []
for x in ids:
lawlist_id.append(x)
return (uuid,"||".join(lawlist_id))
def get_title_short_id(x): #保证lawlist和law_id的有序!
k = x[0] + "|" + x[1]
v = str(x[2])
return (k,v)
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# sc.setLogLevel("ERROR") # ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
# lawlist = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',column='id',lowerBound=0,upperBound=100000,numPartitions=70,properties={"user": "root", "password": "HHly2017."})
lawlist_id = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_v3', table='(select id,title_short,art_num,lawlist_id from law_rule_result2) tmp',column='id',lowerBound=1,upperBound=2881160,numPartitions=30,properties={"user": "weiwc", "password": "HHly2017."})
# lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',predicates=["id >= 1 and id <= 100"],properties={"user": "root", "password": "HHly2017."})
lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other', table='(select id,uuid,lawlist from imp_other_etl ) tmp2',column='id',lowerBound=1,upperBound=4733848,numPartitions=108,properties={"user": "weiwc", "password": "HHly2017."})
def etl_lawlist(p1, p2, lawlist):
if lawlist and lawlist.strip() != '':
# if not (lawlist.strip().startswith("[") and lawlist.strip().endswith("]")): # 去掉前后的所有"
r1 = re.findall(ur'"{0,5}\["{0,5}', lawlist.strip())
r2 = re.findall(ur'"{0,5}\]"{0,5}', lawlist.strip())
if r1 and r2:
start = r1.pop(0)
end = r2.pop()
lawlist = lawlist.strip().replace(start, "").replace(end, "")
# l = list(eval(lawlist.strip())) #有脏数据不能直接使用eval()
l = lawlist.split('", "') #lawlist类似于:《最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第三条", "《中华人民共和国合同法》第九十七条", "最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第十条", "《中华人民共和国合同法》第九十八条
if l:
tl = []
for i in l:
r1 = re.split(p2, i)
if len(r1) > 2: #确保既有《,又有》
r2 = re.search(p1, r1[2])
if r2: #判断是否找到了条
tl.append(r1[1] + "|" + r2.group(0))
return list(set(tl)) # 去重
return []
return []
return []
lawlist_id2 = lawlist_id.select('title_short','art_num','lawlist_id').map(lambda x:get_title_short_id(x))
p1 = ur'\u7b2c[\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d\u5341\u767e\u5343]{1,10}\u6761'
p2 = ur'[\u300a\u300b]' # 按《》切分
c = lawlist.select('uuid','lawlist').map(lambda x:(x[0],x[1])).flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0]))
# groupByKey().mapValues(lambda v: get_uuids(v))
# filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# print str(c.count()) + "======================"
# c.foreach(p)
lawlist_title_id_result = lawlist_id2.join(c).map(lambda x:x[1]).filter(filter_).flatMapValues(lambda x:(x.split("||"))).map(lambda x:(x[1],x[0])).groupByKey().map(lambda x:(get_lawlist_ids(x)))
schema = StructType([StructField("uuid", StringType(), False),StructField("law_id", StringType(), True)])
f = sqlContext.createDataFrame(lawlist_title_id_result, schema=schema)
# , mode = "overwrite"
# useUnicode = true & characterEncoding = utf8,指定写入mysql时的数据编码,否则会乱码。
# print str(f.count()) + "======================"
f.write.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other?useUnicode=true&characterEncoding=utf8', table='imp_other_uuid_law_id',properties={"user": "weiwc", "password": "HHly2017."})
sc.stop() | [
"985819225@qq.com"
] | 985819225@qq.com |
a2f96c692c168e1bf683f2c7e038bc39c1564e38 | a16c547e3a205870b683eba93b73a83aaa18c70d | /main.py | f6d982b83d7f406c1b061914f6e2c4ee630f32fc | [] | no_license | ehayes9/halifax-crime-data | 553f87e021d11f2502152ea1aafe70f6a186dff9 | 9cd9a45fcdc8957a342b6b22c860548d200fb402 | refs/heads/master | 2022-11-17T02:03:36.714820 | 2020-07-16T13:05:23 | 2020-07-16T13:05:23 | 274,170,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | import requests
import json
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
# TODO: update variables to match your project, update project, dataset & table name in query below
TABLE_NAME = ""
DATASET_NAME = ""
def extract_values(obj, key):
"""source: https://hackersandslackers.com/extract-data-from-complex-json-python """
"""Pull all values of specified key from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
return results
def extract_hfx_crime_data(request):
"""HTTP Cloud Function.
Extracts Data from the HFX OpenData portal, and imports into BigQuery table
Args:
request (flask.Request): The request object.
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
<http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
Response = 'SUCCESS' implies that the load was successful
"""
# get crime data from Halifax open data
response = requests.get("https://opendata.arcgis.com/datasets/f6921c5b12e64d17b5cd173cafb23677_0.geojson")
data = response.json()
# convert data to a string
data_string = json.dumps(data)
# load json object as python dictionary
data_dict = json.loads(data_string)
# create list of fields we want to extract
column_names = ['OBJECTID','evt_rt','evt_rin','evt_date','location','zone','rucr','rucr_ext_d']
#TODO: write a loop for this process, add X & Y coordinates
#pull data from the json object for each nested key using extract_values function
# X = extract_values(data_dict,'X')
# Y = extract_values(data_dict,'Y')
OBJECTID = extract_values(data_dict,'OBJECTID')
evt_rt = extract_values(data_dict,'evt_rt')
evt_rin = extract_values(data_dict,'evt_rin')
evt_date = extract_values(data_dict,'evt_date')
location = extract_values(data_dict,'location')
zone = extract_values(data_dict,'zone')
rucr= extract_values(data_dict,'rucr')
rucr_ext_d = extract_values(data_dict,'rucr_ext_d')
# create df by zipping lists together
df = pd.DataFrame(list(zip(OBJECTID,evt_rt,evt_rin,evt_date,location,zone,rucr,rucr_ext_d)),columns=column_names)
""" perform cleaning functions """
df.columns = df.columns.str.lower()
df = df.apply(lambda x: x.astype(str).str.lower())
# convert evt_date column to timestamp
df['evt_date'] = pd.to_datetime(df['evt_date'])
# rename columns to make them move intuitive
df.rename(columns={'rucr_ext_d':'description',
'evt_date':'date'},
inplace=True)
""" TODO: update project, dataset & table name in query
find max objectID in existing BQ table to determine new records to append """
query = """
SELECT max(object_id) as object_id
FROM `project_name.dataset_name.table_name`
"""
query_job = client.query(query).result().to_dataframe()
max_objectid = query_job['objectid'][0]
"""query new DF to find records that aren't already in existing df """
new_records = df.query('objectid > @max_objectid')
table_id = "{}.{}".format(DATASET_NAME, TABLE_NAME)
method = 'append'
job = client.load_table_from_dataframe(
new_records, table_id, method
)
# Wait for async job to finish
job.result()
# TODO: Add Error handling here. The return message can be used to trigger other functions,
# for example - on FAILURE, send Slack notification
return 'SUCCESS'
if __name__ == '__main__':
## Use this to run locally (not necessary for cloud function)
extract_hfx_crime_data(None)
| [
"erinhayes@Erins-MacBook-Pro.local"
] | erinhayes@Erins-MacBook-Pro.local |
2a9081357518966565fbc2eb8aae7d9e6ed4aaba | 40374b6eaec92fa473b3351d0109836f80eae430 | /cranfield_testdata/ttdata.py | ca178540bf6b843abbf30d91b00ead35427c2ccc | [] | no_license | Lnna/ability | 7e0d5ce0510ae10c11254c93edeb64c3d72510aa | a2a0caf0defc3763560005189a126b2be42f2b86 | refs/heads/master | 2020-03-18T03:11:29.520050 | 2019-03-07T07:22:04 | 2019-03-07T07:22:04 | 134,227,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from src.com.zelkova.db import DButil
def __fetch_origin():
db=DButil.DB("10.144.5.121",3306,"web_crawler","curidemo","web_crawler",charset='utf8')
res=db.fetch_all("select title,content from pages where update_time>='2018-07-01 00:00:00'")
return res
def __insert_tt(res:list):
if res:
# db=DButil.DB("10.108.233.216",3306,"xxb","mysql","nlp_test",charset='utf8')
db=DButil.DB("10.108.233.216",3306,"xxb","mysql","nlp_test",charset='utf8')
db.delete(" delete from pages ")
db.update("insert into pages(title,content) values(%s,%s)",res)
def fetch_corpus(content='title'):
db = DButil.DB("10.108.233.216", 3306, "xxb", "mysql", "nlp_test", charset='utf8')
if content=='title':
res=db.fetch_all("select title from pages")
else:
res=db.fetch_all("select content from pages")
return res
if __name__=="__main__":
__insert_tt(__fetch_origin())
| [
"lnn@lnn-X411"
] | lnn@lnn-X411 |
250f31b763d02f2dba25473438a3e6fdcc71ebc9 | 55a9b1b294d5a402c63848f9f7386e3bf93645da | /docker/src/clawpack-5.3.1/pyclaw/src/petclaw/tests/test_io.py | 56c544ed1ff6d6cd39629552d19d32f8513d88d9 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"MIT",
"BSD-3-Clause"
] | permissive | geohackweek/visualization | b606cfade5d31f59cc38602df05930aed6e19b17 | 5d29fa5b69d69ee5c18ffaef2d902bd51f5807c8 | refs/heads/gh-pages | 2021-01-21T13:34:44.622039 | 2019-09-06T23:28:08 | 2019-09-06T23:28:08 | 68,648,198 | 11 | 13 | NOASSERTION | 2019-09-06T23:28:09 | 2016-09-19T21:27:33 | Jupyter Notebook | UTF-8 | Python | false | false | 509 | py | from clawpack import pyclaw
from clawpack import petclaw
import os
class PetClawIOTest(pyclaw.IOTest):
@property
def solution(self):
return petclaw.Solution()
@property
def file_formats(self):
return ['hdf5']
@property
def this_dir(self):
return os.path.dirname(os.path.abspath(__file__))
@property
def test_data_dir(self):
return os.path.join(self.this_dir, '../../pyclaw/tests/test_data')
def test_io_from_binary(self):
return | [
"arendta@uw.edu"
] | arendta@uw.edu |
92d1db39462651296a9f6cb842c88db3256465a3 | aa490f6d0562edb70560716a6de0982e1fe852b4 | /2017/tree/Flatten_Binary_Tree_to_Linked_List.py | d653434904bf9bda884c0b0deb6825aacbcb2886 | [] | no_license | buhuipao/LeetCode | 30bb9293d4e2db2c2020bc2e0b583ec216ce9974 | 9687f8e743a8b6396fff192f22b5256d1025f86b | refs/heads/master | 2023-05-26T05:45:08.742410 | 2023-05-22T01:11:10 | 2023-05-22T01:11:10 | 94,600,745 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # _*_ coding: utf-8 _*_
'''
Given a binary tree, flatten it to a linked list in-place.
For example,
Given
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def flatten(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
先序遍历
"""
if not root:
return root
stack = [root]
pre = TreeNode(None)
while stack:
node = stack.pop()
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
pre.right = node
pre.left = None
pre = node
| [
"chenhua22@outlook.com"
] | chenhua22@outlook.com |
27b05a4a438b2511c23bed2d0bad0d8de56a97fc | 8a3a180e23db62df84f6d8a600dce371846cad65 | /french/urls.py | 858d188b3c17ca5dfdacead758b80fed86986d60 | [] | no_license | johnofkorea/kimoujoon_com | 2cca288adfe677f6292d1b7197cfb65a6068e51e | 1d68ad9aac08c0b2a40e4291488ad17d70d9eb87 | refs/heads/master | 2021-07-12T18:09:21.955932 | 2019-01-01T10:00:33 | 2019-01-01T10:00:33 | 143,883,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^newsfactory/(?P<year_month>\S+)$', views.newsfactory),
url(r'^thought/(?P<yy_mm_dd>\S+)$', views.thought),
url(r'^search/$', views.search),
url(r'^contributors/$', views.contributors),
url(r'^contributor/(?P<user_id>\S+)$', views.contributor),
]
| [
"john.of.korea@gmail.com"
] | john.of.korea@gmail.com |
a681e35076faabaf9130db917c45481b60e479f7 | dd83170701699d8f36ed0bb88d32d24ed0311f23 | /PugliaEventi_recommender/api/serializers.py | 19d678c4b74486f9c41978e1e3ffecfa4b9920fe | [] | no_license | gperniola/PugliaEventiDocker | f6519f987a529aa85951fef8888498c758d6dada | dc9755addfd7696bb98dce523660447d853cee71 | refs/heads/master | 2022-12-11T12:44:41.626753 | 2019-10-23T15:44:40 | 2019-10-23T15:44:40 | 170,752,839 | 0 | 0 | null | 2022-12-08T01:38:26 | 2019-02-14T20:21:49 | TSQL | UTF-8 | Python | false | false | 7,221 | py | from rest_framework import serializers
from .models import Utente, Place, Event, Distanza, PrevisioniEventi, PrevisioniComuni, Valutazione, Sperimentazione
from datetime import datetime
class UtenteSerializer(serializers.ModelSerializer):
class Meta:
model = Utente
fields = ('id', 'username', 'location', 'first_configuration')
class PlaceSerializer(serializers.ModelSerializer):
#distanza = serializers.SerializerMethodField('get_distanza_AB')
#centro_distanza = serializers.SerializerMethodField('get_centro')
tags = serializers.SerializerMethodField('get_taglist')
eventi_programmati = serializers.SerializerMethodField('get_eventi')
valutato = serializers.SerializerMethodField('get_is_valutato')
class Meta:
model = Place
fields = ('placeId', 'name','tipo', 'location', 'indirizzo', 'location', 'telefono' ,'sitoweb', 'chiusura', 'link', 'tags', 'eventi_programmati', 'valutato')
def get_centro(self,obj):
user_location = self.context.get("user_location")
if user_location != '':
return user_location
else: return ''
def get_distanza_AB(self,obj):
user_location = self.context.get("user_location")
place_location = obj.location
if user_location != '' and user_location != place_location:
distanza_AB = Distanza.objects.filter(cittaA=user_location, cittaB=place_location)
return distanza_AB[0].distanza
else: return ''
def get_taglist(self,obj):
tags = []
if obj.informale == 1: tags.append('informale')
if obj.raffinato == 1: tags.append('raffinato')
if obj.benessere == 1: tags.append('benessere')
if obj.bere == 1: tags.append('bere')
if obj.mangiare == 1: tags.append('mangiare')
if obj.dormire == 1: tags.append('dormire')
if obj.goloso == 1: tags.append('goloso')
if obj.libri == 1: tags.append('libri')
if obj.romantico == 1: tags.append('romantico')
if obj.museo == 1: tags.append('museo')
if obj.spiaggia == 1: tags.append('spiaggia')
if obj.freeEntry == 1: tags.append('free entry')
if obj.arte == 1: tags.append('arte')
if obj.avventura == 1: tags.append('avventura')
if obj.cinema == 1: tags.append('cinema')
if obj.cittadinanza == 1: tags.append('cittadinanza')
if obj.musica_classica == 1: tags.append('musica classica')
if obj.geek == 1: tags.append('geek')
if obj.bambini == 1: tags.append('bambini')
if obj.folklore == 1: tags.append('folklore')
if obj.cultura == 1: tags.append('cultura')
if obj.jazz == 1: tags.append('jazz')
if obj.concerti == 1: tags.append('concerti')
if obj.teatro == 1: tags.append('teatro')
if obj.vita_notturna == 1: tags.append('vita notturna')
return tags
def get_eventi(self,obj):
eventi_programmati = []
date_today = datetime.today().date()
for ev in Event.objects.filter(place=obj.name, date_to__gte=date_today):
eventi_programmati.append({"titolo":ev.title,"link":ev.link,"data_da":ev.date_from,"data_a":ev.date_to})
return eventi_programmati
def get_is_valutato(self,obj):
user_id = self.context.get("user_id")
if Valutazione.objects.filter(place = obj.placeId, user=user_id).exists():
return True
else:
return False
class PrevisioniComuniSerializer(serializers.ModelSerializer):
stagione = serializers.SerializerMethodField('get_stagione_giorno')
condizioni = serializers.SerializerMethodField('get_condizioni_giorno')
temp = serializers.SerializerMethodField('get_temp_giorno')
vento = serializers.SerializerMethodField('get_vento_giorno')
class Meta:
model = PrevisioniComuni
fields=['data','stagione', 'condizioni', 'temp', 'vento']
def get_condizioni_giorno(self,obj):
if obj.sereno == 1: return 'sereno'
if obj.coperto == 1: return 'coperto'
if obj.poco_nuvoloso == 1: return 'poco nuvoloso'
if obj.pioggia == 1: return 'pioggia'
if obj.temporale == 1: return 'temporale'
if obj.nebbia == 1: return 'nebbia'
if obj.neve == 1: return 'neve'
def get_stagione_giorno(self,obj):
if obj.inverno == 1: return 'inverno'
if obj.primavera == 1: return 'primavera'
if obj.estate == 1: return 'estate'
if obj.autunno == 1: return 'autunno'
def get_temp_giorno(self,obj):
return int(obj.temperatura)
def get_vento_giorno(self,obj):
return int(obj.velocita_vento)
class PrevisioniEventiSerializer(serializers.ModelSerializer):
bollettino = PrevisioniComuniSerializer(source='idprevisione')
class Meta:
model = PrevisioniEventi
fields = ['bollettino']
depth=3
class EventSerializer(serializers.ModelSerializer):
titolo = serializers.SerializerMethodField('get_title')
data_da = serializers.SerializerMethodField('get_date_from')
data_a = serializers.SerializerMethodField('get_date_to')
posto_nome = serializers.SerializerMethodField('get_place')
popolarita = serializers.SerializerMethodField('get_popularity')
tags = serializers.SerializerMethodField('get_taglist')
distanza = serializers.SerializerMethodField('get_distanza_AB')
centro_distanza = serializers.SerializerMethodField('get_centro')
previsioni_evento = PrevisioniEventiSerializer(many=True, read_only=True)
class Meta:
model = Event
fields = ('eventId', 'titolo', 'descrizione', 'link', 'posto_nome', 'posto_link', 'comune', 'data_da' ,'data_a', 'popolarita', 'tags', 'distanza', 'centro_distanza', 'previsioni_evento')
depth=3
def get_title(self,obj):
return obj.title
def get_date_from(self,obj):
return obj.date_from
def get_date_to(self,obj):
return obj.date_to
def get_place(self,obj):
return obj.place
def get_popularity(self,obj):
return int(obj.popularity)
def get_centro(self,obj):
user_location = self.context.get("user_location")
if user_location != '':
return user_location
else: return ''
def get_distanza_AB(self,obj):
user_location = self.context.get("user_location")
event_location = obj.comune
if user_location != '' and user_location != event_location:
distanza_AB = Distanza.objects.filter(cittaA=user_location, cittaB=event_location)
return distanza_AB[0].distanza
else: return ''
def get_taglist(self,obj):
tags = []
if obj.free_entry == 1: tags.append('free entry')
if obj.arte == 1: tags.append('arte')
if obj.avventura == 1: tags.append('avventura')
if obj.cinema == 1: tags.append('cinema')
if obj.cittadinanza == 1: tags.append('cittadinanza')
if obj.musica_classica == 1: tags.append('musica classica')
if obj.geek == 1: tags.append('geek')
if obj.bambini == 1: tags.append('bambini')
if obj.folklore == 1: tags.append('folklore')
if obj.cultura == 1: tags.append('cultura')
if obj.jazz == 1: tags.append('jazz')
if obj.concerti == 1: tags.append('concerti')
if obj.teatro == 1: tags.append('teatro')
if obj.vita_notturna == 1: tags.append('vita notturna')
if obj.featured == 1: tags.append('featured')
#prendi tags anche dal posto
#if obj.place:
# p = Place.objects.get(name=obj.place)
# for x in p.labels().split(','):
# x.strip()
# tags.append(x)
# tags.pop()
# tags = list(set(tags))
return tags
class ValutazioneSerializer(serializers.ModelSerializer):
class Meta:
model = Valutazione
fields = ('mood', 'companionship', 'place','rating')
class SperimentazioneSerializer(serializers.ModelSerializer):
class Meta:
model = Sperimentazione
fields = '__all__'
| [
"g.perniola22@gmail.com"
] | g.perniola22@gmail.com |
d3154802016831a9607ed823f19441a46c3f2353 | e1f67984994dac861dfd03a9218c5afc37076037 | /venv/badtouch-develop/setup.py | 0d597e14961fe652d7d737e8188ec7ff1e4dd00f | [] | no_license | rishikanthc/Basklitball-mlh-prime | ba04c4732de4e1103798c0375184975db3461a06 | dde506c58d5fd882b50786d29194a2c20721f514 | refs/heads/master | 2021-01-09T20:45:07.297711 | 2016-08-08T22:48:18 | 2016-08-08T22:48:18 | 65,244,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | # -*- coding: utf-8 -*-
import codecs
from setuptools import setup, find_packages
install_requires = open("requirements.txt").readlines()
test_requires = []
for line in open("requirements-test.txt").readlines():
if line.strip() and not line.startswith("-r"):
test_requires.append(line.strip())
long_description = codecs.open('README.rst', "r", "utf-8").read()
setup(
name='badtouch',
version="0.1",
description='A friendly python library for the Bose SoundTouch (R) API',
long_description=long_description,
author="Christian Assing",
author_email="chris@ca-net.org",
url="http://github.com/chassing/badtouch/",
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'test': test_requires,
},
license="BSD",
platforms='any',
keywords='nidhogg',
classifiers=[
# Picked from
# http://pypi.python.org/pypi?:action=list_classifiers
# "Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
# "Development Status :: 3 - Alpha",
# "Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
# "Development Status :: 6 - Mature",
# "Development Status :: 7 - Inactive",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Topic :: Multimedia :: Sound/Audio",
],
test_suite='tests',
)
| [
"rishikanth@dyn-160-39-142-100.dyn.columbia.edu"
] | rishikanth@dyn-160-39-142-100.dyn.columbia.edu |
372a1ab30f84b9a8c5f673b6c1f21e2219d23e8e | 66a588eaff6714569b918eb6d898d56aad06227e | /ovirt_list-vm.py | 59afcf8714e1360d63735312f9e9539f20bafea4 | [] | no_license | Fabian1976/python-hypervisor-api | 19e3b58c518550814348b4cb574a44f83aac7e87 | eda7068a1f765644f5a32aa9b7733c5c56698689 | refs/heads/master | 2020-12-25T05:45:56.431215 | 2015-02-11T13:06:00 | 2015-02-11T13:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #! /usr/bin/python
#this script requires ovirt-engine-sdk-python
from ovirtsdk.api import API
from ovirtsdk.xml import params
from time import sleep
def main():
URL='https://<ovirt-host>:443/api'
USERNAME='admin@internal'
PASSWORD='secretpass'
api = API(url=URL, username=USERNAME, password=PASSWORD,insecure=True)
vm_list=api.vms.list()
for vm in vm_list:
print vm.name
api.disconnect()
if __name__ == '__main__':
main()
| [
"jensdepuydt@gmail.com"
] | jensdepuydt@gmail.com |
336c253c1eb34cbc6702a52383fca4b3c9ac0f56 | 00c13d6096bb216a88efc9d623fa36acfd36b2e0 | /restuygulama/urls.py | 141c8ab27678755af0db417eebea221f5ed2f946 | [] | no_license | bunyadahmadli/django_rest_framework_example | 7d0fa0e735bcca24220b3332a319d3113dc52c67 | 33a99db1d2df6ee98760ff550c2c651a036c25f1 | refs/heads/master | 2022-10-31T21:46:26.042952 | 2020-06-17T09:21:44 | 2020-06-17T09:21:44 | 272,548,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | """restuygulama URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from rest_framework import routers
from app1 import views
router = routers.DefaultRouter()
router.register(r'yazarlar', views.YazarViewSet)
router.register(r'kitaplar', views.KitapViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^', include(router.urls))
] | [
"bunyad.ahmadli@gmail.com"
] | bunyad.ahmadli@gmail.com |
265a5e2c314e412b545f2390b981e49d3b9d7a25 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/examples/compose/plot_digits_pipe.py | c5b0fb2a136094f0d16c180883cdcc3175896a9d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 2,395 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
logistic = SGDClassifier(loss='log', penalty='l2', early_stopping=True,
max_iter=10000, tol=1e-5, random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
'pca__n_components': [5, 20, 30, 40, 50, 64],
'logistic__alpha': np.logspace(-4, 4, 5),
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.tight_layout()
plt.show()
| [
"leiqk@dxy.cn"
] | leiqk@dxy.cn |
1cd72e89e784bd4076712d2ec310a468dc2d12d0 | f598881a0679dc4af35b6f92f99257fde1119f1a | /Solutions/Problem 1.py | c4b822d35e59a315baf175ecb8b20b6fd48a9196 | [] | no_license | Ashton-Sidhu/Daily-Coding-Problems | 05b0b6424bde946df776ee29b72ab7d03cedab8a | 10ddf98cc2a8c17e3bb6df0c91dbfcc88786230c | refs/heads/master | 2020-03-20T23:25:53.197547 | 2019-01-21T16:25:17 | 2019-01-21T16:25:17 | 137,845,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | #QUESTION:
#Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
#For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
#SOLUTION:
def AddToK(li, k):
#Go through each element and check if the compliment of itself has been seen before and store it in a set.
#Lookup in Set is O(1), Iterate through list is O(N)
#Complexity is O(N)
nummap = set([])
for item in li:
#Calculate compliment of number and if it exists in set return true.
compliment = k - item
if item in nummap:
return True
nummap.add(compliment)
return False
def main():
print(AddToK([8], 8))
print(AddToK([1,2,4,4], 8))
print(AddToK([1,2,4,5], 8))
print(AddToK([1,2,4,6,7], 8))
print(AddToK([], 8))
if __name__ == '__main__':
main()
| [
"ashton.sidhu1994@gmail.com"
] | ashton.sidhu1994@gmail.com |
7a9326929a7f53b2662c7335eb71b4aa4bf2e76a | 8ef21c0efa61d77beb7633643f38704d980dba2a | /docker/worker/docker_run.py | f8fb42d20fbbb36747ce8b2106c3e2e96fa664ed | [
"MIT"
] | permissive | nachtmaar/androlyze | 9d813c61f25d9788ff205c3b43f2f5e76f5ec82c | cfdbf5ddc7da0eff8554de1ac89e3acf8142100d | refs/heads/master | 2021-01-10T21:49:34.080866 | 2015-06-28T22:31:18 | 2015-06-28T22:31:18 | 38,138,174 | 18 | 4 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
import sys
# DO not move this line under androlyze specific imports!
sys.path.append(".")
from androlyze.docker.start_worker import start_workers
start_workers() | [
"schmidt89@mathematik.uni-marburg.de"
] | schmidt89@mathematik.uni-marburg.de |
df6757eea70ca6beff741d0f7edfea3f0b34882d | e6d4c0b873e84dbfc4ac006338a24e34712bf32b | /test/psf/psf_calib_plot.py | 1dcbaaf4d49854160eaf2ea55ab90f05106a9099 | [] | no_license | streeto/SpecMorph | d965f865806ebab70ce90155be37196cf7839f74 | b21248d112f01a621b80ce28e014f39b20d921f9 | refs/heads/master | 2020-05-16T22:25:17.550976 | 2015-08-25T12:19:09 | 2015-08-25T12:19:09 | 10,758,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,256 | py | # -*- coding: utf-8 -*-
'''
Created on 10/09/2014
@author: andre
'''
import numpy as np
from pylab import normpdf
import matplotlib.pyplot as plt
import glob
from os import path
import sys
################################################################################
def plot_setup():
    """Install the publication-style matplotlib rc settings and switch
    interactive drawing off.  Call once before building any figure."""
    # NOTE(review): 'text.fontsize' is a long-deprecated rc key on modern
    # matplotlib ('font.size' replaced it) -- confirm the targeted version.
    plotpars = {'legend.fontsize': 8,
                'xtick.labelsize': 10,
                'ytick.labelsize': 10,
                'text.fontsize': 10,
                'axes.titlesize': 12,
                'lines.linewidth': 0.5,
                'font.family': 'Times New Roman',
                # previously tried subplot geometry, kept for reference
#                'figure.subplot.left': 0.08,
#                'figure.subplot.bottom': 0.08,
#                'figure.subplot.right': 0.97,
#                'figure.subplot.top': 0.95,
#                'figure.subplot.wspace': 0.42,
#                'figure.subplot.hspace': 0.1,
                'image.cmap': 'GnBu',
                }
    plt.rcParams.update(plotpars)
    plt.ioff()  # batch mode: figures render only on explicit show/save
################################################################################
func = sys.argv[2] # 'Kolmogorov', 'Moffat, 'Gaussian'
beta4 = len(sys.argv) > 3 and sys.argv[3] == 'beta4'
if beta4:
name = '%sBeta4' % (func)
else:
name = func
galaxiesV500 = glob.glob(path.join(sys.argv[1], '%s_[a-zA-Z0-9]*.[0-9]*.V500.v1.5.PSF.dat' % name))
print galaxiesV500
param_dtype = [('lambda', 'float64'), ('I_0', 'float64'),
('fwhm', 'float64'), ('beta', 'float64'),
('x0', 'float64'), ('y0', 'float64'),
('PA', 'float64'), ('ell', 'float64'),
('good', 'float64'), ('flag', 'bool'), ('chi2', 'float64')]
nlambdaV500 = 10
nlambdaV1200 = 30
fwhmV500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
betaV500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
x0V500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
y0V500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
ellV500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
chi2V500 = np.ma.empty((len(galaxiesV500), nlambdaV500))
#===============================================================================
# fwhmV1200 = np.ma.empty((len(galaxiesV1200), nlambdaV1200))
# betaV1200 = np.ma.empty((len(galaxiesV1200), nlambdaV1200))
# x0V1200 = np.ma.empty((len(galaxiesV1200), nlambdaV1200))
# y0V1200 = np.ma.empty((len(galaxiesV1200), nlambdaV1200))
# ellV1200 = np.ma.empty((len(galaxiesV1200), nlambdaV1200))
#===============================================================================
for i, galaxy in enumerate(galaxiesV500):
p = np.genfromtxt(galaxy, dtype=param_dtype)
wlV500 = p['lambda']
mask = p['flag'] | (p['good'] < 0.6)
fwhmV500[i] = p['fwhm']
fwhmV500[i, mask] = np.ma.masked
betaV500[i] = p['beta']
betaV500[i, mask] = np.ma.masked
x0V500[i] = p['x0']
x0V500[i, mask] = np.ma.masked
y0V500[i] = p['y0']
y0V500[i, mask] = np.ma.masked
ellV500[i] = p['ell']
ellV500[i, mask] = np.ma.masked
chi2V500[i] = p['chi2']
chi2V500[i, mask] = np.ma.masked
#===============================================================================
# for i, galaxy in enumerate(galaxiesV1200):
# cube = 'psf/%s.%s.v1.5.PSF.dat' % (galaxy, 'V1200')
# p = np.genfromtxt(cube, dtype=param_dtype)
# wlV1200 = p['lambda']
# mask = p['flag'] | (p['good'] < 0.7)
# fwhmV1200[i] = p['fwhm']
# fwhmV1200[i, mask] = np.ma.masked
# betaV1200[i] = p['beta']
# betaV1200[i, mask] = np.ma.masked
# x0V1200[i] = p['x0']
# x0V1200[i, mask] = np.ma.masked
# y0V1200[i] = p['y0']
# y0V1200[i, mask] = np.ma.masked
# ellV1200[i] = p['ell']
# ellV1200[i, mask] = np.ma.masked
#===============================================================================
fwhmV500_b = (fwhmV500 * (1.0 - ellV500))
wlmin = wlV500.min()
wlmax = wlV500.max()
def getstats1(p, wei):
    """Weighted mean and weighted standard deviation over the whole array.

    *wei* is assumed to be normalized (sums to 1), as in the call sites.
    Returns the pair (mean, std).
    """
    mean = (p * wei).sum()
    std = np.sqrt(((p - mean) ** 2 * wei).sum())
    return mean, std
def getstats(p, wei):
    """Column-wise (axis 0) weighted mean and weighted standard deviation.

    *wei* is assumed to be normalized along axis 0, as in the call sites.
    Returns the pair (mean, std), each with one value per column.
    """
    mean = (p * wei).sum(axis=0)
    std = np.sqrt(((p - mean) ** 2 * wei).sum(axis=0))
    return mean, std
wei = np.exp(-0.5 * chi2V500 / chi2V500.min())
wei /= np.sum(wei, axis=0)
wei1 = wei / wei.sum()
fwhmV500_wei, fwhmV500_std = getstats(fwhmV500, wei)
fwhmbV500_wei, fwhmbV500_std = getstats(fwhmV500_b, wei)
betaV500_wei, betaV500_std = getstats(betaV500, wei)
fwhmV500_wei1, fwhmV500_std1 = getstats1(fwhmV500, wei1)
fwhmbV500_wei1, fwhmbV500_std1 = getstats1(fwhmV500_b, wei1)
betaV500_wei1, betaV500_std1 = getstats1(betaV500, wei1)
plot_setup()
width_pt = 448.07378
width_in = width_pt / 72.0 * 0.9
fig = plt.figure(figsize=(width_in, width_in * 1.0))
gs = plt.GridSpec(2, 1, height_ratios=[1.0, 1.0])
ax = plt.subplot(gs[0])
ax.plot(wlV500, fwhmV500_wei, 'ko-', mfc='none')
ax.plot(wlV500, fwhmV500_wei - fwhmV500_std, 'k--')
ax.plot(wlV500, fwhmV500_wei + fwhmV500_std, 'k--')
#ax.plot(wlV500, fwhmbV500_wei, ls='-', color='pink')
#ax.plot(wlV500, fwhmbV500_wei - fwhmbV500_std, ls='--', color='pink')
#ax.plot(wlV500, fwhmbV500_wei + fwhmbV500_std, ls='--', color='pink')
ax.set_ylabel(r'FWHM $[\mathrm{arcsec}]$')
if func != 'Moffat' or beta4:
ax.set_xlabel(r'Comprimento de onda $[\AA]$')
else:
ax.set_xticklabels([])
ax.set_ylim(0.0, 4.0)
#ax.set_xlim(wlmin, wlmax)
ax.set_xlim(3700, 7500)
if func == 'Moffat' and not beta4:
plt.subplot(212)
plt.plot(wlV500, betaV500_wei, 'r-')
plt.plot(wlV500, betaV500_wei - betaV500_std, 'r--')
plt.plot(wlV500, betaV500_wei + betaV500_std, 'r--')
plt.ylabel(r'$\beta$')
plt.xlabel(r'wavelength $[\AA]$')
plt.ylim(0.0, 4.0)
#plt.xlim(wlmin, wlmax)
plt.xlim(3700, 7500)
else:
nsigma = 2.5
nbin = 10
ax = plt.subplot(gs[1])
r = [fwhmV500_wei1 - nsigma * fwhmV500_std1, fwhmV500_wei1 + nsigma * fwhmV500_std1]
ax.hist(fwhmV500.compressed(), weights=wei.compressed(), bins=nbin, range=r, normed=True,
color='k', histtype='step')
x = np.linspace(r[0], r[1], 100)
ax.plot(x, normpdf(x, fwhmV500_wei1, fwhmV500_std1), 'k--')
ax.vlines(fwhmV500_wei1, ymin=0, ymax=2, color='k', linestyles='--')
ax.text(fwhmV500_wei1 - 0.05, 1.4, r'$\mathrm{FWHM}\ =\ %.3f\,^{\prime\prime}\pm\, %.3f$' % (fwhmV500_wei1, fwhmV500_std1), ha='right')
#ax.set_xlim(r[0], r[1])
ax.set_xlim(1.5, 4)
ax.set_ylim(0, 1.6)
ax.set_ylabel(r'Densidade de probabilidade')
ax.set_xlabel(r'FWHM $[\mathrm{arcsec}]$')
if func == 'Moffat' and beta4:
plt.suptitle(u'Perfil de Moffat - estrelas de calibração')
elif func == 'Moffat':
plt.suptitle(r'%s | $\beta=%.3f \pm %.3f$ | $\mathrm{FWHM}=%.3f \pm %.3f$' % ((func,) + getstats1(betaV500, wei1) + getstats1(fwhmV500, wei1)))
else:
plt.suptitle(r'%s | $\mathrm{FWHM}=%.3f \pm %.3f$' % ((func,) + getstats1(fwhmV500, wei1)))
gs.tight_layout(fig, rect=[0, 0, 1, 0.97])
plt.savefig(path.join(sys.argv[1], '%s_PSF_all.pdf' % name))
print 'Summary:'
print 'fwhm(a) = %.3f +- %.3f' % (fwhmV500_wei1, fwhmV500_std1)
print 'fwhm(b) = %.3f +- %.3f' % (fwhmbV500_wei1, fwhmbV500_std1)
if func == 'Moffat' and not beta4:
print 'beta = %.3f +- %.3f' % (betaV500_wei1, betaV500_std1)
print 'ell = %.3f +- %.3f' % getstats1(ellV500, wei1)
| [
"streetomon@gmail.com"
] | streetomon@gmail.com |
12e211c6a0b585809fd340585465804edab79022 | f43476c365eb0f87f61f1ffa81ef57e8924f6294 | /src/smartbin.py | d2c5a734b5c509842cbbdc1ccf5934ce3bf1f32c | [] | no_license | DeltaC6/RPI_SmartBin | e4476ab9701b36bb1ee6171660ab90643f82d7d8 | 50d68d17472fd0a38fed6ae0ad56a7b1b595125c | refs/heads/master | 2023-03-19T03:25:39.019683 | 2021-03-15T12:26:01 | 2021-03-15T12:26:01 | 317,552,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | ################################################################################
##
# @author Syed Asad Amin
# @date Dec 1st, 2020
# @file smartbin.py
# @version v1.0.0
# | v1.0.0 -> Added the smartbin class.
# Integrated the GPS and ultrasonic sensors.
# Added Ubidots request functions.
# | v1.0.1 -> Added the API for server communication.
#
# @note This is a program written in python to implement Smart Bin project.
#
# This project uses a GPS module and an ultrasonic module to get
# location and status of the BIN respectively.
################################################################################
import RPi.GPIO as GPIO
import serial
import json
import socket
import time
import threading
class SmartBin:
    """Raspberry Pi smart-bin node.

    Periodically samples a GPS fix (NMEA sentences over serial) and the
    bin's fill level (HC-SR04-style ultrasonic sensor on GPIO), then
    pushes both to the Ubidots REST API over a raw HTTP/1.1 socket.
    """

    # Ubidots endpoint.
    HOST = 'industrial.api.ubidots.com'
    PORT = 80
    # GPS module serial link.
    SERIAL_PORT = '/dev/ttyS0'
    SERIAL_BAUD = 115200
    # BCM pin numbers: heartbeat LED plus ultrasonic trigger/echo.
    LED_PIN = 21
    TRIG_PIN = 16
    ECHO_PIN = 20
    # Depth of the empty bin in cm; used to turn measured distance into %.
    BIN_DEPTH = 90

    def __init__(self):
        """Initialize state, then configure GPIO and open the serial port."""
        print('[INFO] Initializing components.')
        # Variables
        self.isRunning = True
        self.ledState = False
        self.lat = 0.0  # current latitude of the bin
        self.lng = 0.0  # current longitude of the bin
        self.status = 0.0  # fill level of the bin in %
        # Initializations
        self.InitGPIO()
        self.InitSerial()

    def InitGPIO(self):
        """Configure the GPIO pins and start the heartbeat LED.

        On any failure the pins are released and the node is flagged as
        not running (self.isRunning = False).
        """
        try:
            GPIO.setmode(GPIO.BCM)  # BCM config
            GPIO.setup(self.LED_PIN, GPIO.OUT)  # LED as output
            GPIO.setup(self.TRIG_PIN, GPIO.OUT)  # TRIG as output
            GPIO.setup(self.ECHO_PIN, GPIO.IN)  # ECHO as input
            GPIO.output(self.LED_PIN, self.ledState)
            GPIO.output(self.TRIG_PIN, GPIO.LOW)
            self.blink()
        except Exception as e:
            print('[ERROR] Could not config gpio')
            print(e)
            GPIO.cleanup()
            self.isRunning = False

    def InitSerial(self):
        """Open the GPS serial port; on failure flag the node as stopped.

        NOTE(review): if serial.Serial() itself raises, self.ser was never
        assigned, so the self.ser.close() in the handler raises
        AttributeError -- confirm intended failure path.
        """
        try:
            self.ser = serial.Serial(self.SERIAL_PORT, self.SERIAL_BAUD)
        except Exception as e:
            print('[ERROR] Could not open serial.')
            print(e)
            self.ser.close()
            self.isRunning = False

    def blink(self):
        """Toggle the LED and re-arm a 1 s timer so the LED keeps blinking
        in the background (self-rescheduling threading.Timer)."""
        try:
            self.t = threading.Timer(1.0, self.blink)
            self.t.setName('blinker')
            self.ledState = not self.ledState
            GPIO.output(self.LED_PIN, self.ledState)
            self.t.start()
        except Exception as e:
            print('[ERROR] Blinker thread error.')
            print(e)

    def run(self):
        """Main loop: every 10 s read location and fill level, then upload.

        Ctrl-C (KeyboardInterrupt) stops the loop.
        """
        try:
            print('[INFO] Running application')
            while self.isRunning:
                # Acquire the device location and fill status
                self.getLocaiton()
                self.getStatus()
                print('[INFO] DATA: {}, {}, {}'.format(self.lat, self.lng, self.status))
                # Send the data to the server
                self.uploadData()
                # Delay before the next sample
                time.sleep(10.0)
        except KeyboardInterrupt:
            print('[WARN] Force colsed application')
            self.isRunning = False

    def getLocaiton(self):
        """Read one serial line and, if it is a $GPRMC sentence, update
        self.lat / self.lng.

        (Method name keeps its historical typo because run() calls it by
        this exact name.)
        NOTE(review): pyserial's readline() returns bytes on Python 3, so
        data.split('\\n') with a str separator would raise (and be swallowed
        by the except below) -- confirm the intended Python version or add
        a .decode().
        """
        try:
            data = self.ser.readline()
            packets = data.split('\n')
            for packet in packets:
                if '$GPRMC' in packet:
                    # Fields 3 and 5 of $GPRMC are raw latitude/longitude.
                    contents = packet.split(',')
                    rawlat = float(contents[3])
                    rawlng = float(contents[5])
                    self.lat = self.__convertRaw(rawlat)
                    self.lng = self.__convertRaw(rawlng)
                else:
                    continue
        except Exception as e:
            print('[ERROR] Failed to acquire GPS location.')
            print(e)

    def __convertRaw(self, val):
        """Convert an NMEA ddmm.mmmm coordinate into decimal degrees."""
        a = int(val / 100)       # whole degrees
        b = val - (a * 100)      # remaining minutes
        return a + (b / 60)

    def getStatus(self):
        """Fire the ultrasonic sensor and store the fill level in
        self.status (percent of BIN_DEPTH).

        NOTE(review): if ECHO is already high when we arrive, startTime is
        never assigned and the NameError is swallowed by the except below;
        there is also no timeout on either polling loop -- confirm.
        """
        try:
            GPIO.output(self.TRIG_PIN, GPIO.HIGH)
            time.sleep(0.00001) # 10us pulse
            GPIO.output(self.TRIG_PIN, GPIO.LOW)
            # Busy-wait to timestamp the rising and falling edge of the echo.
            while GPIO.input(self.ECHO_PIN) == 0:
                startTime = time.time()
            while GPIO.input(self.ECHO_PIN) == 1:
                stopTime = time.time()
            deltaT = stopTime - startTime
            # Distance in cm: speed of sound 34300 cm/s, halved for the
            # round trip.
            depth = round(deltaT * (34300 / 2.0), 2)
            self.status = (1.0 - (depth / self.BIN_DEPTH)) * 100.0
        except Exception as e:
            print('[ERROR] Failed to acquire bin status.')
            print(e)

    def uploadData(self):
        """Serialize the current readings and POST them to Ubidots."""
        jsonStr = self.createJson()
        postStr = self.createPacket(jsonStr)
        self.sendData(postStr)

    def createJson(self):
        """Build the Ubidots payload: 'position' and 'status' variables,
        both carrying the current lat/lng as context, stamped with the
        current time in milliseconds.  Returns a JSON string."""
        timestamp = int(time.time() * 1000)
        msg = {
            "position": {
                "value": 1,
                "timestamp": timestamp,
                "context": {
                    "lat": self.lat,
                    "lng": self.lng
                }
            },
            "status": {
                "value": self.status,
                "timestamp": timestamp,
                "context": {
                    "lat": self.lat,
                    "lng": self.lng
                }
            }
        }
        return json.dumps(msg)

    def createPacket(self, data):
        """Wrap *data* (a JSON string) in a raw HTTP/1.1 POST request for
        the Ubidots device endpoint and return the full request text.

        NOTE(review): TOKEN is an empty placeholder -- the API token must
        be filled in before the upload can authenticate.
        """
        DEVICE_LABEL = 'sb1'
        USER_AGENT = 'RPI/3'
        TOKEN = ''
        postStr = "POST /api/v1.6/devices/{} HTTP/1.1\r\n".format(DEVICE_LABEL)
        postStr += "Host: {}\r\n".format(self.HOST)
        postStr += "User-Agent: {}\r\n".format(USER_AGENT)
        postStr += "X-Auth-Token: {}\r\n".format(TOKEN)
        postStr += "Content-Type: application/json\r\n"
        postStr += "Content-Length: {}\r\n\r\n".format(len(data))
        postStr += data + "\r\n"
        return postStr

    def sendData(self, msg):
        """Send the prepared HTTP request over a plain TCP socket and log
        whether the server answered 200 OK.

        NOTE(review): the socket is never closed explicitly -- consider a
        context manager / finally block.
        """
        try:
            serverAddress = (self.HOST, self.PORT)
            sendBuffer = msg.encode()
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(serverAddress)
            s.sendall(sendBuffer)
            recvBuffer = s.recv(1024)
            recv = recvBuffer.decode()
            if '200 OK' in recv:
                print('[INFO] Data uploaded to server.')
            else:
                print('[ERROR] Could not upload data to server.')
                print(recv)
        except Exception as e:
            print('[ERROR] Server communication error.')
            print(e)

    def close(self):
        """Stop the main loop, cancel the blinker timer, close the serial
        port and release the GPIO pins.

        NOTE(review): self.t only exists once blink() has run -- calling
        close() after an InitGPIO failure raises AttributeError.
        """
        print('[INFO] Closing application')
        self.isRunning = False # Closing current thread.
        self.t.cancel() # Closing blinker thread.
        self.ser.close() # Closing serial.
        GPIO.cleanup() # Closng GPIO
| [
"s.asad.amin@gmail.com"
] | s.asad.amin@gmail.com |
d4a1f096540f26beec481251d4c1aff36fd8c6cc | 5bb7eab7b6eae5acfd000a9ca36a2a9549a188b1 | /lab2_submission/wordseg/lstm/7/develop_set.py | 65a4cd52249dee96bf7eb0496acf149162184565 | [] | no_license | Clarity-1021/SmartSystem_Lab2 | 91893ea8f0c987450173860b8708cbee06e11585 | e5f14259b0fbe3d38de0f2dfbbe3e311b011a2da | refs/heads/main | 2023-01-30T02:07:42.661325 | 2020-12-16T16:51:34 | 2020-12-16T16:51:34 | 317,869,658 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,093 | py | [(['推', '行', '股', '份', '制', '要', '遵', '循', '经', '济', '规', '律', ',', '尊', '重', '群', '众', '意', '愿', '。', '改', '革', '的', '最', '终', '目', '的', '是', '让', '群', '众', '得', '实', '惠', ',', '使', '企', '业', '有', '活', '力', '。', '不', '切', '实', '际', '地', '强', '制', '推', '行', ',', '把', '职', '工', '完', '全', '置', '于', '被', '动', '的', '位', '置', ',', '其', '结', '果', '往', '往', '是', '事', '与', '愿', '违', '。', '股', '份', '制', '最', '具', '活', '力', '的', '细', '胞', '是', '人', ',', '职', '工', '的', '积', '极', '性', '是', '股', '份', '制', '的', '最', '大', '资', '本', '。', '应', '当', '加', '强', '政', '策', '宣', '传', '、', '典', '型', '宣', '传', '和', '热', '点', '释', '疑', ',', '和', '职', '工', '一', '起', ',', '科', '学', '地', '论', '证', ',', '科', '学', '地', '决', '策', ',', '选', '择', '最', '佳', '的', '改', '制', '方', '式', ',', '这', '样', '才', '能', '把', '改', '制', '工', '作', '做', '好', '。'], ['B', 'E', 'B', 'I', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'I', 'I', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'I', 'I', 'E', 'S', 'B', 'I', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'S', 'B', 'E', 'S', 'B', 'I', 'E', 'S', 'B', 'I', 'E', 'S', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 
'E', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S']), (['农', '业', '部', '优', '质', '农', '产', '品', '开', '发', '服', '务', '中', '心', '的', '俞', '东', '平', '说', ',', '这', '是', '北', '京', '有', '史', '以', '来', '第', '一', '次', '在', '隆', '冬', '季', '节', '举', '办', '这', '样', '大', '规', '模', '的', '综', '合', '性', '花', '展', '。', '随', '着', '园', '艺', '技', '术', '的', '进', '步', ',', '今', '天', '的', '北', '京', '市', '民', '完', '全', '可', '以', '像', '广', '州', '人', '一', '样', '在', '花', '团', '锦', '簇', '中', '迎', '接', '新', '春', '。'], ['B', 'I', 'E', 'B', 'E', 'B', 'I', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'S', 'S', 'S', 'B', 'E', 'B', 'I', 'I', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'I', 'E', 'S', 'B', 'I', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'I', 'I', 'E', 'S', 'B', 'E', 'B', 'E', 'S']), (['移', '民', '多', '艰', '辛', '寄', '语', '须', '三', '思'], ['B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'S']), (['本', '报', '北', '京', '1', '月', '2', '3', '日', '讯', '记', '者', '李', '长', '云', '报', '道', ':', '射', '击', '射', '箭', '运', '动', '走', '向', '市', '场', '研', '讨', '会', '今', '天', '在', '北', '京', '射', '击', '场', '举', '行', '。', '体', '育', '工', '作', '者', '、', '企', '业', '界', '人', '士', '和', '首', '都', '新', '闻', '记', '者', '一', '起', '探', '讨', '发', '展', '射', '击', '射', '箭', '运', '动', '的', '新', '路', '子', '。'], ['B', 'E', 'B', 'E', 'B', 'E', 'B', 'I', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'I', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'I', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'I', 'E', 'S', 'B', 'I', 'E', 'B', 'E', 'S', 'B', 'E', 'B', 'I', 'I', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'S'])]
| [
"798098945@qq.com"
] | 798098945@qq.com |
94540f69b51e297493e925141b698ebc43c6f40c | 933ba604582e8dee2640c0b8a89208cd8d723792 | /apps/camb_legacy/bin/camb_legacy_work_generator | 21d67ae0045f75bc9575c575e1b901b4872b94a5 | [] | no_license | marius311/cosmohome | 0feb2ed940eb283268e68d88dc1ed6c187ec82d5 | ea3a19ff1660c7d0058be4343a6f3639d9c1373e | refs/heads/master | 2021-07-08T16:34:01.047646 | 2020-06-10T22:54:51 | 2020-06-10T22:54:51 | 39,478,943 | 22 | 12 | null | 2016-12-15T12:30:00 | 2015-07-22T01:37:28 | PostScript | UTF-8 | Python | false | false | 395 | #!/usr/bin/env python
import sys
sys.path.append('../bin')
import boinc_path_config
from Boinc.work_generator import WorkGenerator
from Boinc.create_work import check_output
if __name__ == '__main__':
    # BOINC work generator for the camb app: each job request shells out
    # to the camb_legacy_make_params helper, which emits `num` parameter
    # files for the scheduler to pick up.
    class MyWorkGenerator(WorkGenerator):
        def make_jobs(self,num=1):
            check_output(['../bin/camb_legacy_make_params',str(num)])
    # Run the generator loop for the 'camb' application.
    MyWorkGenerator(appname='camb').run()
| [
"mmillea@ucdavis.edu"
] | mmillea@ucdavis.edu | |
b3d3d006e1aa95b8e587502ae2d8da532a1e6b67 | bfd5c5af9fe7b42d82e0aded8b70fa3cbcec434b | /nolearn_mnist/simple_mnist_2.py | 7cb82b233047a18dd42a65c11009df82300e0c34 | [] | no_license | BassyKuo/Neural-Network | 40f53a2e6fea1bee782bcf2bef79c78070c7a3dd | fc1f846c2685109144825f1be42cbbccf2e45cdb | refs/heads/master | 2020-12-07T06:07:34.696571 | 2016-09-02T12:46:12 | 2016-09-02T12:46:12 | 66,934,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,605 | py | #!/usr/bin/env python
# Filename: simple_mnist.py
#Source: http://nbviewer.jupyter.org/github/dnouri/nolearn/blob/master/docs/notebooks/CNN_tutorial.ipynb
import matplotlib.pyplot as plt
import numpy as np
from load_mnist import load_mnist_set
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import DropoutLayer
from lasagne.layers import Conv2DLayer
from lasagne.layers import MaxPool2DLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import adam
from lasagne.layers import get_all_params
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import TrainSplit
from nolearn.lasagne import objective
from nolearn.lasagne import PrintLayerInfo
X, y, X_test, y_test = load_mnist_set()
## here will print the label and image
#figs, axes = plt.subplots(4, 4, figsize=(6, 6))
#for i in range(4):
# for j in range(4):
# axes[i, j].imshow(-X[i + 4 * j].reshape(28, 28),
# cmap='gray',
# interpolation='none')
# axes[i ,j].set_xticks([])
# axes[i, j].set_yticks([])
# axes[i, j].set_title("Label: {}".format(y[i + 4 * j]))
# axes[i, j].axis('off')
# try an architecture that uses a lot of convolutional layers but only one maxpooling layer.
layers2 = [
(InputLayer, {'shape': (None, X.shape[1], X.shape[2], X.shape[3])}),
(Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 32, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
(Conv2DLayer, {'num_filters': 64, 'filter_size': (3, 3)}),
(MaxPool2DLayer, {'pool_size': (2, 2)}),
(DenseLayer, {'num_units': 64}),
(DropoutLayer, {}),
(DenseLayer, {'num_units': 64}),
(DenseLayer, {'num_units': 10, 'nonlinearity': softmax}),
]
# Build the trainer: 10 full passes over the training data with a fixed
# learning rate; verbose=2 prints per-epoch progress.
# Fix: the original line `max_epochs = 10;` had a stray semicolon (and no
# trailing comma) inside the call parentheses, which is a SyntaxError.
net2 = NeuralNet(
    layers=layers2,
    max_epochs=10,
    update_learning_rate=0.01,
    verbose=2,
)
# Show more information
net2.initialize()
layer_info = PrintLayerInfo()
layer_info(net2)
# train the net
net2.fit(X, y)
# test
print "Start to test....."
y_pred = net2.predict(X_test)
print "The accuracy of this network is: %0.2f" % (y_pred == y_test).mean()
# store the network module
import cPickle as pickle
with open('results/simple_net2.pickle','wb') as f:
pickle.dump(net2, f, -1)
| [
"aaammmyyy27@gmail.com"
] | aaammmyyy27@gmail.com |
a3479fa7ee212668e8fb1c930b24849b72f17174 | 6ac88d95b4c1bb157384da0bb4d814d32602d391 | /Lesson2-1.py | 1b8325cec0097f914418bd4dae6656c3ad3cc366 | [] | no_license | hamar82/HomeWork | cd5a729c946589896472f7c3977deba74a4d843a | b8023f209f3948883150c7b3ffe3ae9e92da0000 | refs/heads/main | 2023-07-01T01:32:08.380202 | 2021-08-01T06:00:32 | 2021-08-01T06:00:32 | 391,538,552 | 0 | 0 | null | 2021-08-01T06:46:45 | 2021-08-01T05:46:55 | Python | UTF-8 | Python | false | false | 633 | py |
"""1. Создать список и заполнить его элементами различных типов данных.
Реализовать скрипт проверки типа данных каждого элемента. Использовать функцию type() для проверки типа.
Элементы списка можно не запрашивать у пользователя, а указать явно, в программе."""
my_list = [5, 2.2,'Text', True, None, []]
def my_type(el):
    """Print the type of each element of the iterable *el*, one per line.

    Fixes the original implementation, which ignored its argument and
    indexed the module-level ``my_list`` instead (also shadowing the
    ``el`` parameter with a loop index).  Returns None, as before.
    """
    for item in el:
        print(type(item))
    return
my_type(my_list)
| [
"hamar82@gmail.com"
] | hamar82@gmail.com |
f0c172434f75157fd2f9d04b95acc34c5ef8c9c2 | 7705ea99590a6d495b78fbfdf65019efbd9c7f0d | /main.py | 1683e0f7b8012f364009142bcbc0bf60e3d181eb | [
"MIT"
] | permissive | Lotayou/point2mesh | e9d3657dfe6107e5356f5f89aebde91ba2ed3f53 | 0e3d288c03b5165e28430aada841bd6c25db8cb7 | refs/heads/master | 2022-08-01T12:14:59.021377 | 2020-05-25T21:09:56 | 2020-05-25T21:09:56 | 266,933,474 | 1 | 0 | MIT | 2020-05-26T03:14:22 | 2020-05-26T03:14:22 | null | UTF-8 | Python | false | false | 3,847 | py | import torch
from models.layers.mesh import Mesh, PartMesh
from models.networks import init_net, sample_surface, local_nonuniform_penalty
import utils
import numpy as np
from models.losses import chamfer_distance
from options import Options
import time
import os
options = Options()
opts = options.args
torch.manual_seed(opts.torch_seed)
device = torch.device('cuda:{}'.format(opts.gpu) if torch.cuda.is_available() else torch.device('cpu'))
print('device: {}'.format(device))
# initial mesh
mesh = Mesh(opts.initial_mesh, device=device, hold_history=True)
# input point cloud
input_xyz, input_normals = utils.read_pts(opts.input_pc)
# normalize point cloud based on initial mesh
input_xyz /= mesh.scale
input_xyz += mesh.translations[None, :]
input_xyz = torch.Tensor(input_xyz).type(options.dtype()).to(device)[None, :, :]
input_normals = torch.Tensor(input_normals).type(options.dtype()).to(device)[None, :, :]
part_mesh = PartMesh(mesh, num_parts=options.get_num_parts(len(mesh.faces)), bfs_depth=opts.overlap)
print(f'number of parts {part_mesh.n_submeshes}')
net, optimizer, rand_verts, scheduler = init_net(mesh, part_mesh, device, opts)
for i in range(opts.iterations):
num_samples = options.get_num_samples(i % opts.upsamp)
if opts.global_step:
optimizer.zero_grad()
start_time = time.time()
for part_i, est_verts in enumerate(net(rand_verts, part_mesh)):
if not opts.global_step:
optimizer.zero_grad()
part_mesh.update_verts(est_verts[0], part_i)
num_samples = options.get_num_samples(i % opts.upsamp)
recon_xyz, recon_normals = sample_surface(part_mesh.main_mesh.faces, part_mesh.main_mesh.vs.unsqueeze(0), num_samples)
# calc chamfer loss w/ normals
recon_xyz, recon_normals = recon_xyz.type(options.dtype()), recon_normals.type(options.dtype())
xyz_chamfer_loss, normals_chamfer_loss = chamfer_distance(recon_xyz, input_xyz, x_normals=recon_normals, y_normals=input_normals,
unoriented=opts.unoriented)
loss = (xyz_chamfer_loss + (opts.ang_wt * normals_chamfer_loss))
if opts.local_non_uniform > 0:
loss += opts.local_non_uniform * local_nonuniform_penalty(part_mesh.main_mesh).float()
loss.backward()
if not opts.global_step:
optimizer.step()
scheduler.step()
part_mesh.main_mesh.vs.detach_()
if opts.global_step:
optimizer.step()
scheduler.step()
end_time = time.time()
if i % 1 == 0:
print(f'{os.path.basename(opts.input_pc)}; iter: {i} out of: {opts.iterations}; loss: {loss.item():.4f};'
f' sample count: {num_samples}; time: {end_time - start_time:.2f}')
if i % opts.export_interval == 0 and i > 0:
print('exporting reconstruction... current LR: {}'.format(optimizer.param_groups[0]['lr']))
with torch.no_grad():
part_mesh.export(os.path.join(opts.save_path, f'recon_iter:{i}.obj'))
if (i > 0 and (i + 1) % opts.upsamp == 0):
mesh = part_mesh.main_mesh
num_faces = int(np.clip(len(mesh.faces) * 1.5, len(mesh.faces), opts.max_faces))
if num_faces > len(mesh.faces):
mesh = utils.manifold_upsample(mesh, opts.save_path, Mesh,
num_faces=min(num_faces, opts.max_faces),
res=opts.manifold_res, simplify=True)
part_mesh = PartMesh(mesh, num_parts=options.get_num_parts(len(mesh.faces)), bfs_depth=opts.overlap)
print(f'upsampled to {len(mesh.faces)} faces; number of parts {part_mesh.n_submeshes}')
net, optimizer, rand_verts, scheduler = init_net(mesh, part_mesh, device, opts)
with torch.no_grad():
mesh.export(os.path.join(opts.save_path, 'last_recon.obj')) | [
"github@hanocka.com"
] | github@hanocka.com |
4661f874d007a11754a46c3beedde6041690f9e9 | f6fafa5ade66f3168a4c8960389d6fb75539cf9b | /authmobile/views.py | c589d37ac540e48d45157b3ada270cf700ef5c9a | [] | no_license | tokibito/nullpobug-mobile-twitter-client | 7fc6593bd086017eaa7fad96f60efa43193ff526 | dbfb75a16d4020f471187bb1398e06ef42fc9862 | refs/heads/master | 2020-07-25T07:39:49.730289 | 2009-07-23T07:27:06 | 2009-07-23T07:27:06 | 208,217,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | # vim:fileencoding=utf8
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from authmobile.models import MobileUser
def login_easy(request):
    """
    "Easy login" for Japanese feature phones: authenticate by the
    handset's carrier-supplied subscriber ID instead of a password.
    Rejects non-mobile user agents; on success redirects to the site
    index, otherwise renders an error template.
    """
    if request.agent.is_nonmobile():
        # PC/non-mobile access is not allowed for this login path.
        return HttpResponseBadRequest(u'モバイル端末でアクセスしてください')
    # Fetch the subscriber ID (DoCoMo exposes it as `guid`; other
    # carriers use the terminal serial number).
    if request.agent.is_docomo():
        guid = request.agent.guid
    else:
        guid = request.agent.serialnumber
    user = authenticate(subscriber_id=guid)
    if not user:
        # No account matches this handset's subscriber ID.
        return direct_to_template(request, 'authmobile/error.html', extra_context={
            'message': u'ユーザが見つかりません。',
        })
    login(request, user)
    return HttpResponseRedirect(reverse('site_index'))
| [
"xxshss@yahoo.co.jp"
] | xxshss@yahoo.co.jp |
f23c218410aec4dc0f9a350359c03997fa9f593d | cd323d09192ca9d8a9e258a0455ff6f07f874896 | /src/map_module/map_builder/structure_generators/_structure_generator.py | e1477f18baf856d4fcd1dcd511b04612acbdb729 | [] | no_license | matszach/wildrealm | d9cc03c4b71e79d074cd166242cc3143e67e254a | 6e46a4b2de2c24fa18703fa1f482b95ef5114e8b | refs/heads/master | 2020-08-07T12:53:32.412078 | 2019-10-24T21:28:56 | 2019-10-24T21:28:56 | 213,458,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | from src.map_module.worldmap import WorldMap
from random import random
from math import sqrt
class StructureGenerator:
    # Abstract base for map structures: scans the map's floor tiles and,
    # subject to a spawn chance and a minimum spacing between instances,
    # places structures via the subclass-provided _build() hook.

    def generate(self, wmap: WorldMap, possible_floor_ids: list, spawn_chance: float, min_range: int = 100):
        # Visit every tile; a structure may spawn on a matching floor id
        # with probability `spawn_chance`, provided no structure from this
        # generator was already placed within `min_range` tiles.
        for x in range(wmap.x_size):
            for y in range(wmap.y_size):
                if wmap.floors[x, y] in possible_floor_ids:
                    if random() < spawn_chance:
                        if not self._similar_structure_in_range(x, y, min_range):
                            self._already_build.append((x, y))
                            self._build(wmap, x, y)

    """
    makes sure that no structures of the same have already been created nearby
    :param x_origin, y_origin - central location of the structure
    :param min_range - minimum distance between the closest instance
                       of the currently generated structure
    """
    def _similar_structure_in_range(self, x_origin: int, y_origin: int, min_range: int = 100):
        # Euclidean-distance check against every structure placed so far.
        for structure in self._already_build:
            if sqrt((structure[0] - x_origin)**2 + (structure[1] - y_origin)**2) < min_range:
                return True
        return False

    """
    describes structure's creation algorithm
    :param wmap - map being worked on
    :param x_origin, y_origin - central location of the structure
    """
    def _build(self, wmap: WorldMap, x_origin: int, y_origin: int):
        # Subclasses override this to draw the actual structure tiles.
        pass

    """
    safe handles index errors when generating structures
    on the border of the map
    """
    @staticmethod
    def _place_wall(wmap: WorldMap, x: int, y: int, wall_id: int):
        # Out-of-bounds writes at the map border are silently ignored.
        try:
            wmap.walls[x, y] = wall_id
        except IndexError:
            pass

    @staticmethod
    def _place_floor(wmap: WorldMap, x: int, y: int, floor_id: int):
        # Out-of-bounds writes at the map border are silently ignored.
        try:
            wmap.floors[x, y] = floor_id
        except IndexError:
            pass

    def __init__(self):
        # holds origin points of structures already placed by this generator
        self._already_build = []
| [
"lkaszubowski@gmail.com"
] | lkaszubowski@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.