blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a95c01ed96b05945f6d8503894e978ae44c6d5fc | 373227ba4efbb2a98098c987a3be9fdbb47a09c9 | /admin/migrations/0034_auto_20180926_1613.py | 80bf1f8919758d0eb595fcdaa6c6e6b6ca3cd784 | [
"MIT"
] | permissive | rodlukas/UP-admin | 758a6614084383cbcb6a820366bad951930a621b | 324cb30f4382b98908bbf75536040f27313032d0 | refs/heads/master | 2023-08-16T08:03:09.852037 | 2023-07-12T05:59:39 | 2023-07-12T05:59:39 | 122,991,375 | 6 | 4 | MIT | 2023-07-25T23:37:13 | 2018-02-26T15:44:24 | TypeScript | UTF-8 | Python | false | false | 317 | py | # Generated by Django 2.1.1 on 2018-09-26 14:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("admin", "0033_auto_20180926_1043")]
operations = [
migrations.AlterField(model_name="client", name="phone", field=models.TextField(blank=True))
]
| [
"rodlukas@fit.cvut.cz"
] | rodlukas@fit.cvut.cz |
95dbf1a9e95316107759f6413119a0410eb5a9b4 | 4ace3913648b302d8663d187fd1de598d299fe82 | /app.py | 8cd02dae0836de684ac6040544c6bdb2ac71e0d8 | [] | no_license | Ronak-B/Share_Extension_backend | 09e0cd4afdbc604333f2ac4e321b2d8756b89e09 | 3cbaea4b5fa3a6336b0f41eb2efd9b237142e7a7 | refs/heads/master | 2020-05-03T09:04:45.259721 | 2019-03-30T10:31:21 | 2019-03-30T10:31:21 | 178,544,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash,check_password_hash
app=Flask(__name__)
app.config['SECRET_KEY']='5791628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = r'sqlite:///extension.db'
db=SQLAlchemy(app)
class User(db.Model):
id=db.Column(db.Integer,primary_key=True)
username=db.Column(db.String)
name=db.Column(db.String)
email=db.Column(db.String)
password=db.Column(db.String)
class Message(db.Model):
id=db.Column(db.Integer,primary_key=True)
sender=db.Column(db.String)
receiver=db.Column(db.String)
message=db.Column(db.String(200))
@app.route("/signup",methods=["POST"])
def signup():
password=generate_password_hash(request.form['password'],method='sha256')
new_user=User(username=request.form['username'],name=request.form['name'],email=request.form['email'],password=password)
print(request.form['username']+request.form['name']+request.form['email']+password)
db.session.add(new_user)
db.session.commit()
return jsonify({'result':'success'})
@app.route('/login',methods=['POST'])
def login():
user=User.query.filter_by(username=request.form['username']).first()
if user:
if check_password_hash(user.password,request.form['password']):
return jsonify({'result':'success'})
else :
return jsonify({'result':'failed'})
else :
return jsonify({'result':'failed'})
if __name__ == "__main__":
app.run(debug=True) | [
"noreply@github.com"
] | noreply@github.com |
0d9bd3122e1a45a7875d6e02d805193f680f46af | aeefe478e5f625b2e10dd3596010b6f8881ae727 | /tests/conftest.py | d7707a41b2528e30cd046bfedd19211dd5245c9b | [] | no_license | cshields143/climate_indices_issue_419a | 53666c8ea7c007ba9dfe1a0600b2ac5fffd8c298 | c14c7a11e2969bcc157cfd6b4958d9dffc53e9b8 | refs/heads/main | 2023-03-22T16:19:04.333040 | 2021-03-15T23:32:01 | 2021-03-15T23:32:01 | 347,898,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import numpy as np
import pytest
@pytest.fixture(scope='module')
def values():
return np.loadtxt('tests/fixture/values')
@pytest.fixture(scope='module')
def skews():
return np.loadtxt('tests/fixture/skew')
@pytest.fixture(scope='module')
def locs():
return np.loadtxt('tests/fixture/loc')
@pytest.fixture(scope='module')
def scales():
return np.loadtxt('tests/fixture/scale')
@pytest.fixture(scope='module')
def outputs():
return np.loadtxt('tests/fixture/out')
| [
"christopher.shields143@gmail.com"
] | christopher.shields143@gmail.com |
f530002bae0140a6232ff9d319658f4de9263843 | 509acbe71f3a4d8a9315b15d99ad7063f6bdb656 | /Advanced data structure in Python (GEOG-389)/Advanced data structure in Python (GEOG-389 (1)/Vector data/Selecting data by attributes/task.py | 5781a269f37da8c8df59abcc2e7d91a64e1aa6a4 | [] | no_license | md1321/PostGradCoding | 62e37b49450749911a2a621c2c8337c736054328 | 132bcf34c7edace0fb87dc12eef0170c123403b4 | refs/heads/master | 2022-12-04T01:41:43.884546 | 2020-08-23T17:57:19 | 2020-08-23T17:57:19 | 285,395,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # Load the geopandas package, and name it gdp
import geopandas as gdp
# Read the shapefile crime.shp into data.
# Change the file path accordingly. You may copy from the previous task
data = gdp.read_file("type here")
# Get all unique offence types
data[type here].unique()
# Get all burglary incidents in Oahu.
data[data[type here]==type here]
# Read census tract boundaries in Oahu
# Change the file path accordingly
data_ct = gdp.read_file("type here")
# Create a map of the tract boundaries and assign the map to base1
base1 = data_ct.plot(color='white', edgecolor='black')
# Plot all these theft incidents in Oahu, using census tracts as the base map
data[type here].plot(ax=base1, marker='*', color='green', markersize=0.5)
# Create another map of the tract boundaries and assign the map to base2
base2 = data_ct.plot(color='white', edgecolor='black')
# Plot all graffiti incidents in Oahu.
type here
# Visually compare the spatial distributions of the two crime types in the maps (no need to write code here)
| [
"mike.donaher@gmail.com"
] | mike.donaher@gmail.com |
ea4bd4c24d7b177c44d44b2885a68be6ee48dbad | be5f737b902df73ee19f7d74347b37c4656c2b11 | /main/page/desktop_v3/index/pe_index.py | e7749fab5ace9d977cba2c0387cd13f9d8ff6b9b | [] | no_license | niufuzhen/selenium | 965d3791e6ff4b81457d80d63f034d4fd7d5c150 | e3bf21c77efc3f0954836c2318b87f88fd276d0a | refs/heads/master | 2021-01-10T22:33:54.652490 | 2016-02-24T14:24:41 | 2016-02-24T14:24:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,262 | py | from main.page.base import BasePage
from selenium.webdriver.common.by import By
from utils.lib.user import *
class IndexPage(BasePage):
_pl = ""
# LOCATORS
#PANEL LEFT
_username_loc = (By.CSS_SELECTOR, 'div#side-profile div.clear-b div.span8 small.pull-left a')
_deposit_amount_loc = (By.CSS_SELECTOR, 'div.ellipsis a.deposit-link strong#include-deposit')
_shop_name_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[1]/div/div[2]/div/div[2]/small/a')
_shop_status_loc = (By.CSS_SELECTOR, 'div.top-admin div.clear-b div.span8 div.ellipsis a')
#Panel Left -- INBOX
_panel_my_inbox_loc = {
'_inbox_message_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[1]/a'),
'_inbox_talk_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[2]/a'),
'_inbox_review_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[3]/a'),
'_inbox_price_alert_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[4]/a'),
'_inbox_ticket_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[5]/a'),
'_inbox_resolution_center_loc': (By.XPATH, '//*[@id="accordion-inbox"]/div/ul/li[6]/a')
}
#Panel Left -- MY SHOP
_panel_my_shop_loc = {
'_myshop_order_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[1]/a'),
'_add_product_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[2]/a'),
'_product_list_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[3]/a'),
'_topads_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[4]/a'),
'_manage_shop_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[5]/a'),
'_manage_admin_loc': (By.XPATH, '//*[@id="accordion-shop"]/div/ul/li[6]/a')
}
#Panel Left -- MY PROFILE
_panel_my_profile_loc = {
'_tx_payment_confirm_loc': (By.XPATH, '//*[@id="accordion-profile"]/div/ul/li[1]/a'),
'_my_favorite_shop_loc': (By.XPATH, '//*[@id="accordion-profile"]/div/ul/li[2]/a'),
'_my_profile_setting_loc': (By.XPATH, '//*[@id="accordion-profile"]/div/ul/li[3]/a')
}
#Panel Left -- INSIGHT
_panel_insight_loc = {
'_insight_talk_loc': (By.XPATH, '/html/body/div[2]/div[5]/div/div[1]/ul/li[4]/div[2]/div/ul/li[1]/a'),
'_insight_price_alert_loc': (By.XPATH, '/html/body/div[2]/div[5]/div/div[1]/ul/li[4]/div[2]/div/ul/li[2]/a')
}
#Hot List content
_view_all_hotlist_loc = (By.CSS_SELECTOR, 'div.maincontent-admin a.fs-12')
_left_hotlist_img_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[1]/a/div/div[1]/img')
_left_hotlist_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[1]/a/div/div[2]/div[1]')
_mid_hotlist_img_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[2]/a/div/div[1]/img')
_mid_hotlist_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[2]/a/div/div[2]/div[1]')
_right_hotlist_img_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[3]/a/div/div[1]/img')
_right_hotlist_loc = (By.XPATH, '//*[@id="content-container"]/div[5]/div/div[2]/div[1]/div[3]/a/div[2]/div[1]')
#Tab Locator
_tab_product_feed_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[2]/div[1]/div[1]/div/ul/li[1]/a')
_tab_fav_shop_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[2]/div[1]/div[1]/div/ul/li[2]/a')
_tab_recently_viewed_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[2]/div[1]/div[1]/div/ul/li[3]/a')
#Total Product displayed
_total_list_product_loc = (By.XPATH, '//*[@id="fav-prod-grid"]/div')
_total_list_product_img_loc = (By.XPATH, '//*[@id="fav-prod-grid"]/div/a/div/div[1]/img')
#Total Promote displayed
_total_list_promo_loc = (By.XPATH, '//*[@id="promo-right-c-0"]/div/div')
#ACTIONS
def open(self, site=""):
self._open(site, self._pl)
def check_my_username(self):
print("my username : %s" % (self.find_element(*self._username_loc).text))
return self.find_element(*self._username_loc).text
def check_my_deposit(self):
self.check_visible_element(*self._deposit_amount_loc)
print("Current deposit : %s" % (self.find_element(*self._deposit_amount_loc).text))
return self.find_element(*self._deposit_amount_loc).text
def check_all_product_listed(self):
total_product = 20
current_product = 0
for each_product in self.find_elements(*self._total_list_product_img_loc):
#print (each_product.get_attribute('src')) #For Debug
current_product += 1
if current_product == total_product:
print("Total listed product checked has reached 20 (Maximum)!")
elif current_product > total_product:
print("Total listed product checked has exceeding 20(Maximum)! Please check this now!")
#[Element] Panel Left
def check_all_panel_left(self):
print("Now checking all panel elements..")
for each_element_at_my_inbox_panel in self._panel_my_inbox_loc:
self.check_visible_element(*self._panel_my_inbox_loc[each_element_at_my_inbox_panel])
print ("Panel 'Inbox' checked!")
for each_element_at_my_shop_panel in self._panel_my_shop_loc:
self.check_visible_element(*self._panel_my_shop_loc[each_element_at_my_shop_panel])
print ("Panel 'Shop' checked!")
for each_element_at_my_profile_panel in self._panel_my_profile_loc:
self.check_visible_element(*self._panel_my_profile_loc[each_element_at_my_profile_panel])
print ("Panel 'Profile' checked!")
for each_element_at_insight_panel in self._panel_insight_loc:
self.check_visible_element(*self._panel_insight_loc[each_element_at_insight_panel])
print ("Panel 'Insight' checked!")
print("All panel elements has been checked and status OK..!")
def check_all_panel_left_no_shop(self):
print("Now checking all panel elements..")
for each_element_at_my_inbox_panel in self._panel_my_inbox_loc:
self.check_visible_element(*self._panel_my_inbox_loc[each_element_at_my_inbox_panel])
#print (*self._panel_my_inbox_loc[each_element_at_my_inbox_panel]) #For debug
for each_element_at_my_profile_panel in self._panel_my_profile_loc:
self.check_visible_element(*self._panel_my_profile_loc[each_element_at_my_profile_panel])
#print (*self._panel_my_profile_loc[each_element_at_my_profile_panel]) #For debug
print("All panel elements has been checked and status OK..!")
#[Element] Panel Left - User Information
def click_my_username_at_panel_left(self):
my_username = self.find_element(*self._username_loc)
self._click(my_username)
def click_shop_name_at_panel_left(self):
my_shop = self.find_element(*self._shop_name_loc)
self._click(my_shop)
#[Element] Panel Left - Inbox
def click_inbox_message_at_panel_left(self):
panel_inbox_message = self.find_element(*self._panel_my_inbox_loc['_inbox_message_loc'])
self._click(panel_inbox_message)
def click_inbox_talk_at_panel_left(self):
panel_inbox_talk = self.find_element(*self._panel_my_inbox_loc['_inbox_talk_loc'])
self._click(panel_inbox_talk)
def click_inbox_review_at_panel_left(self):
panel_inbox_review = self.find_element(*self._panel_my_inbox_loc['_inbox_review_loc'])
self._click(panel_inbox_review)
def click_inbox_price_alert_at_panel_left(self):
panel_inbox_price_alert = self.find_element(*self._panel_my_inbox_loc['_inbox_price_alert_loc'])
self._click(panel_inbox_price_alert)
def click_inbox_ticket_at_panel_left(self):
panel_inbox_ticket = self.find_element(*self._panel_my_inbox_loc['_inbox_ticket_loc'])
self._click(panel_inbox_ticket)
def click_inbox_resolution_center_at_panel_left(self):
panel_inbox_resolution_center = self.find_element(*self._panel_my_inbox_loc['_inbox_resolution_center_loc'])
self._click(panel_inbox_resolution_center)
#[Element] Panel Left - My Shop
def click_sales_at_panel_left(self):
panel_myshop_order = self.find_element(*self._panel_my_shop_loc['_myshop_order_loc'])
self._click(panel_myshop_order)
def click_add_product_at_panel_left(self):
panel_add_product = self.find_element(*self._panel_my_shop_loc['_add_product_loc'])
self._click(panel_add_product)
def click_product_list_at_panel_left(self):
panel_product_list = self.find_element(*self._panel_my_shop_loc['_product_list_loc'])
self._click(panel_product_list)
def click_topads_at_panel_left(self):
panel_topads = self.find_element(*self._panel_my_shop_loc['_topads_loc'])
self._click(panel_topads)
def click_manage_shop_at_panel_left(self):
panel_manage_shop = self.find_element(*self._panel_my_shop_loc['_manage_shop_loc'])
self._click(panel_manage_shop)
def click_manage_admin_at_panel_left(self):
panel_manage_admin = self.find_element(*self._panel_my_shop_loc['_manage_admin_loc'])
self._click(panel_manage_admin)
#[Element] Panel Left - My Shop
def click_purchase_at_panel_left(self):
panel_tx_payment_confirm = self.find_element(*self._panel_my_profile_loc['_tx_payment_confirm_loc'])
self._click(panel_tx_payment_confirm)
def click_favorite_shops_at_panel_left(self):
panel_fav_shop = self.find_element(*self._panel_my_profile_loc['_my_favorite_shop_loc'])
self._click(panel_fav_shop)
def click_settings_at_panel_left(self):
panel_settings = self.find_element(*self._panel_my_profile_loc['_my_profile_setting_loc'])
self._click(panel_settings)
#[Element] Panel Left - Insight
def click_insight_talk(self):
panel_insight_talk = self.find_element(*self._panel_insight_loc['_insight_talk_loc'])
self._click(panel_insight_talk)
def click_insight_price_alert(self):
panel_insight_price_alert = self.find_element(*self._panel_insight_loc['_insight_price_alert_loc'])
self._click(panel_insight_price_alert)
| [
"herman.wahyudi02@gmail.com"
] | herman.wahyudi02@gmail.com |
e93e6c381fdef0c3bfe2156535d997bdfa9c4f6d | 3ddf87217c7e2528c83c683aa9d6f44e42256bd5 | /plone/fab_config.py | e9fe9772702386f3774faed1d352fb2e62911be3 | [] | no_license | jbeyers/projecttools | 57bb26f84acaef92d378162d763f2317bea9f57a | cb4062f440812e27de8ebd4b6cec0cbc8e9e5c2f | refs/heads/master | 2020-04-04T00:42:26.855069 | 2011-11-07T08:24:57 | 2011-11-07T08:24:57 | 2,293,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """
Fabric settings for hosts.
"""
from fabric.api import env
def qa():
"""
Settings for the qa server.
"""
# If your buildout file for QA is qa.cfg, the following line is correct:
#env.buildout_config = 'qa'
# A list of hostnames to deploy on. The following will try to connect to
# myqaserver.mysite.com as your username:
#env.hosts = ['myqaserver.mysite.com']
# The deploy user. Most deploy commands will be run as this user.
#env.deploy_user = 'plone'
# The root of your Plone instance. By convention, I put the plone instances
# in an 'instances' directory in the deploy users home directory.
#env.directory = '/home/%s/instances/qa.mysite' % env.deploy_user
| [
"jbeyers@juizi.com"
] | jbeyers@juizi.com |
1e854c44ac331f5e2e31a0b037cdde5960614efa | 027b1a3a0d4dbec73db6f4fab970f8fe6c5a8dc7 | /myshop/settings.py | 9a09a9b56eeaa0fa5463dc108cd679212f916eac | [] | no_license | avodha871/dressvlog | b8a120a7ab368e660e651d60345f460204198619 | c84008d9d9c29b84d16e4f01a7870dd2e7ee8d87 | refs/heads/master | 2023-07-06T05:46:10.589812 | 2021-08-10T08:10:11 | 2021-08-10T08:10:11 | 394,574,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | """
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-o^32@4ep_z38en0f2+4l4_0%+%alnp*v^i8fb1!n2(=fn06bn+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'cart',
'shop',
'account',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
STATIC_ROOT=os.path.join(BASE_DIR,'assets')
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"divyaantony98@gmail.com"
] | divyaantony98@gmail.com |
3cffc81fe48cbf26f24cfcdcb758d4739d6dddfc | 97ae00e3691f3154d06b84204453d9e2e1a327b8 | /Assignment1/cs231n/classifiers/k_nearest_neighbor.py | 5de98f782dbec782db1b34040c5dbb4016a75ac9 | [] | no_license | zhangsz1998/cs231n-Assignments | 5165239a549b01d6054f6e3746d0dbabf1bd1517 | 11d7cce984d47278fcdd71c0f4080188570eac25 | refs/heads/master | 2021-07-09T07:23:21.129102 | 2017-10-09T12:50:29 | 2017-10-09T12:50:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,239 | py | import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
dists[i, j] = np.linalg.norm(X[i] - self.X_train[j], 2)
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
#np.linalg.norm seems a little slower than the direct implementation
#dists[i, :] = np.linalg.norm(np.subtract(self.X_train, X[i]), ord = 2, axis = 1)
dists[i, :] = np.sqrt(np.sum(np.square(np.subtract(self.X_train, X[i])), axis=1))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
dists = np.sqrt(- 2 * np.dot(X, self.X_train.T) + np.sum(np.square(X), axis = 1, keepdims = True) + np.sum(np.square(self.X_train.T), axis = 0, keepdims = True))
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance betwen the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in xrange(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
closest_y = self.y_train[np.argsort(dists[i])[:k]]
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
y_pred[i] = np.argmax(np.bincount(closest_y))
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
| [
"zhangshangzhio@qq.com"
] | zhangshangzhio@qq.com |
010a928df8cce43b54998f72374c7d6d4ffcb1e0 | 73c6f4adaba5c8d86dbd42635ec9ba20573feb1c | /src/website/documents/migrations/0006_auto__add_field_category_description.py | b3c51319a83470f3c68bc5100a83ff30ad4a82de | [] | no_license | dmitryro/mqm | a3d7135f668108e70f8468bf17f0b819d1b38190 | 0517af5dced8dc45b0e391188b6ddfdd4110b2f1 | refs/heads/master | 2016-08-07T03:25:27.454916 | 2014-08-09T11:38:40 | 2014-08-09T11:38:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,696 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.description'
db.add_column(u'documents_category', 'description',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Category.description'
db.delete_column(u'documents_category', 'description')
models = {
u'accounts.skill': {
'Meta': {'ordering': "('name',)", 'object_name': 'Skill'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'})
},
u'accounts.user': {
'Meta': {'object_name': 'User'},
'biography': ('django.db.models.fields.TextField', [], {'max_length': '350', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'email': ('website.utils.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'local_mind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'to': u"orm['local_minds.LocalMind']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'privileges': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.Skill']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "('first_name', 'last_name')", 'overwrite': 'False'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user_avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'documents.category': {
'Meta': {'ordering': "('sort_value',)", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_image': ('mediastore.fields.related.MediaField', [], {'blank': 'True', 'related_name': "'document_category_image'", 'null': 'True', 'to': "orm['mediastore.Media']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sort_value': ('django.db.models.fields.IntegerField', [], {'default': '2', 'db_index': 'True'})
},
u'documents.document': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Document'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'documents'", 'blank': 'True', 'to': u"orm['documents.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'download_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_mind': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['local_minds.LocalMind']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'privacy': ('website.privacy.fields.PrivacyField', [], {'default': "'national'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['accounts.User']"})
},
u'local_minds.ethnicity': {
'Meta': {'object_name': 'Ethnicity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'local_minds.localmind': {
'Meta': {'object_name': 'LocalMind'},
'_latitude_postcode': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'_longitude_postcode': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'area_of_benefit': ('django.db.models.fields.CharField', [], {'max_length': '350', 'blank': 'True'}),
'average_volunteer_hours': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ceo_one': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ceo_one_of'", 'null': 'True', 'to': u"orm['local_minds.Person']"}),
'ceo_two': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ceo_two_of'", 'null': 'True', 'to': u"orm['local_minds.Person']"}),
'chair': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chair_one_of'", 'null': 'True', 'to': u"orm['local_minds.Person']"}),
'charity_no': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'charity_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deficit': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'group_avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'hours': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income_restricted': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'income_unrestricted': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'reserves': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'staff_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'trustees_active': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'trustees_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'volunteers_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'local_minds.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'ethnicity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['local_minds.Ethnicity']", 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'mediastore.media': {
'Meta': {'ordering': "('created',)", 'object_name': 'Media'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['documents'] | [
"gregor@muellegger.de"
] | gregor@muellegger.de |
edae603671b42da4d38c87b7291b196fa5646365 | aa5398d549b8838bca542a5225c2ea6ef018ebc5 | /hard14.py | bf4ce2ce8f44acd8d2d8f58f2c3bc868f841bdd3 | [
"Apache-2.0"
] | permissive | amrutra07j/abnamro | 99c70fe2869a6b900a93785130241fcee21cb178 | b31724f799238b28fe04230a2e66cbfb7fae3f63 | refs/heads/main | 2023-05-03T22:59:01.868187 | 2021-05-17T04:45:10 | 2021-05-17T04:45:10 | 368,053,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from sys import argv
script, user_name = argv
prompt = '> '
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer) | [
"noreply@github.com"
] | noreply@github.com |
83b6521cbb69e918d5adf86d3847e2be974e7380 | 33c1c5d0f48ad952776fe546a85350a441d6cfc2 | /ABC/125/B.py | 8f6837817b70aa2610cc83b398e4c54f113aa439 | [] | no_license | hisyatokaku/Competition | 985feb14aad73fda94804bb1145e7537b057e306 | fdbf045a59eccb1b2502b018cab01810de4ea894 | refs/heads/master | 2021-06-30T18:48:48.256652 | 2020-11-16T11:55:12 | 2020-11-16T11:55:12 | 191,138,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | N = int(input())
S = input()
K = int(input())
tar = S[K-1]
ans = ""
for c in S:
if c != tar:
ans += "*"
else:
ans += c
print(ans)
| [
"hisyatokaku2005@yahoo.co.jp"
] | hisyatokaku2005@yahoo.co.jp |
f9ef9ad1927778c242c79e67d2e0f7ff53fd4308 | 5a38d66a8c462369cc643352764b2a0492ce24dc | /backend/customers/models.py | e7e38996e1776754db1c47837bd2a4aa9bfd8284 | [] | no_license | Jayesh2812/car-service | 774fbff2c507402c72d1050a11246b5b8927ff2d | 2f2d417b0a84edc0cf69640673c0af4d17a26142 | refs/heads/main | 2023-03-28T03:37:14.024955 | 2021-03-27T21:47:55 | 2021-03-27T21:47:55 | 351,890,451 | 0 | 1 | null | 2021-03-27T21:44:09 | 2021-03-26T19:26:10 | Python | UTF-8 | Python | false | false | 307 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
#customer model
class Customer(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
phoneNo=models.DecimalField(max_digits=10,decimal_places=0)
emailId=models.EmailField()
| [
"abhishek.dhule.79@gmail.com"
] | abhishek.dhule.79@gmail.com |
e118248edce28ae82339bc2f39e8441b621a7f7a | 1269833599eb6c8ea01fc2354bb6f9d18884ba5a | /machine translation/training_code/train_rnn_with_attention/train_vi.py | 0f99c16b8d4fa110d4902380fb1e4ec7c4866e76 | [] | no_license | qltf8/ds-1011-nlp | c5458d14ccace26e969760fa9777708256fa9214 | c0dd4b9225e7a5ab2947197127e2605368dbe463 | refs/heads/master | 2020-04-02T02:58:05.776893 | 2018-12-15T03:55:10 | 2018-12-15T03:55:10 | 153,938,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,450 | py | import numpy as np
import torch
import torchtext
import pickle
import csv
import unicodedata
import re, random, time, string, subprocess
import os, sys, copy
TEXT_vi = torchtext.data.ReversibleField(sequential=True, use_vocab=True, batch_first = False, tokenize= lambda t:t.split(),
include_lengths=True)
TEXT_en = torchtext.data.ReversibleField(sequential=True, use_vocab=True, batch_first = False, tokenize= lambda t:t.split(),
lower=True, init_token='<sos>', eos_token='<eos>',include_lengths=True)
train_vi_en = torchtext.data.TabularDataset('/home/ql819/text_data/train_vi_en.csv', format='csv',
fields=[('source',TEXT_vi),('target',TEXT_en)])
validation_vi_en = torchtext.data.TabularDataset('/home/ql819/text_data/dev_vi_en.csv', format='csv',
fields=[('source',TEXT_vi),('target',TEXT_en)])
TEXT_vi.build_vocab(train_vi_en, min_freq=3)
TEXT_en.build_vocab(train_vi_en, min_freq=3)
train_vi_en_iter = torchtext.data.BucketIterator(train_vi_en, batch_size=1, sort_key= lambda e: len(e.source),
repeat = False, sort_within_batch=True, shuffle=True, device=torch.device(0))
validation_vi_en_iter = torchtext.data.BucketIterator(validation_vi_en, batch_size=1, sort_key= lambda e: len(e.source),
repeat = False, sort_within_batch=True, shuffle=True, device=torch.device(0))
class Bi_Multi_Layer_LSTM_Encoder(torch.nn.Module):
def __init__(self, num_vocab, input_size = 512, hidden_size = 512, dropout = 0.15):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = 1
self.dropout = dropout
self.dropout_layer = torch.nn.Dropout(self.dropout)
self.bidirectional = True
self.embedding_layer = torch.nn.Embedding(num_vocab, self.input_size)
self.lstm = torch.nn.LSTM(input_size= self.input_size, hidden_size = self.hidden_size, batch_first = False,
bidirectional = self.bidirectional, num_layers = self.num_layers)
h_0 = torch.zeros(1, self.hidden_size)
torch.nn.init.normal_(h_0, mean=0, std=0.0001)
self.h_0 = torch.nn.Parameter(h_0,requires_grad=True)
c_0 = torch.zeros(1, self.hidden_size)
torch.nn.init.normal_(c_0, mean=0, std=0.0001)
self.c_0 = torch.nn.Parameter(c_0,requires_grad=True)
if self.bidirectional:
h_1 = torch.zeros(1, self.hidden_size)
torch.nn.init.normal_(h_1, mean=0, std=0.0001)
self.h_1 = torch.nn.Parameter(h_1,requires_grad=True)
c_1 = torch.zeros(1, self.hidden_size)
torch.nn.init.normal_(c_1, mean=0, std=0.0001)
self.c_1 = torch.nn.Parameter(c_1,requires_grad=True)
def forward(self, X):
X_data,X_len = X
#X_data: source_len, 1, input_size X_len:1,1
X_data = self.embedding_layer(X_data)
h_0 = torch.cat([self.h_0]*len(X_len), dim=0).unsqueeze(1)
c_0 = torch.cat([self.c_0]*len(X_len), dim=0).unsqueeze(1)
if self.bidirectional:
h_1 = torch.cat([self.h_1]*len(X_len), dim=0).unsqueeze(1)
c_1 = torch.cat([self.c_1]*len(X_len), dim=0).unsqueeze(1)
h = torch.cat([h_0,h_1], dim=0)
c = torch.cat([c_0,c_1], dim=0)
output, (h_n, c_n) = self.lstm(X_data, (h, c))
#output: source_len, 1, 2*hidden_size
h_n = h_n.view(self.num_layers, 2, len(X_len), self.hidden_size)
c_n = c_n.view(self.num_layers, 2, len(X_len), self.hidden_size)
return output, h_n, c_n
def init_parameters(self):
for name, matrix in self.lstm.named_parameters():
if 'weight_hh_' in name:
for i in range(0, matrix.size(0), self.hidden_size):
torch.nn.init.orthogonal_(matrix[i:i+self.hidden_size], gain=0.01)
elif 'bias_' in name:
l = len(matrix)
matrix[l // 4: l //2].data.fill_(1.0)
class LSTM_Decoder_With_Attention(torch.nn.Module):
def __init__(self, num_vocab, input_size = 512, hidden_size = 512, dropout=0.15):
super().__init__()
self.num_vocab = num_vocab
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = 1
self.dropout = dropout
self.dropout_layer = torch.nn.Dropout(self.dropout)
self.embedding_layer = torch.nn.Embedding(self.num_vocab, self.input_size)
self.lstm = torch.nn.LSTM(hidden_size= self.hidden_size, input_size= self.input_size + 2 * self.hidden_size,
num_layers= self.num_layers)
self.calcu_weight_1 = torch.nn.Linear(3*self.hidden_size, hidden_size)
self.calcu_weight_2 = torch.nn.Linear(self.hidden_size, 1)
self.init_weight_1 = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.init_weight_2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.linear_vob = torch.nn.Linear(self.hidden_size, self.num_vocab)
def forward(self, input_word_index, hidden_vector, cell_vector, encoder_memory, is_init = False):
#input_word_index: [num]
#hidden_vector: 1, 1, hidden_size
#cell_vector: 1, 1, hidden_size
#encoder_memory: source_sen_len , 2 * hidden_size
if hidden_vector.shape[0] != self.num_layers or hidden_vector.shape[2] != self.hidden_size:
raise ValueError('The size of hidden_vector is not correct, expect '+str((self.num_layers, self.hidden_size))\
+ ', actually get ' + str(hidden_vector.shape))
if is_init:
hidden_vector = torch.tanh(self.init_weight_1(hidden_vector))
cell_vector = torch.tanh(self.init_weight_2(cell_vector))
n_hidden_vector = torch.stack([hidden_vector.squeeze()]*encoder_memory.shape[0],dim=0)
com_n_h_memory = torch.cat([n_hidden_vector, encoder_memory], dim =1)
com_n_h_temp = torch.tanh(self.calcu_weight_1(com_n_h_memory))
weight_vector = self.calcu_weight_2(com_n_h_temp)
weight_vector = torch.nn.functional.softmax(weight_vector, dim=0)
#weight_vector: source_sen_len * 1
convect_vector = torch.mm(weight_vector.transpose(1,0), encoder_memory)
#convect_vector: 1 , 2 * hidden_size
input_vector = self.embedding_layer(input_word_index).view(1,1,-1)
input_vector = self.dropout_layer(input_vector)
input_vector = torch.cat([convect_vector.unsqueeze(0), input_vector], dim=2)
output, (h_t, c_t) = self.lstm(input_vector,(hidden_vector, cell_vector))
output = output.view(1, self.hidden_size)
prob = self.linear_vob(output)
#prob 1, vob_size
prob = torch.nn.functional.log_softmax(prob, dim=1)
return prob, h_t, c_t
def init_parameters(self):
for name, matrix in self.lstm.named_parameters():
if 'weight_hh_' in name:
for i in range(0, matrix.size(0), self.hidden_size):
torch.nn.init.orthogonal_(matrix[i:i+self.hidden_size], gain=0.01)
elif 'bias_' in name:
l = len(matrix)
matrix[l // 4: l //2].data.fill_(1.0)
def train(encoder, decoder, optimizer, data_iter, teacher_forcing_ratio, batch_size = 64):
encoder.train()
decoder.train()
count = 0
loss = 0
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
for batch in data_iter:
source, target = batch.source, batch.target
target_data,target_len = target[0], target[1]
all_output, h_n, c_n = encoder(source)
output = all_output[:,0]
target_word_list = target_data.squeeze()
target_word = torch.tensor([TEXT_en.vocab.stoi['<sos>']]).cuda(0)
h_t = h_n[:,1,:]
c_t = c_n[:,1,:]
is_init = True
for word_index in range(1, target_len[0].item()):
prob, h_t, c_t = decoder(target_word, h_t, c_t, output, is_init)
is_init = False
if use_teacher_forcing:
target_word = target_word_list[[word_index]]
loss += torch.nn.functional.nll_loss(prob, target_word)
else:
right_target_word = target_word_list[[word_index]]
loss += torch.nn.functional.nll_loss(prob, right_target_word)
predict_target_word_index = prob.topk(1)[1].item()
if TEXT_en.vocab.stoi['<eos>'] == predict_target_word_index:
break
else:
target_word = torch.tensor([predict_target_word_index]).cuda(0)
count += 1
if count % batch_size == 0:
loss = loss/batch_size
loss.backward()
optimizer.step()
optimizer.zero_grad()
count = 0
loss = 0
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if count % batch_size != 0:
loss = loss/count
loss.backward()
optimizer.step()
optimizer.zero_grad()
class Bean_Search_Status_Record:
def __init__(self, h_t, c_t, predict_word_index_list, sum_log_prob):
self.h_t = h_t
self.c_t = c_t
self.predict_word_index_list = predict_word_index_list
self.sum_log_prob = sum_log_prob
self.avg_log_prob = 0
def test(encoder, decoder, data_iter, k=10):
encoder.eval()
decoder.eval()
path_name = '../eval/'+str(time.time()).replace('.','_')+'/'
os.mkdir(path_name)
predict_file_name = path_name + 'predict.txt'
target_file_name = path_name + 'target_file_name.txt'
predict_file = open(predict_file_name, 'w')
target_file = open(target_file_name, 'w')
for batch in data_iter:
source, target = batch.source, batch.target
source_data,source_len = source[0], source[1]
target_data,target_len = target[0], target[1]
all_output, h_n, c_n = encoder(source)
output = all_output[:,0]
target_word = TEXT_en.vocab.stoi['<sos>']
h_t = h_n[:,1,:]
c_t = c_n[:,1,:]
is_init = True
right_whole_sentence_word_index = target_data[1: target_len[0].item()-1,0]
right_whole_sentence_word_index = list(right_whole_sentence_word_index.cpu().numpy())
sequences = [Bean_Search_Status_Record(h_t, c_t, predict_word_index_list = [target_word],
sum_log_prob = 0.0)]
t = 0
while (t < 100):
all_candidates = []
for i in range(len(sequences)):
record = sequences[i]
h_t = record.h_t
c_t = record.c_t
predict_word_index_list = record.predict_word_index_list
sum_log_prob = record.sum_log_prob
target_word = predict_word_index_list[-1]
if TEXT_en.vocab.stoi['<eos>'] != target_word:
prob, h_t, c_t = decoder(torch.tensor([target_word]).cuda(0), h_t, c_t, output, is_init)
k_prob_value_list, k_word_index_list = prob.topk(k,dim=1)
k_prob_value_list = k_prob_value_list.cpu().detach().squeeze().numpy()
k_word_index_list = k_word_index_list.cpu().squeeze().numpy()
for prob_value, word_index in zip(k_prob_value_list, k_word_index_list):
prob_value = float(prob_value)
word_index = int(word_index)
new_record = Bean_Search_Status_Record(h_t, c_t, predict_word_index_list+[word_index], sum_log_prob+prob_value)
new_record.avg_log_prob = new_record.sum_log_prob/(len(new_record.predict_word_index_list) - 1)
all_candidates.append(new_record)
else:
all_candidates.append(record)
is_init = False
ordered = sorted(all_candidates, key = lambda r: r.sum_log_prob, reverse = True)
sequences = ordered[:k]
t += 1
final_record = sequences[0]
predict_whole_sentence_word_index = [TEXT_en.vocab.itos[temp_index] for temp_index in final_record.predict_word_index_list[1:-1]]
right_whole_sentence_word_index = [TEXT_en.vocab.itos[temp_index] for temp_index in right_whole_sentence_word_index]
predict_whole_sentence = ' '.join(predict_whole_sentence_word_index)
right_whole_sentence = ' '.join(right_whole_sentence_word_index)
predict_file.write(predict_whole_sentence.strip() + '\n')
target_file.write(right_whole_sentence.strip() + '\n')
predict_file.close()
target_file.close()
result = subprocess.run('cat {} | sacrebleu {}'.format(predict_file_name,target_file_name),shell=True,stdout=subprocess.PIPE)
result = str(result)
print(result)
sys.stdout.flush()
return get_blue_score(result)
def get_blue_score(s):
a = re.search(r'13a\+version\.1\.2\.12 = ([0-9.]+)',s)
return float(a.group(1))
def parameters_list(encoder, decoder):
para_list_1 = []
para_list_2 = []
for name, data in list(encoder.named_parameters()):
if 'embedding' in name:
para_list_1.append(data)
else:
para_list_2.append(data)
for name, data in list(decoder.named_parameters()):
if 'embedding' in name:
para_list_1.append(data)
else:
para_list_2.append(data)
return para_list_1, para_list_2
def parameters_list_change_grad(encoder, decoder):
para_list = []
for name, data in list(encoder.named_parameters()):
if 'embedding' in name:
data.requires_grad = False
else:
para_list.append(data)
for name, data in list(decoder.named_parameters()):
if 'embedding' in name:
data.requires_grad = False
else:
para_list.append(data)
return para_list
encoder = Bi_Multi_Layer_LSTM_Encoder(num_vocab=len(TEXT_vi.vocab.stoi))
decoder = LSTM_Decoder_With_Attention(num_vocab = len(TEXT_en.vocab.stoi))
encoder.init_parameters()
decoder.init_parameters()
encoder = encoder.cuda(0)
decoder = decoder.cuda(0)
early_stop = 3
best_blue_score = -1
best_index = -1
save_model_dir_name = '../save_model/teacher_vi_to_en_'
para_list_1, para_list_2 = parameters_list(encoder, decoder)
optimizer = torch.optim.Adam([{'params': para_list_1, 'lr': 0.001},
{'params': para_list_2, 'lr': 0.001}])
teacher_forcing_ratio = 0.95
for index_unique in range(100):
train(encoder, decoder, optimizer, train_vi_en_iter, teacher_forcing_ratio)
blue_score = test(encoder, decoder, validation_vi_en_iter)
print('epoch: ',index_unique, ' the blue score on validation dataset is : ', blue_score)
sys.stdout.flush()
if best_blue_score < blue_score:
best_index = index_unique
best_blue_score = blue_score
best_encoder = copy.deepcopy(encoder)
best_decoder = copy.deepcopy(decoder)
torch.save(encoder, save_model_dir_name+'encode_'+str(index_unique))
torch.save(decoder, save_model_dir_name+'decoder_'+str(index_unique))
if index_unique - best_index >= early_stop:
break
print('--------------------------------------')
sys.stdout.flush()
encoder = best_encoder
decoder = best_decoder
para_list = parameters_list_change_grad(encoder, decoder)
optimizer = torch.optim.Adam(para_list, lr = 0.001)
save_model_dir_name = '../save_model/teacher_refined_vi_to_en_'
early_stop = 3
best_blue_score = -1
best_index = -1
for index_unique in range(100):
train(encoder, decoder, optimizer, train_vi_en_iter, teacher_forcing_ratio)
blue_score = test(encoder, decoder, validation_vi_en_iter)
print('epoch: ',index_unique, ' the blue score on validation dataset is : ', blue_score)
sys.stdout.flush()
if best_blue_score < blue_score:
best_index = index_unique
best_blue_score = blue_score
torch.save(encoder, save_model_dir_name+'encode_'+str(index_unique))
torch.save(decoder, save_model_dir_name+'decoder_'+str(index_unique))
if index_unique - best_index >= early_stop:
break | [
"ql819@nyu.edu"
] | ql819@nyu.edu |
2e250dbb4c3fb7c838744418586a59e7ad45b5fa | 74793fb4aa3b39eb0fa58219039138323a2d4e31 | /Scrapy/Scrapy通用爬虫(爬取中华网科技类新闻)/scrapyuniversal/scrapyuniversal/items.py | b1cd42c601dfde34651a49ce2a802c6a7fb8b8e5 | [] | no_license | zyhang8/web_crawler_development | ae0d43eff732edb9462094e0496b50e59164c0e6 | 0117014934b8cca3224084b687467131fe48a21c | refs/heads/master | 2020-04-26T17:22:02.449389 | 2019-08-09T08:55:35 | 2019-08-09T08:55:35 | 173,710,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Field, Item
class NewsItem(Item):
title = Field()
text = Field()
datetime = Field()
source = Field()
url = Field()
website = Field() | [
"1023708557@qq.com"
] | 1023708557@qq.com |
d79c46fb13608d823353469aa85b54bae121f2aa | e58df4aeee11f8a97bdeede6a75a776d130f86d2 | /scripts/umap_fig.py | b03fd226a1d337ffa3944fbc02708089ca66778a | [
"MIT"
] | permissive | ashuein/molpal | 6ecd79767d8ef254e2c852e20f77cd9338844f35 | 1e17a0c406516ceaeaf273a6983d06206bcfe76f | refs/heads/main | 2023-01-29T03:23:10.525555 | 2020-12-15T14:17:56 | 2020-12-15T14:17:56 | 321,720,018 | 1 | 0 | MIT | 2020-12-15T16:09:48 | 2020-12-15T16:09:48 | null | UTF-8 | Python | false | false | 10,532 | py | import argparse
import csv
from operator import itemgetter
from pathlib import Path
import pickle
import sys
from typing import Dict, List, Set
import matplotlib.pyplot as plt
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Ellipse
import numpy as np
import seaborn as sns
from tqdm import tqdm
# global seaborn styling applied to every figure this script produces
sns.set_theme(style='white', context='paper')

# histogram bin count; presumably used by plotting code later in this
# file -- it is not referenced in this excerpt (TODO confirm)
NBINS=50
def get_num_iters(data_dir: str) -> int:
    """Count the per-iteration score CSVs inside *data_dir*.

    Any directory entry whose stem contains the substring 'iter' is
    treated as one iteration's worth of scores.
    """
    return sum(1 for entry in Path(data_dir).iterdir() if 'iter' in entry.stem)
def read_scores(scores_csv: str) -> Tuple[Dict, Dict]:
    """Read the scores contained in the file located at *scores_csv*.

    The CSV is expected to have a header row followed by rows of
    [SMILES, score]. Rows whose score is missing or cannot be parsed as a
    float are returned separately as failures.

    Parameters
    ----------
    scores_csv : str
        the path of a scores CSV file

    Returns
    -------
    scores : Dict[str, float]
        mapping from SMILES string to its successfully parsed score
    failures : Dict[str, None]
        mapping from SMILES string to None for rows without a parseable score
    """
    scores = {}
    failures = {}
    with open(scores_csv) as fid:
        reader = csv.reader(fid)
        next(reader, None)  # skip the header; tolerate an empty file
        for row in reader:
            if not row:
                continue  # skip entirely blank lines
            try:
                scores[row[0]] = float(row[1])
            # only catch parsing problems -- the original bare `except:`
            # would also have swallowed KeyboardInterrupt/SystemExit
            except (IndexError, ValueError):
                failures[row[0]] = None

    return scores, failures
def get_new_points_by_epoch(scores_csvs: List[str]) -> List[Dict]:
    """Get the set of new points and associated scores acquired at each
    iteration in the list of scores_csvs that are already sorted by
    iteration."""
    seen = set()
    news = []

    for csv_path in scores_csvs:
        scores, _ = read_scores(csv_path)

        fresh = {}
        for smi, score in scores.items():
            if smi not in seen:
                fresh[smi] = score

        news.append(fresh)
        seen.update(fresh)

    return news
def add_ellipses(ax, invert=False):
kwargs = dict(fill=False, color='white' if invert else 'black', lw=1.)
ax.add_patch(Ellipse(xy=(6.05, -6.0), width=2.9, height=1.2, **kwargs))
ax.add_patch(Ellipse(xy=(16.05, 4.5), width=1.7, height=2.6, **kwargs))
def add_model_data(fig, gs, data_dir, i, model,
d_smi_idx, fps_embedded, zmin, zmax,
portrait, n_models):
scores_csvs = [p_csv for p_csv in Path(data_dir).iterdir()
if 'iter' in p_csv.stem]
scores_csvs = sorted(scores_csvs, key=lambda p: int(p.stem.split('_')[4]))
new_pointss = get_new_points_by_epoch(scores_csvs)
if portrait:
MAX_ROW = len(new_pointss)
else:
MAX_ROW = n_models
axs = []
for j, new_points in enumerate(new_pointss):
if portrait:
row, col = j, i
else:
row, col = i, j
ax = fig.add_subplot(gs[row, col])
smis, scores = zip(*new_points.items())
idxs = [d_smi_idx[smi] for smi in smis]
ax.scatter(
fps_embedded[idxs, 0], fps_embedded[idxs, 1],
marker='.', c=scores, s=2, cmap='plasma', vmin=zmin, vmax=zmax
)
add_ellipses(ax)
if row==0:
if portrait:
ax.set_title(model)
if row==MAX_ROW:
if not portrait:
ax.set_xlabel(j)
if col==0:
if portrait:
ax.set_ylabel(row)
else:
ax.set_ylabel(model)
ax.set_xticks([])
ax.set_yticks([])
axs.append(ax)
return fig, axs
def si_fig(d_smi_score, d_smi_idx, fps_embedded, data_dirs, models,
portrait=True):
zmin = -max(score for score in d_smi_score.values() if score < 0)
zmax = -min(d_smi_score.values())
zmin = round((zmin+zmax)/2)
n_models = len(data_dirs)
n_iters = get_num_iters(data_dirs[0])
if portrait:
fig = plt.figure(figsize=(10*1.15, 15), constrained_layout=True)
gs = fig.add_gridspec(nrows=n_iters, ncols=n_models)
else:
fig = plt.figure(figsize=(15*1.15, 10), constrained_layout=True)
gs = fig.add_gridspec(nrows=n_models, ncols=n_iters)
axs = []
for i, (parent_dir, model) in enumerate(zip(data_dirs, models)):
fig, axs_ = add_model_data(fig, gs, parent_dir, i, model,
d_smi_idx, fps_embedded, zmin, zmax,
portrait, n_models)
axs.extend(axs_)
ticks = list(range(zmin, round(zmax)))
colormap = ScalarMappable(cmap='plasma')
colormap.set_clim(zmin, zmax)
cbar = plt.colorbar(colormap, ax=axs, aspect=30, ticks=ticks)
cbar.ax.set_title('Score')
ticks[0] = f'≤{ticks[0]}'
cbar.ax.set_yticklabels(ticks)
if portrait:
fig.text(0.01, 0.5, 'Iteration', ha='center', va='center',
rotation='vertical',
fontsize=14, fontweight='bold',)
fig.text(0.465, 0.9975, 'Model', ha='center', va='top',
fontsize=14, fontweight='bold',)
else:
fig.text(0.01, 0.5, 'Model', ha='center', va='center',
rotation='vertical',
fontsize=16, fontweight='bold')
fig.text(0.48, 0.01, 'Iteration', ha='center', va='center',
fontsize=16, fontweight='bold',)
plt.savefig(f'umap_fig_si_{"portrait" if portrait else "landscape"}.pdf')
plt.clf()
def add_top1k_panel(fig, gs, d_smi_score, d_smi_idx, fps_embedded):
true_top_1k = dict(sorted(d_smi_score.items(), key=itemgetter(1))[:1000])
true_top_1k_smis = set(true_top_1k.keys())
top_1k_idxs = [d_smi_idx[smi] for smi in true_top_1k_smis]
top_1k_fps_embedded = fps_embedded[top_1k_idxs, :]
ax = fig.add_subplot(gs[0:2, 0:2])
ax.scatter(top_1k_fps_embedded[:, 0], top_1k_fps_embedded[:, 1],
c='grey', marker='.')
add_ellipses(ax)
return fig, ax
def add_density_panel(fig, gs, ax1, fps_embedded):
ax2 = fig.add_subplot(gs[0:2, 2:])
_, _, _, im = ax2.hist2d(
x=fps_embedded[:, 0], y=fps_embedded[:, 1],
bins=NBINS, cmap='Purples_r'
)
ax2_cbar = plt.colorbar(im, ax=(ax1, ax2), aspect=20)
ax2_cbar.ax.set_title('Points')
ax2.set_yticks([])
add_ellipses(ax2, True)
return fig, ax2
def add_model_row(fig, gs, parent_dir, row, iters, model,
d_smi_idx, fps_embedded, zmin, zmax):
scores_csvs = [p_csv for p_csv in Path(parent_dir).iterdir()
if 'iter' in p_csv.stem]
scores_csvs = sorted(scores_csvs, key=lambda p: int(p.stem.split('_')[4]))
col = 0
axs = []
for j, new_points in enumerate(get_new_points_by_epoch(scores_csvs)):
if j not in iters:
continue
ax = fig.add_subplot(gs[row, col])
smis, scores = zip(*new_points.items())
idxs = [d_smi_idx[smi] for smi in smis]
ax.scatter(
fps_embedded[idxs, 0], fps_embedded[idxs, 1], alpha=0.75,
marker='.', c=scores, s=2, cmap='plasma', vmin=zmin, vmax=zmax
)
add_ellipses(ax)
if row==4:
ax.set_xlabel(j)
if col==0:
ax.set_ylabel(model)
ax.set_xticks([])
ax.set_yticks([])
axs.append(ax)
col+=1
return fig, axs
def main_fig(d_smi_score, d_smi_idx, fps_embedded, data_dirs,
models=None, iters=None):
models = ['RF', 'NN', 'MPN'] or models
iters = [0, 1, 3, 5] or iters[:4]
zmax = -min(d_smi_score.values())
zmin = -max(score for score in d_smi_score.values() if score < 0)
zmin = round((zmin+zmax)/2)
nrows = 2+len(data_dirs)
ncols = 4
fig = plt.figure(figsize=(2*ncols*1.15, 2*nrows), constrained_layout=True)
gs = fig.add_gridspec(nrows=nrows, ncols=4)
fig, ax1 = add_top1k_panel(fig, gs, d_smi_score, d_smi_idx, fps_embedded)
fig, ax2 = add_density_panel(fig, gs, ax1, fps_embedded)
axs = []
for i, (data_dir, model) in enumerate(zip(data_dirs, models)):
fig, axs_ = add_model_row(fig, gs, data_dir, i+2, iters, model,
d_smi_idx, fps_embedded, zmin, zmax)
axs.extend(axs_)
colormap = ScalarMappable(cmap='plasma')
colormap.set_clim(zmin, zmax)
ticks = list(range(zmin, round(zmax)))
cbar = plt.colorbar(colormap, ax=axs, aspect=30, ticks=ticks)
cbar.ax.set_title('Score')
ticks[0] = f'≤{ticks[0]}'
cbar.ax.set_yticklabels(ticks)
fig.text(-0.03, 1.03, 'A', transform=ax1.transAxes,
fontsize=16, fontweight='bold', va='center', ha='right')
fig.text(-0.0, 1.03, 'B', transform=ax2.transAxes,
fontsize=16, fontweight='bold', va='center', ha='left')
fig.text(-0.03, -0.075, 'C', transform=ax1.transAxes,
fontsize=16, fontweight='bold', va='center', ha='right')
fig.text(0.475, 0.005, 'Iteration', ha='center', va='center',
fontweight='bold')
plt.savefig(f'umap_fig_main_2.pdf')
plt.clf()
parser = argparse.ArgumentParser()
parser.add_argument('--scores-dict-pkl',
help='the filepath of a pickle file containing the scores dictionary')
parser.add_argument('--smis-csv',
help='the filepath of a csv file containing the SMILES string each molecule in the library in the 0th column')
parser.add_argument('--fps-embedded-npy',
help='a .npy file containing the 2D embedded fingerprint of each molecule in the library. Must be in the same order as smis-csv')
parser.add_argument('--data-dirs', nargs='+',
help='the directories containing molpal output data')
parser.add_argument('--models', nargs='+',
help='the respective name of each model used in --data-dirs')
parser.add_argument('--iters', nargs=4, type=int, default=[0, 1, 3, 5],
help='the FOUR iterations of points to show in the main figure')
parser.add_argument('--si-fig', action='store_true', default=False,
help='whether to produce generate the SI fig instead of the main fig')
parser.add_argument('--landscape', action='store_true', default=False,
help='whether to produce a landscape SI figure')
if __name__ == "__main__":
args = parser.parse_args()
d_smi_score = pickle.load(open(args.scores_dict_pkl, 'rb'))
with open(args.smis_csv, 'r') as fid:
reader = csv.reader(fid); next(reader)
smis = [row[0] for row in tqdm(reader)]
d_smi_idx = {smi: i for i, smi in enumerate(smis)}
fps_embedded = np.load(args.fps_embedded_npy)
if not args.si_fig:
main_fig(d_smi_score, d_smi_idx, fps_embedded,
args.data_dirs, args.models, args.iters)
else:
si_fig(d_smi_score, d_smi_idx, fps_embedded,
args.data_dirs, args.models, not args.landscape) | [
"deg711@g.harvard.edu"
] | deg711@g.harvard.edu |
adefe9268150dc891ce092e1159c8d1543200a6b | 444472d1a8b98634e147e58f6f4065ff3877a1df | /venv/Scripts/easy_install-3.8-script.py | 543d4979b80016a5d43bac641c96f9f4764c3d75 | [] | no_license | miyafung/Django | d45aad8cfb541d5365b7d9ee2a315274699b8ae6 | 4e2680c1b9fec9a7574d8f5a4228036e34e23536 | refs/heads/master | 2020-12-13T18:44:39.300348 | 2020-01-17T07:49:47 | 2020-01-17T07:49:47 | 234,498,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | #!D:\dev\devproj\Django\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"752875397@qq.com"
] | 752875397@qq.com |
c5028c3d3dafa9bccfb46704f6871bee52f2eea5 | 480ef178622c12adbadb37d204ee7afc7be5a8d3 | /python/51-60/Jump Game.py | 95417e832c3a1ce2cc94432ca3eac0d59a677948 | [] | no_license | talentlei/leetcode | 9e71154af31c1fe563f7ad8f64dbc442d1148108 | edf983bffe51ba7842446957ae9f34af6decb917 | refs/heads/master | 2021-01-19T07:43:16.483054 | 2015-07-07T15:20:09 | 2015-07-07T15:20:09 | 30,535,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # @param A, a list of integers
# @return a boolean
def canJump(self, A):
size = len(A)
if not A:
return True
isReach = [0 for i in range(0,size)]
isReach[0] = 1
Max=0
for i in range(0,size):
if isReach[i]==1:
if i+A[i]>=size-1:
return True
elif i+A[i]>Max:
Max=i+A[i]
for j in range(i+1,Max+1):
isReach[j]=1
return False
| [
"chenlei_0630@163.com"
] | chenlei_0630@163.com |
1c8511d43344a1e4d70820bed6125f0579cc50c8 | c112831974be5aa036a74bbe1bf3798a4f9a5907 | /Python基础教程学习代码/venv/Scripts/pip3.7-script.py | 5cd47a7d356bee3623bf1d548ec06ba97a352e59 | [] | no_license | MuSaCN/PythonLearning_old1 | 5e1cb069d80cbe9527c179877b0d2026072c45c0 | c9aa0938875959526cf607344c1094a8fbf76400 | refs/heads/master | 2020-07-31T23:51:08.591550 | 2019-09-24T10:59:46 | 2019-09-24T10:59:46 | 210,792,257 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 443 | py | #!C:\Users\i2011\PycharmProjects\Python»ù´¡½Ì³Ìѧϰ\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"39754824+MuSaCN@users.noreply.github.com"
] | 39754824+MuSaCN@users.noreply.github.com |
b90bf26ca1b076c55a7d3afb71ebd56f3ea5d44d | d46517ab5b0ed71f419baf32354514a3b8d89a3c | /school/kids/migrations/0005_auto_20170922_1322.py | f62d6c55daedee5d8d4973a84a135e2185ec2c27 | [] | no_license | NagoorBhaskarReddy/Alamanac | 246a6a69f7c7e56d0f4fd8b2edd90c64ebf96c58 | c366b1a1900edf721647ccdd3b67cdf4f6fd13a0 | refs/heads/master | 2021-07-13T11:55:56.573785 | 2017-10-14T10:14:20 | 2017-10-14T10:14:20 | 106,835,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-22 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kids', '0004_parents_p_mobile'),
]
operations = [
migrations.AlterField(
model_name='parents',
name='p_mobile',
field=models.CharField(blank=True, max_length=12, null=True),
),
]
| [
"noreply@github.com"
] | noreply@github.com |
e9d2c868d8e2649e31e4788d3c263f94d72f8372 | 54e452bf63bf97dcd05b3da55b8e8deef5f2aff5 | /build/rviz_pkg/catkin_generated/pkg.develspace.context.pc.py | d148fa6a0f891e69c976c9b2c0d54aa4197f6d71 | [] | no_license | Jonathanchan1996/ELEC4010k_project | 2795f500d66059d831af9dac7d561835e490e5a5 | 4d1f022273d415edf22765eef5b4740888b8d413 | refs/heads/master | 2020-09-23T06:19:37.217485 | 2019-12-06T15:56:46 | 2019-12-06T15:56:46 | 225,425,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rviz_pkg"
PROJECT_SPACE_DIR = "/home/jonathan/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"cljchanac@ust.hk"
] | cljchanac@ust.hk |
f4d21a59dd31bfb8ed2395253e932a01579c8f76 | bee2ca22668a07c76a846bbdc34a8c32e860d110 | /Speech/KWS/infer_longterm_audio_average_duration_ms.py | 7a25d4149fba82f08889e8f4d56575300cbce261 | [] | no_license | wavelet2008/demo | 5ddaf739f6ea315fabd181b899f7ae1ffb86528c | cddcbf6461ff77fd63b816a49492ae07c1758088 | refs/heads/master | 2023-01-14T12:39:50.532300 | 2020-11-02T09:59:35 | 2020-11-02T09:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,907 | py | import argparse
import pandas as pd
import pickle
import sys
import torch.nn.functional as F
from tqdm import tqdm
sys.path.insert(0, '/home/huanyuan/code/demo/Speech/KWS')
from impl.pred_pyimpl import kws_load_model, dataset_add_noise, model_predict
from dataset.kws.dataset_helper import *
from utils.train_tools import *
def longterm_audio_predict(cfg, net, audio_idx, audio_file, audio_mode, audio_label, audio_label_idx, timeshift_ms, average_window_duration_ms):
# init
input_dir = os.path.join(cfg.general.data_dir, '../dataset_{}_{}'.format(cfg.general.version, cfg.general.date), 'dataset_audio', audio_mode)
input_dir = os.path.join(input_dir, audio_label)
num_classes = cfg.dataset.label.num_classes
sample_rate = cfg.dataset.sample_rate
clip_duration_ms = cfg.dataset.clip_duration_ms
desired_samples = int(sample_rate * clip_duration_ms / 1000)
# load data
data, filename = load_preload_audio(audio_file, audio_idx, audio_label, audio_label_idx, input_dir)
# # debug
# librosa.output.write_wav(os.path.join("/home/huanyuan/model/model_10_30_25_21/model/kws_xiaoyu_res15_10272020/testing/", filename.split('.')[0] + '.wav'), data, sr=sample_rate)
# alignment data
data = np.pad(data, (0, max(0, desired_samples - len(data))), "constant")
# add noise for _silence_ label
if audio_label == SILENCE_LABEL:
data = dataset_add_noise(cfg, data, bool_silence_label=True)
# calculate the average score across all the results
data_list = []
if len(data) > desired_samples:
timeshift_samples = int(sample_rate * timeshift_ms / 1000)
data_number = 1 + (len(data) - desired_samples) // timeshift_samples
for data_idx in range(data_number):
data_list.append(data[timeshift_samples * data_idx: timeshift_samples * data_idx + desired_samples])
else:
data_list.append(data)
score_list = []
for data_idx in range(len(data_list)):
# # debug
# librosa.output.write_wav(os.path.join("/home/huanyuan/model/model_10_30_25_21/model/kws_xiaoyu_res15_10272020/testing/", str(data_idx) + '.wav'),
# data_list[data_idx], sr=sample_rate)
score = model_predict(cfg, net, data_list[data_idx])
score_list.append(score[0])
average_window_length = 1 + average_window_duration_ms // timeshift_ms
if len(score_list) > average_window_length:
windows_number = 1 + len(score_list) - average_window_length
# Calculate the average score across all the results in the window.
average_scores_list = []
for windows_idx in range(windows_number):
score_list_window = score_list[windows_idx: windows_idx + average_window_length]
average_scores = np.zeros(num_classes)
for score in score_list_window:
for idx in range(num_classes):
average_scores[idx] += score[idx] / len(score_list_window)
average_scores_list.append(average_scores)
# Sort the averaged results.
average_scores_list = sorted(average_scores_list, key=lambda p: p[audio_label_idx])
average_scores = average_scores_list[0]
else:
average_scores = np.zeros(num_classes)
for score in score_list:
for idx in range(num_classes):
average_scores[idx] += score[idx] / len(score_list)
pred = np.argmax(average_scores)
return pred, average_scores
def predict(config_file, epoch, mode, augmentation_on, timeshift_ms, average_window_duration_ms):
# load configuration file
cfg = load_cfg_file(config_file)
# init
num_classes = cfg.dataset.label.num_classes
# load prediction model
model = kws_load_model(cfg.general.save_dir, int(cfg.general.gpu_ids), epoch)
net = model['prediction']['net']
net.eval()
# load label index
label_index = load_label_index(cfg.dataset.label.positive_label)
# load data
data_pd = pd.read_csv(cfg.general.data_csv_path)
data_pd_mode = data_pd[data_pd['mode'] == mode]
data_file_list = data_pd_mode['file'].tolist()
data_mode_list = data_pd_mode['mode'].tolist()
data_label_list = data_pd_mode['label'].tolist()
results_list = []
preds = []
labels = []
for audio_idx in tqdm(range(len(data_file_list))):
results_dict = {}
results_dict['file'] = data_file_list[audio_idx]
results_dict['mode'] = data_mode_list[audio_idx]
results_dict['label'] = data_label_list[audio_idx]
results_dict['label_idx'] = label_index[results_dict['label']]
assert results_dict['mode'] == mode, "[ERROR:] Something wronge about mode, please check"
# # debug
# if results_dict['file'] != "/home/huanyuan/data/speech/kws/xiaoyu_dataset_03022018/XiaoYuDataset_10272020/xiaoyu/7276078M1_唤醒词_小鱼小鱼_女_中青年_是_0192.wav":
# continue
pred, score = longterm_audio_predict(cfg, net, audio_idx, results_dict['file'], results_dict['mode'], results_dict['label'], results_dict['label_idx'], timeshift_ms, average_window_duration_ms)
preds.append(pred)
labels.append(results_dict['label_idx'])
results_dict['result_idx'] = pred
for classe_idx in range(num_classes):
results_dict['prob_{}'.format(classe_idx)] = score[classe_idx]
results_list.append(results_dict)
# caltulate accuracy
accuracy = float((np.array(preds) == np.array(labels)).astype(int).sum()) / float(len(labels))
msg = 'epoch: {}, batch: {}, {}_accuracy: {:.4f}'.format(model['prediction']['epoch'], model['prediction']['batch'], mode, accuracy)
print(msg)
# out csv
csv_data_pd = pd.DataFrame(results_list)
csv_data_pd.to_csv(os.path.join(cfg.general.save_dir, 'infer_longterm_average_{}_augmentation_{}.csv'.format(mode, augmentation_on)), index=False, encoding="utf_8_sig")
return accuracy
def main():
"""
使用模型对音频文件进行测试,配置为 --input 中的 config 文件,当存在音频文件长度大于模型送入的音频文件长度时(1s\2s), 该脚本会通过滑窗的方式测试每一小段音频数据,计算连续 500ms(17帧) 音频的平均值结果,
在得到的平均结果中对应label最小值作为最终结果
该过程近似测试流程,可以作为参考
"""
# default_mode = "training"
# default_mode = "testing,validation,training"
# default_mode = "testing,validation"
default_mode = "validation"
default_model_epoch = -1
default_augmentation_on = False
default_timeshift_ms = 30
default_average_window_duration_ms = 500
parser = argparse.ArgumentParser(description='Streamax KWS Infering Engine')
# parser.add_argument('--input', type=str, default="/home/huanyuan/code/demo/Speech/KWS/config/kws/kws_config.py", help='config file')
# parser.add_argument('--input', type=str, default="/home/huanyuan/code/demo/Speech/KWS/config/kws/kws_config_xiaoyu.py", help='config file')
parser.add_argument('--input', type=str, default="/home/huanyuan/code/demo/Speech/KWS/config/kws/kws_config_xiaoyu_2.py", help='config file')
parser.add_argument('--mode', type=str, default=default_mode)
parser.add_argument('--epoch', type=str, default=default_model_epoch)
parser.add_argument('--augmentation_on', type=bool, default=default_augmentation_on)
parser.add_argument('--timeshift_ms', type=int, default=default_timeshift_ms)
parser.add_argument('--average_window_duration_ms', type=int, default=default_average_window_duration_ms)
args = parser.parse_args()
mode_list = args.mode.strip().split(',')
for mode_type in mode_list:
predict(args.input, args.epoch, mode_type, args.augmentation_on, args.timeshift_ms, args.average_window_duration_ms)
if __name__ == "__main__":
main()
| [
"392940398@email.com"
] | 392940398@email.com |
7b2bcc78848034f7c8f4bb73d61272b0c48efda0 | c544b2b7df185242cc09feebd0f23f91b8df4f3c | /migrations/versions/7cec85baef67_users_table.py | 9145008fbf9f0332179141b766db25c6817328bc | [] | no_license | myoshibe/looker_bu_training | 7c5fe62913b8565f610338a68ac10ff0fbef692a | fc8fb5ea74b471f6994bc84604d5c21b50978268 | refs/heads/master | 2022-06-01T10:32:21.904466 | 2020-05-01T09:28:17 | 2020-05-01T09:28:17 | 260,401,309 | 0 | 0 | null | 2020-05-01T07:18:42 | 2020-05-01T07:18:42 | null | UTF-8 | Python | false | false | 1,644 | py | """users table
Revision ID: 7cec85baef67
Revises:
Create Date: 2020-04-30 16:04:04.681302
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7cec85baef67'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('table',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('string_example', sa.String(length=128), nullable=True),
sa.Column('boolean_example', sa.Boolean(), nullable=True),
sa.Column('integer_example', sa.Integer(), nullable=True),
sa.Column('json_example', sa.JSON(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('table')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
| [
"sam.pitcher@looker.com"
] | sam.pitcher@looker.com |
6e2e961b1230e6ea174f6fcd4a88f93d5c5a148b | da3875acfa9de630fbe6b9ed88800dd4a365626b | /book_code/ch03.7-pong_v1.py | e137a4e87bc834136dfb866c1d168ac526ee15de | [] | no_license | hjc1985/python | 96f9839a60a86ae07174a18bdb0062ceb242021b | f87aca3359257f8a1a3cbe2d2dae3630ad69c6e2 | refs/heads/master | 2023-06-16T07:37:38.926966 | 2021-03-20T02:44:34 | 2021-03-20T02:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,619 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 02:02:28 2019
@author: hwdon
"""
import pygame, sys
from pygame.locals import *
WIDTH = 600 # 窗口宽度
HEIGHT = 400 # 窗口高度
BALL_RADIUS = 15 #球的半径
ball_pos = [0,0] #球的位置
ball_vel = [0,0] #球的速度
PAD_WIDTH = 8 #挡板宽
PAD_HEIGHT = 80 #挡板高
HALF_PAD_WIDTH = PAD_WIDTH//2
HALF_PAD_HEIGHT = PAD_HEIGHT//2
paddle1_pos = [0,0]
paddle2_pos = [0,0]
paddle1_vel = 0 #左paddle速度(上下移动的速度)
paddle2_vel = 0
score1 = 0 #左paddle得分
score2 = 0 #右paddle得分
#常用颜色 (R,G,B) (红黄蓝)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLACK = (0,0,0)
import random
# 数据:圆的参数
circle_pos = (0,0)
circle_radius = 0
circle_color = (0,0,0)
circle_color = (255,255,255)
#初始化游戏窗口
def init_window():
# 1. 初始化
pygame.init() #初始化 pygame
#设置窗口的模式,(680,480)表示窗口像素,及(宽度,高度)
#此函数返回一个用于绘制的Surface对象(相当于一块画布)
surface = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Game Engine') #设置窗口标题
return surface
def init_scene():
#初始化左右paddle(挡板)的属性
global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel
global score1, score2
paddle1_pos = [HALF_PAD_WIDTH, HEIGHT // 2]
paddle2_pos = [WIDTH - 1 - HALF_PAD_WIDTH, HEIGHT // 2]
#paddle1_vel = [0, 0]
#paddle2_vel = [0, 0]
paddle1_vel = 0
paddle2_vel = 0
score1 = 0
score2 = 0
#初始化球的属性
global ball_pos, ball_vel
ball_pos = [WIDTH / 2, HEIGHT / 2]
horizontal = random.randrange(2,4) #随机生成的水平速度
vertical = random.randrange(1,3) #随机生成的垂直速度
if random.random()>0.5: #随机的向左向右
horizontal= -horizontal
if random.random()>0.5: #随机的向上向下
vertical= -vertical
ball_vel = [horizontal,-vertical]
def ball_init():
global ball_pos, ball_vel # these are vectors stored as lists
ball_pos = [WIDTH / 2, HEIGHT / 2]
horizontal = random.randrange(2,4)
vertical = random.randrange(1,3)
#表示向右
if random.random()>0.5:
horizontal= -horizontal
if random.random()>0.5:
vertical= -vertical
ball_vel = [horizontal,-vertical]
CICLE_RADIUS = 70 #背景中的中心元半径
def draw(surface):
global paddle1_pos, paddle2_pos, ball_pos, ball_vel, score1, score2
#绘制画面背景
surface.fill(BLACK) #背景颜色为黑色
pygame.draw.line(surface, WHITE, [WIDTH // 2, 0],[WIDTH // 2, HEIGHT], 1)
pygame.draw.line(surface, WHITE, [PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1)
pygame.draw.line(surface, WHITE, [WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1)
pygame.draw.circle(surface, WHITE, [WIDTH//2, HEIGHT//2], CICLE_RADIUS, 1)
#绘制挡板 paddles和球 ball
pygame.draw.circle(surface, WHITE, (int(ball_pos[0]),int(ball_pos[1])), BALL_RADIUS, 0)
pygame.draw.rect(surface, GREEN, (int(paddle1_pos[0]) - HALF_PAD_WIDTH, int(paddle1_pos[1]) - HALF_PAD_HEIGHT,
PAD_WIDTH,PAD_HEIGHT))
pygame.draw.rect(surface, GREEN, (int(paddle2_pos[0]) - HALF_PAD_WIDTH, int(paddle2_pos[1]) - HALF_PAD_HEIGHT,
PAD_WIDTH,PAD_HEIGHT))
#绘制得分 scores
drawText(surface,"Score1: "+str(score1),(50,20))
drawText(surface,"Score2: "+str(score2), (470, 20))
pygame.display.flip() #刷新画面
# 辅助函数:绘制文本。参数:文本、位置、字体名和字体大小,
def drawText(surface,text,pos=(1,1),color = RED,font_name="Comic Sans MS",font_size=20):
myfont = pygame.font.SysFont(font_name,font_size)
text_image = myfont.render(text,1,color)
surface.blit(text_image, pos)
#1. 游戏初始化
def init():
surface = init_window()
init_scene()
return surface
# 键盘按下事件处理函数:更新挡板的垂直速度
def keydown(event):
global paddle1_vel, paddle2_vel
if event.key == K_w:
paddle1_vel = -8
elif event.key == K_s:
paddle1_vel = 8
elif event.key == K_UP:
paddle2_vel = -8
elif event.key == K_DOWN:
paddle2_vel = 8
#键盘弹起事件处理函数:挡板速度重置为0
def keyup(event):
global paddle1_vel, paddle2_vel
if event.key in (K_w, K_s):
paddle1_vel = 0
elif event.key in (K_UP, K_DOWN):
paddle2_vel = 0
#2.1 处理(键盘、鼠标等)事件
def processEvent():
for event in pygame.event.get(): #返回当前的所有事件
if event.type == pygame.QUIT: #接收到窗口关闭事件
return False #退出游戏
elif event.type == KEYDOWN:
keydown(event)
elif event.type == KEYUP:
keyup(event)
return True
# 2.2 更新游戏的数据
def update():
global ball_pos, ball_vel # these are vectors stored as lists
global score1, score2
# 更新球
ball_pos[0] += int(ball_vel[0])
ball_pos[1] += int(ball_vel[1])
#上下墙碰撞,水平速度不变,垂直速度相反
if ball_pos[1] < BALL_RADIUS or ball_pos[1] > HEIGHT - 1 - BALL_RADIUS:
ball_vel[0] = ball_vel[0]
ball_vel[1] = -ball_vel[1]
# 检测挡板是否和球碰撞
if ball_pos[0] < BALL_RADIUS + PAD_WIDTH:
if ball_pos[1] <= paddle1_pos[1] + HALF_PAD_HEIGHT and \
ball_pos[1] >= paddle1_pos[1] - HALF_PAD_HEIGHT:
ball_vel[0] = -(ball_vel[0] * 1.1) #挡板击中球
else: #挡板没挡住球,对方得分
ball_init()
score2 += 1
elif ball_pos[0] > WIDTH - 1 - BALL_RADIUS - PAD_WIDTH:
if ball_pos[1] <= paddle2_pos[1] + HALF_PAD_HEIGHT and ball_pos[1] >= paddle2_pos[1] - HALF_PAD_HEIGHT:
ball_vel[0] = -(ball_vel[0] * 1.1)
else:
ball_init()
score1 += 1
# 更新挡板的垂直位置
if paddle1_pos[1] + paddle1_vel > HALF_PAD_HEIGHT and paddle1_pos[1] + paddle1_vel < HEIGHT - 1 - HALF_PAD_HEIGHT:
paddle1_pos[1] += paddle1_vel
if paddle2_pos[1] + paddle2_vel > HALF_PAD_HEIGHT and paddle2_pos[1] + paddle2_vel < HEIGHT - 1 - HALF_PAD_HEIGHT:
paddle2_pos[1] += paddle2_vel
# ------游戏主函数-----
def game_engine():
surface = init() #1. 初始化pygame和游戏数据
#2. 循环,直到游戏结束
running = True
while running == True:
running = processEvent() #2.1 处理事件
update() #2.2 更新数据
draw(surface) #2.3绘制场景
pygame.quit() #3.退出程序
if __name__ == "__main__":
game_engine() | [
"noreply@github.com"
] | noreply@github.com |
53090ee9cbe0dcd8ee7ea9c5b92c13ba621fd579 | 6583be6a07cec50f5bc41edf87c828dd3a939539 | /index.py | b9066869f81f1c2ed18c1324aba7bfcad1f881e3 | [] | no_license | Goutham88/hackathon | 64c0009a925a7968bde5bde696fc36a35c6665ec | ecad7c22e54634300b8df9dd54e152cb0e9f9d34 | refs/heads/master | 2023-04-11T16:04:37.665299 | 2021-05-09T15:17:45 | 2021-05-09T15:17:45 | 292,315,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | import validictory
from flask import request, Flask, json
from flask_restful import Api
from base_controller import BaseController
from common_validation import SCHEMA
from leaderboard_handler import LeaderBoardHandler
from submission_handler import SubmissionHandler
app = Flask(__name__)
app.secret_key = "secret key"
if __name__ == "__main__":
app.run(debug=True)
api = Api(app)
class LeaderBoardController(BaseController):
"""
LeaderBoard API
"""
def get(self):
"""
GET of leaderboard API - takes hackathon id and returns all the scores for that hackathon
"""
try:
hackathon_id = request.args.get("hackathon_id")
leaderboard_data = LeaderBoardHandler().get_leaderboard(hackathon_id)
response = app.response_class(
response=json.dumps({"leaderboard_data": leaderboard_data}),
status=200,
mimetype='application/json'
)
return response
except Exception as ex:
raise ex
api.add_resource(LeaderBoardController, '/leaderboard/', endpoint="leaderboard")
class SubmissionController(BaseController):
"""
Submission API
"""
def post(self):
"""
POST of submission API - takes code, hackathon_id, group_id and returns score and passed testcases
"""
try:
request_data = request.get_json()
validictory.validate(request_data, SCHEMA["submission_post_schema"])
files = request_data.get("files")
hackathon_id = request_data.get("hackathon_id")
group_id = request_data.get("group_id")
testcase_result = SubmissionHandler().run_testcases(files, hackathon_id, group_id)
return testcase_result
except Exception as ex:
raise ex
api.add_resource(SubmissionController, '/submission/', endpoint="submission")
| [
"gouthamchunduru8@gmail.com"
] | gouthamchunduru8@gmail.com |
6711e7e7197a8a524223bc5c9d0bc7b748d3b9e3 | e8fbe8a5a95da62214b9214c84f314312e066e0b | /tract_median_age.py | 5c40eadcc7c7b2af35cb62e56516c12064aa5bba | [
"CC0-1.0"
] | permissive | Kibrael/hmda-viz-processing | 81327a612f45f5c9db5bb1629993193cf9840214 | fb6ddafd645533efc286d21f9724d8dd1e64054e | refs/heads/master | 2020-12-25T10:42:08.893093 | 2015-09-18T13:29:21 | 2015-09-18T13:29:21 | 41,053,638 | 0 | 0 | null | 2015-08-20T21:28:41 | 2015-08-19T18:52:49 | HTML | UTF-8 | Python | false | false | 2,190 | py | import json
import requests
import psycopg2
import psycopg2.extras
import os
def write_JSON(name, data, path): #writes a json object to file
with open(path+name, 'w') as outfile:
json.dump(data, outfile, indent=4, ensure_ascii = False)
def connect():
#parameter format for local use
params = {
'dbname':'hmdamaster',
'user':'roellk',
'password':'',
'host':'localhost',
}
try:
conn = psycopg2.connect(**params)
print "i'm connected"
except psycopg2.Error as e: #if database connection results in an error print the following
print "I am unable to connect to the database: ", e
return conn.cursor(cursor_factory=psycopg2.extras.DictCursor) #return a dictionary cursor object
def get_age(state, county, tract):
try:
try:
with open('/Users/roellk/Documents/api_key.txt', 'r') as f:
key = f.read()
api_key = key.strip("'")
field = 'B25035_001E'
except:
print "Error loading API key from file"
#documentation on ACS 5 year is here: http://www.census.gov/data/developers/data-sets/acs-survey-5-year-data.html
#the 2013 A&D reports use the ACS 2010 API
r = requests.get('http://api.census.gov/data/2010/acs5?get=NAME,'+field+'&for=tract:'+tract+'&in=state:'+state+'+county:'+county+'&key='+api_key)
median_list = r.text
return_list = median_list.split(',')
return return_list[8]
except:
print "Unable to connect to Census API"
def median_tract_age(cur):
tract_string = "SELECT DISTINCT(tract) FROM tract_to_cbsa_2010"
cur.execute(tract_string,)
tract_age_list = cur.fetchall()
tract_ages = {}
if len(tract_age_list) > 0:
for i in range (0, len(tract_age_list)):
state = tract_age_list[i][0][0:2]
county = tract_age_list[i][0][2:5]
tract = tract_age_list[i][0][5:]
age = get_age(state,county,tract)
print tract_age_list[i][0], age
#print age.strip('"')
try:
if age is not None:
tract_ages[tract_age_list[i][0]] = age.strip('"')
except:
write_JSON("tract_housing_ages.json", tract_ages, '/Users/roellk/Desktop/HMDA/data/')#/Users/roellk/Desktop/HMDA
write_JSON("tract_housing_ages.json", tract_ages, '/Users/roellk/Desktop/HMDA/data/')#/Users/roellk/Desktop/HMDA
median_tract_age(connect())
| [
"breeroell@gmail.com"
] | breeroell@gmail.com |
8635ec9f9c9cebffa3074b4bd378ae1c25bf01b4 | 62f11818dce1c681c0a324ef4b91b5d0fe70ce01 | /final_network/lstm/test.py | a9bfdf537bae01ec1f764cfdb42569cfbe6b9910 | [] | no_license | OniDaito/MRes | 427ae0c15911b9e592a3bbf680ee9b7f45dbaeb7 | 79a60d33f368310b6f51166d1cb92b42b44dc3a7 | refs/heads/master | 2021-10-16T06:26:49.314964 | 2019-02-08T16:17:48 | 2019-02-08T16:17:48 | 105,301,799 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,991 | py | """
test.py - report the error on this net
author : Benjamin Blundell
email : me@benjamin.computer
"""
import os, sys, math, pickle
import numpy as np
import tensorflow as tf
# Import common items
if __name__ != "__main__":
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
from common.util_neural import *
from common import acids
from common import batcher
def adist(a,b):
  """Return the signed angular distance from *b* to *a* in degrees,
  wrapped into the half-open interval [-180, 180)."""
  # Shift both angles by +180 (the shift cancels in the subtraction), then
  # wrap the difference into [-180, 180).
  shifted_a = a + 180
  shifted_b = b + 180
  delta = shifted_a - shifted_b
  return (delta + 180) % 360 - 180
def predict(FLAGS, sess, graph, bt):
  """Run the restored graph on one random validation batch and print, for a
  single randomly chosen loop, the actual vs. predicted phi/psi torsion
  angles per residue plus the mean absolute angular error."""
  # Find the appropriate tensors we need to fill in
  goutput = graph.get_tensor_by_name("output:0")
  ginput = graph.get_tensor_by_name("train_input:0")
  gmask = graph.get_tensor_by_name("dmask:0")
  gprob = graph.get_tensor_by_name("keepprob:0")
  gtest = graph.get_tensor_by_name("train_test:0")  # NOTE(review): fetched but unused below
  # Grab validation data
  (vbatch_in, vbatch_out, loop_v) = bt.random_batch(batcher.SetType.VALIDATE)
  mask = bt.create_mask(vbatch_in)
  # Run session (keep-probability 1.0 disables dropout at inference time)
  res = None
  if FLAGS.type_out == batcher.BatchTypeOut.CAT:
    gpred = graph.get_tensor_by_name("prediction:0")
    res = sess.run([gpred], feed_dict={ginput: vbatch_in, gmask: mask, gprob: 1.0})
  else:
    res = sess.run([goutput], feed_dict={ginput: vbatch_in, gmask: mask, gprob: 1.0})
  # Now lets output a random example and see how close it is, as well as working out the
  # the difference in mean values. Don't adjust the weights though
  import random
  r = random.randint(0, len(vbatch_in)-1)
  residues = loop_v[r]._residues[:]
  if FLAGS.type_out == batcher.BatchTypeOut.SINCOS:
    # Outputs are 4-vectors per residue — presumably (sin phi, cos phi,
    # sin psi, cos psi); atan2 recovers the angles. TODO confirm ordering.
    print("Actual Predicted")
    diff_psi = 0
    diff_phi = 0
    for i in range(0,len(loop_v[r]._residues)):
      # TODO data representation is now shared between acids and batcher :/
      if FLAGS.type_in == batcher.BatchTypeIn.FIVED:
        sys.stdout.write(acids.amino_to_label(acids.vector_to_acid(vbatch_in[r][i])))
      else:
        sys.stdout.write(acids.amino_to_label(acids.bitmask_to_acid(vbatch_in[r][i])))
      phi0 = 0
      psi0 = 0
      phi0 = math.degrees(math.atan2(vbatch_out[r][i][0], vbatch_out[r][i][1]))
      psi0 = math.degrees(math.atan2(vbatch_out[r][i][2], vbatch_out[r][i][3]))
      sys.stdout.write(": " + "{0:<8}".format("{0:.3f}".format(phi0)) + " ")
      sys.stdout.write("{0:<8}".format("{0:.3f}".format(psi0)) + " ")
      phi1 = 0
      psi1 = 0
      phi1 = math.degrees(math.atan2(res[0][r][i][0], res[0][r][i][1]))
      psi1 = math.degrees(math.atan2(res[0][r][i][2], res[0][r][i][3]))
      # Write the prediction back into the residue copy (omega fixed at pi).
      residues[i]._phi = phi1
      residues[i]._psi = psi1
      residues[i]._omega = math.pi
      sys.stdout.write(" | " + "{0:<8}".format("{0:.3f}".format(phi1)) + " ")
      sys.stdout.write("{0:<8}".format("{0:.3f}".format(psi1)))
      diff_psi += math.fabs(adist(psi0,psi1))
      diff_phi += math.fabs(adist(phi0,phi1))
      print("")
  else:
    # Categorical output: batcher.cat_to_angles maps a category back to a
    # (phi, psi) pair in radians.
    print("Actual Predicted")
    diff_psi = 0
    diff_phi = 0
    for i in range(0,len(loop_v[r]._residues)):
      # TODO data representation is now shared between acids and batcher :/
      if FLAGS.type_in == batcher.BatchTypeIn.FIVED:
        sys.stdout.write(acids.amino_to_label(acids.vector_to_acid(vbatch_in[r][i])))
      else:
        sys.stdout.write(acids.amino_to_label(acids.bitmask_to_acid(vbatch_in[r][i])))
      (phi0, psi0 )= batcher.cat_to_angles(vbatch_out[r][i])
      phi0 = math.degrees(phi0)
      psi0 = math.degrees(psi0)
      sys.stdout.write(": " + "{0:<8}".format("{0:.3f}".format(phi0)) + " ")
      sys.stdout.write("{0:<8}".format("{0:.3f}".format(psi0)) + " ")
      sys.stdout.write("{0:<8}".format("{0:.0f}".format(batcher.get_cat(vbatch_out[r][i]))) + " ")
      (phi1, psi1 )= batcher.cat_to_angles(res[0][r][i])
      phi1 = math.degrees(phi1)
      psi1 = math.degrees(psi1)
      # Write the prediction back into the residue copy (omega fixed at pi).
      residues[i]._phi = phi1
      residues[i]._psi = psi1
      residues[i]._omega = math.pi
      sys.stdout.write(" | " + "{0:<8}".format("{0:.3f}".format(phi1)) + " ")
      sys.stdout.write("{0:<8}".format("{0:.3f}".format(psi1)))
      sys.stdout.write("{0:<8}".format("{0}".format(batcher.get_cat(res[0][r][i]))) + " ")
      diff_psi += math.fabs(adist(psi0,psi1))
      diff_phi += math.fabs(adist(phi0,phi1))
      print("")
  cnt = len(loop_v[r]._residues)
  print( "Diff in Phi/Psi", diff_phi / cnt, diff_psi / cnt)
def test(FLAGS, bt, end_refine):
  ''' Run the network on a random validation example to get a feel for
  the error function. '''
  # end_refine is accepted for interface compatibility but not used here.
  with tf.Session() as sess:
    graph = sess.graph
    # Restore both the graph definition (.meta) and the trained weights,
    # then delegate the actual evaluation/printout to predict().
    saver = tf.train.import_meta_graph(FLAGS.save_path + FLAGS.save_name + '.meta')
    saver.restore(sess, FLAGS.save_path + FLAGS.save_name)
    predict(FLAGS, sess, graph, bt)
| [
"oni@section9.co.uk"
] | oni@section9.co.uk |
36c940953c72705242756e59246b3844d5304fdb | 3f0b20e8ae160d60a21db91cd5fd9ad1300a7037 | /scripts/plot_mem | 2bc0080c4c73e00512603dc0ab819cafe2522020 | [] | no_license | edman/json-compressor | 28aa1b32d11a0b3f547b140315b28a820abfe7b4 | 4bd6931659c3ea7178741ab78427407af6d47c6e | refs/heads/master | 2022-12-20T21:40:43.271695 | 2016-06-19T10:35:43 | 2016-06-19T10:35:43 | 35,529,808 | 1 | 1 | null | 2022-08-15T21:45:24 | 2015-05-13T05:23:04 | C++ | UTF-8 | Python | false | false | 1,822 | #!/usr/bin/env python3
def fixM(a):
    """Scale each raw byte count in *a* down to megabytes (10**6 bytes)."""
    megabytes = []
    for value in a:
        megabytes.append(value / 10**6)
    return megabytes
import plotly as py
import plotly.graph_objs as go
import pandas as pd
# Read massif data from csv format; byte columns are converted to MB via fixM.
summary_file = 'test/inputs/mem_summary.csv'
df = pd.read_csv(summary_file, skipinitialspace=True)
# print(df.head())
x_axis = df['#input']
# Create traces — one grouped bar per compressor/representation.
originalTrace = go.Bar(x=x_axis,
    y=fixM(df['#original']), # Data
    name='Original file') # Additional options
bpTrace = go.Bar(x=x_axis,
    y=fixM(df['#cjson_bp']),
    marker={'color': '#FFCA28'},
    name='Cjson (BP)')
dfTrace = go.Bar(x=x_axis,
    y=fixM(df['#cjson_df']),
    name='Cjson (DFUDS)')
pointerBpTrace = go.Bar(x=x_axis,
    y=fixM(df['#pointer_bp']),
    marker={'color': '#00897B'},
    name='Cjson (pointer, BP)')
pointerDfTrace = go.Bar(x=x_axis,
    y=fixM(df['#pointer_df']),
    name='Cjson (pointer, DFUDS)')
rapidTrace = go.Bar(x=x_axis,
    y=fixM(df['#rapid']),
    marker={'color': '#303F9F'},
    name='RapidJson')
layout = go.Layout(
    barmode='group',
    autosize=False,
    # width=500, height=500,
    xaxis={'tickangle': -45},
    # xaxis={'title'='SNLI input file'}
    # range=[.95, 10.05],
    # tick0=1, dtick=1),
    yaxis={'title': 'RAM usage (MB)', 'dtick': 50}
)
# Assemble traces that will be ploted — only three of the six built above
# are currently shown; the full set is kept in the commented line.
# data = [rapidTrace, bpTrace, dfTrace, pointerBpTrace, pointerDfTrace]
data = [rapidTrace, bpTrace, pointerBpTrace]
# Make figure from data and layout
fig = go.Figure(data=data, layout=layout)
# Generate the plot (writes an HTML file and opens it in the browser)
py.offline.plot(fig, auto_open=True)
"edmanjos@gmail.com"
] | edmanjos@gmail.com | |
24eb14140b8574a9f8976bab2a89f98bf49ad109 | 3186db1413e39be886fa0067e102b2addd73f4d8 | /FP/OperatiiAritmeticeInBaze/bussines.py | aed6f8f49ef21c5ebcb6ec474adde329e75466bc | [] | no_license | elenamaria0703/MyProjects | 516624425396814b37bfce249d4989aaabbc43a0 | ed8c94a30c1ff9250a7d4ff2f1321b2bb598fdc6 | refs/heads/master | 2021-03-02T05:14:20.427516 | 2020-06-16T14:07:55 | 2020-06-16T14:07:55 | 245,840,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,006 | py | '''Proiect LC Elena Maria'''
class ServiceOperatii(object):
def __init__(self, __conversii):
self.__conversii = __conversii
def _Conversie(self,numar,bazaNr,baza):
'''functia indica ce forma de conversie trbuie folosita
in functie de baza numarului si baza destinatie'''
baze = [4,8,16]
if int(bazaNr) == 2 and int(baza) in baze:
Numar = self.__conversii.conversiiRapide(numar,baza)
elif baza == 10:
Numar = self.__conversii.conversiiSubstitutie(numar,int(bazaNr))
elif bazaNr == 10:
Numar = self.__conversii.conversiiImpartiriRep(numar,baza)
else:
Numar = self.__conversii.conversiiBazaInter(numar,bazaNr,baza)
return Numar
def Scadere(self,numar1,baza1,numar2,baza2,baza):
'''scade doua numere initial in doua baze diferite
mai intai se aduc numerele in aceeasi baaza specificata'''
rezultat = []
Numar1 = self._Conversie(numar1, baza1, baza)
Numar2 = self._Conversie(numar2, baza2, baza)
rezultat.append(Numar1)
rezultat.append(Numar2)
if baza != 16:
Numar1 = int(Numar1)
Numar2 = int(Numar2)
Numar = 0
p = 1
tr = 0
while Numar1 != 0 and Numar2 != 0:
cif1 = Numar1 %10
cif2 = Numar2 %10
Numar1 = Numar1 //10
Numar2 = Numar2 //10
if cif1 + tr >= cif2:
cif = cif1 + tr - cif2
tr = 0
else:
cif = cif1 - tr - cif2 + baza
tr = 1
Numar = cif*p + Numar
p = p*10
while Numar1 != 0:
cif1 = Numar1%10
if tr !=0 :
cif = cif1 - tr
tr = 0
else:
cif = cif1
Numar = cif*p + Numar
p=p*10
Numar1 = Numar1 //10
else:
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
tr = 0
Numar = ''
while Numar1 != '' and Numar2 != '':
cif1 = Numar1[len(Numar1)-1:]
Numar1 = Numar1[:len(Numar1)-1]
if cif1 in baza16:
cif1 = int(baza16[cif1])
else:
cif1 = int(cif1)
cif2 = Numar2[len(Numar2)-1:]
Numar2 = Numar2[:len(Numar2)-1]
if cif2 in baza16:
cif2 = int(baza16[cif2])
else:
cif2 = int(cif2)
if cif1 + tr >= cif2:
cif = cif1 + tr - cif2
tr = 0
else:
cif = cif1 - tr - cif2 + baza
tr = 1
aux = ''
for key in baza16:
if cif == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(cif) + Numar
while Numar1 != '':
cif1 = Numar1[len(Numar1)-1:]
Numar1 = Numar1[:len(Numar1)-1]
if cif1 in baza16:
cif1 = int(baza16[cif1])
else:
cif1 = int(cif1)
if tr !=0 :
cif = cif1 - tr
tr = 0
else:
cif = cif1
aux = ''
for key in baza16:
if cif == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(cif) + Numar
rezultat.append(Numar)
return rezultat
def Adunare(self,numar1,baza1,numar2,baza2,baza):
'''aduna doua numere initial in doua baze diferite
mai intai se aduc numerele in aceeasi baaza specificata'''
rezultat = []
Numar1 = self._Conversie(numar1, baza1, baza)
Numar2 = self._Conversie(numar2, baza2, baza)
rezultat.append(Numar1)
rezultat.append(Numar2)
if baza != 16:
Numar1 = int(Numar1)
Numar2 = int(Numar2)
cat = 0
Numar = 0
p = 1
while Numar1 != 0 and Numar2 != 0:
cif1 = Numar1 %10
cif2 = Numar2 %10
Numar1 = Numar1 //10
Numar2 = Numar2 //10
sum = cif1 + cif2 + cat
rez = self.__conversii.impartire(sum,baza)
cat = rez[0]
rest = rez[1]
Numar = rest*p + Numar
p = p*10
while Numar1 != 0:
cif = Numar1%10
sum = cif + cat
rez = self.__conversii.impartire(sum,baza)
Numar = rez[1]* p + Numar
cat = rez[0]
p=p*10
Numar1 = Numar1 //10
while Numar2 != 0:
cif = Numar2%10
sum = cif + cat
rez = self.__conversii.impartire(sum,baza)
Numar = rez[1]* p + Numar
cat = rez[0]
p = p*10
Numar2 = Numar2 //10
if cat != 0:
Numar = cat* p + Numar
else:
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
cat = 0
Numar = ''
while Numar1 != '' and Numar2 != '':
cif1 = Numar1[len(Numar1)-1:]
Numar1 = Numar1[:len(Numar1)-1]
if cif1 in baza16:
cif1 = int(baza16[cif1])
else:
cif1 = int(cif1)
cif2 = Numar2[len(Numar2)-1:]
Numar2 = Numar2[:len(Numar2)-1]
if cif2 in baza16:
cif2 = int(baza16[cif2])
else:
cif2 = int(cif2)
sum = cif1 + cif2 + cat
rez = self.__conversii.impartire(sum,baza)
cat = rez[0]
rest = rez[1]
aux = ''
for key in baza16:
if rest == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(rest) + Numar
while Numar1 != '':
cif1 = Numar1[len(Numar1)-1:]
Numar1 = Numar1[:len(Numar1)-1]
if cif1 in baza16:
cif1 = int(baza16[cif1])
else:
cif1 = int(cif1)
sum = cif1 + cat
rez = self.__conversii.impartire(sum,baza)
cat = rez[0]
rest = rez[1]
aux = ''
for key in baza16:
if rest == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(rest) + Numar
while Numar2 != '':
cif2 = Numar2[len(Numar2)-1:]
Numar2 = Numar2[:len(Numar2)-1]
if cif2 in baza16:
cif2 = int(baza16[cif2])
else:
cif2 = int(cif2)
sum = cif2 + cat
rez = self.__conversii.impartire(sum,baza)
cat = rez[0]
rest = rez[1]
aux = ''
for key in baza16:
if rest == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(rest) + Numar
if cat != 0:
aux = ''
for key in baza16:
if rest == baza16[key]:
aux = key
if aux != '':
Numar = aux + Numar
else:
Numar = str(rest) + Numar
rezultat.append(Numar)
return rezultat
def Impartire(self,numar,baza,cifra):
'''efectueaza impartirea unui numar intr-o baza oarecare la o cifra'''
listaCaturi = []
rest = 0
Numar = 0
if int(baza)!=16:
while numar != '':
cif = int(numar[0])
numar = numar[1:]
nr = rest*int(baza)+cif
rez = self.__conversii.impartire(nr,int(cifra))
rest = rez[1]
listaCaturi.append(rez)
for l in listaCaturi:
Numar = Numar*10 +l[0]
rest = listaCaturi[len(listaCaturi)-1][1]
else:
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
while numar != '':
cif = numar[0]
numar = numar[1:]
if cif in baza16:
cif = int(baza16[cif])
else:
cif = int(cif)
nr = rest*int(baza)+cif
rez = self.__conversii.impartire(nr,int(cifra))
rest = rez[1]
listaCaturi.append(rez)
Numar = ''
for l in listaCaturi:
aux = ''
for key in baza16:
if l[0] == baza16[key]:
aux = key
if aux != '':
Numar = Numar + aux
else:
Numar = Numar + str(l[0])
for i in Numar:
if int(i) != 0:
break
else:
Numar = Numar[1:]
rest = listaCaturi[len(listaCaturi)-1][1]
return Numar,rest
def Inmultire(self,numar,baza,cifra):
'''efectueaza inmultirea unui numar intr-o baza oarecare la o cifra'''
Numar = 0
puteri10 = 1
cat = 0
if int(baza)!=16:
while numar != '':
cif = int(numar[len(numar)-1:])
numar = numar[:len(numar)-1]
inmul = cif*int(cifra) + cat
rez = self.__conversii.impartire(inmul,int(baza))
cat = rez[0]
rest = rez[1]
Numar = rest * puteri10 + Numar
puteri10 = puteri10*10
if cat != 0:
Numar = cat * puteri10 + Numar
else:
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
if cifra in baza16:
cifra = baza16[cifra]
Numar = ''
while numar != '':
cif = numar[len(numar)-1:]
numar = numar[:len(numar)-1]
if cif in baza16:
cif = int(baza16[cif])
else:
cif = int(cif)
inmul = cif*int(cifra) + cat
rez = self.__conversii.impartire(inmul,int(baza))
cat = rez[0]
rest = rez[1]
for key in baza16:
if baza16[key] == rest:
rest = key
Numar = str(rest) + Numar
if cat != 0:
Numar = str(cat)+Numar
return Numar
class Conversii():
def conversiiRapide(self,numar,baza):
''' ajuta la conversii intre bazele puteri ale lui 2'''
Baza16 = {"0000":'0',"0001":'1',"0010":'2',"0011":'3',"0100":'4',"0101":'5',"0110":'6',"0111":'7',"1000":'8',"1001":'9',"1010":'A',"1011":'B',"1100":'C',"1101":'D',"1110":'E',"1111":'F'}
Baza8 = {"000":0,"001":1,"010":2,"011":3,"100":4,"101":5,"110":6,"111":7}
Baza4 = {"00":0,"01":1,"10":2,"11":3}
if baza == 4:
p = 1
Numar = 0
while numar != '':
cif = numar[len(numar)-2:]
numar = numar[:len(numar)-2]
Numar = Baza4[cif]*p + Numar
p = p *10
return Numar
elif baza == 8:
p = 1
Numar = 0
while numar != '':
cif = numar[len(numar)-3:]
numar = numar[:len(numar)-3]
Numar = Baza8[cif]*p + Numar
p = p *10
return Numar
elif baza == 16:
Numar = ''
while numar != '':
cif = numar[len(numar)-4:]
numar = numar[:len(numar)-4]
Numar = Baza16[cif] + Numar
return Numar
def conversiiImpartiriRep(self,numar,baza):
'''impartim succesiv la baza in care se opereaza
numarul este format din resturile impartirilor in ordine inversa '''
if baza != 16:
Numar = 0
p = 1
rez = self.impartire(int(numar),baza)
Numar = rez[1]*p +Numar
cat = rez[0]
p = p*10
while cat != 0:
rez = self.impartire(cat,baza)
Numar = rez[1]*p +Numar
cat = rez[0]
p = p*10
return Numar
else:
Numar = ''
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
rest = ''
rez = self.impartire(int(numar),baza)
for key in baza16:
if baza16[key] == rez[1]:
rest = key
if rest != '':
Numar = str(rest) + Numar
else:
Numar = str(rez[1]) + Numar
cat = rez[0]
while cat != 0:
rest = ''
rez = self.impartire(cat,baza)
for key in baza16:
if baza16[key] == rez[1]:
rest = key
if rest != '':
Numar = str(rest) + Numar
else:
Numar = str(rez[1]) + Numar
cat = rez[0]
return Numar
def conversiiSubstitutie(self,numar,baza):
'''ajuta la trecera dintr-o baza mai mica intr-una mai mare
mai exact dintr-o baza diferita de 10 in baza 10 '''
if baza != 16:
Numar = 0
pow = 0
numar = int(numar)
while numar != 0:
cif = numar%10
Numar = Numar + self.power(baza,pow)*cif
pow += 1
numar = numar//10
return Numar
else:
baza16 = {"A":10,"B":11,"C":12,"D":13,"E":14,"F":15}
Numar = 0
pow = 0
while numar != '':
cif = numar[len(numar)-1:]
numar = numar[:len(numar)-1]
if cif in baza16:
cif = int(baza16[cif])
else:
cif = int(cif)
Numar = Numar + self.power(baza,pow)*cif
pow += 1
return Numar
def conversiiBazaInter(self,numar,bazaNr,baza):
''' folosim baza intermediara 10
din baza initiala trecem numarul in baza 10
prin impartiri succesive la baza in care se opereaza'''
Numar = self.conversiiSubstitutie(numar,int(bazaNr))
return self.conversiiImpartiriRep(Numar, baza)
def impartire(self,nr,baza):
'''impartirea uzuaala a doua numere in baza 10'''
cat = nr//baza
rest = nr - cat*baza
return cat,rest
def power(self,numar,putere):
'''ridica un nr la o putere'''
if putere == 0:
return 1
if putere == 1:
return numar
mij = putere//2
pow = self.power(numar,mij)
if putere%2 == 0:
return pow*pow
else:
return pow*pow*numar | [
"elenamaria0703@users.noreply.github.com"
] | elenamaria0703@users.noreply.github.com |
afbe0a36ff1f83a9c1ebd24646a5d41fef49fe65 | 73c05ee0cbc54dd77177b964f3a72867138a1f0f | /interview/CyC2018_Interview-Notebook/剑指offer/41_2.py | 6ab655dbf45728e3718151f3849c4ee7f6f8943e | [] | no_license | tb1over/datastruct_and_algorithms | 8be573953ca1cdcc2c768a7d9d93afa94cb417ae | 2b1c69f28ede16c5b8f2233db359fa4adeaf5021 | refs/heads/master | 2020-04-16T12:32:43.367617 | 2018-11-18T06:52:08 | 2018-11-18T06:52:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # -*- coding: utf-8 -*-
"""题目描述
请实现一个函数用来找出字符流中第一个只出现一次的字符。例如,当从字符流中只读出前两个字符"go"时,第一个只出现一次的字符是"g"。当从该字符流中读出前六个字符“google"时,第一个只出现一次的字符是"l"。
"""
class Solution:
    """Tracks a stream of characters and reports the first character that
    has appeared exactly once so far ('#' when no such character exists).

    The original skeleton had empty method bodies (a syntax error); this
    implements the standard count-plus-arrival-queue algorithm.
    """

    def __init__(self):
        # Occurrence count per character seen so far.
        self.counts = {}
        # Characters in arrival order; entries that have since repeated are
        # lazily discarded from the front in FirstAppearingOnce().
        self.queue = []

    # Return the first character that currently occurs exactly once.
    def FirstAppearingOnce(self):
        # Drop leading characters that have repeated since they arrived.
        while self.queue and self.counts[self.queue[0]] > 1:
            self.queue.pop(0)
        return self.queue[0] if self.queue else '#'

    # Feed the next character read from the stream.
    def Insert(self, char):
        self.counts[char] = self.counts.get(char, 0) + 1
        if self.counts[char] == 1:
            self.queue.append(char)
"mitree@sina.com"
] | mitree@sina.com |
bea40a355ced6f6a99da070907536ee7aaae0dd6 | 68ae67d7076dd914c6ea04804fb9455f387f8c66 | /users/urls.py | 8552ec9ee650c25691ef745643cc2446e38ad6f1 | [] | no_license | prabhumarappan/bare-comment-system | f96d84e1b9a0f267b2406efa3d6b36fa85f6118e | 9c31068a4ceaed60e3d2cd17d7e22b573786d73f | refs/heads/master | 2021-01-25T12:07:50.767184 | 2018-03-01T16:17:13 | 2018-03-01T16:17:13 | 123,455,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | from django.conf.urls import url
from users import views
# URL routes for the users app; each name is used for reverse() lookups.
urlpatterns = [
    # Landing page for the users app.
    url(r'^$', views.UserHome.as_view(), name='userhome'),
    # Authentication: sign in and sign up.
    url(r'^login/$', views.SignInView.as_view(), name='signin'),
    # Content owned by the current user.
    url(r'^posts/$', views.UserPosts.as_view(), name='users_posts'),
    url(r'^comments/$', views.UserComments.as_view(), name='users_comments'),
    url(r'^signup/$', views.SignUpView.as_view(), name='signup')
]
"prabhu@dozee.io"
] | prabhu@dozee.io |
dbe6a7ff336e2612bcf19649381dabd0fb64385a | 2521d80e163140303bac669fb44955fe4ee27eb3 | /learn-sun/nushio3/03-learn-one-image.py | c0acb338056d28ccde5ca55b0873affacbfed81f | [] | no_license | space-weather-KU/chainer-semi | b71ae6fc2d1c8e8b8806ab285d8bdfcb2ff12b2a | ac482bea8304168307d70ce6a2fe0157e2062399 | refs/heads/master | 2020-12-24T07:53:52.521897 | 2017-03-04T17:25:14 | 2017-03-04T17:25:14 | 73,357,685 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import json, urllib, numpy as np, matplotlib.pylab as plt, matplotlib.ticker as mtick
import sunpy.map
from astropy.io import fits
from sunpy.cm import color_tables as ct
import sunpy.wcs as wcs
import datetime
import matplotlib.dates as mdates
import matplotlib.colors as mcol
import matplotlib.patches as ptc
from matplotlib.dates import *
import math
import scipy.ndimage.interpolation as interpolation
import chainer
from chainer import datasets
from chainer import links as L
from chainer import functions as F
from chainer import Variable, optimizers
image_size = 1023
image_wavelength = 1600
def get_sun_image(time, wavelength = image_wavelength):
    """Download the AIA level-1 image nearest *time* for *wavelength* from
    the JSOC export service.

    Returns the exposure-normalised pixel array resampled to
    image_size x image_size, or None if the download or parsing fails.
    """
    try:
        time_str = time.strftime("%Y.%m.%d_%H:%M:%S")
        # First request: record metadata plus the path of the FITS segment.
        url = "http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_info?ds=aia.lev1[{}_TAI/12s][?WAVELNTH={}?]&op=rs_list&key=T_REC,CROTA2,CDELT1,CDELT2,CRPIX1,CRPIX2,CRVAL1,CRVAL2&seg=image_lev1".format(time_str, wavelength)
        response = urllib.urlopen(url)
        data = json.loads(response.read())
        filename = data['segments'][0]['values'][0]
        url = "http://jsoc.stanford.edu"+filename
        chromosphere_image = fits.open(url) # download the data
        # WCS pointing/rotation keywords — parsed here but not used further
        # in this function.
        T_REC = data['keywords'][0]['values'][0]
        CROTA2_AIA = float(data['keywords'][1]['values'][0])
        CDELT1_AIA = float(data['keywords'][2]['values'][0])
        CDELT2_AIA = float(data['keywords'][3]['values'][0])
        CRPIX1_AIA = float(data['keywords'][4]['values'][0])
        CRPIX2_AIA = float(data['keywords'][5]['values'][0])
        CRVAL1_AIA = float(data['keywords'][6]['values'][0])
        CRVAL2_AIA = float(data['keywords'][7]['values'][0])
        chromosphere_image.verify("fix")
        exptime = chromosphere_image[1].header['EXPTIME']
        original_width = chromosphere_image[1].data.shape[0]
        # Resample to image_size pixels wide and normalise by exposure time.
        return interpolation.zoom(chromosphere_image[1].data, image_size / float(original_width)) / exptime
    except Exception as e:
        print e.message
        return None
def get_normalized_image_variable(time, wavelength = image_wavelength):
    """Fetch the AIA image at *time* and return it as a chainer Variable of
    shape (1, 1, H, W), squashed into (0, 1) via sigmoid(pixels / 100)."""
    raw = get_sun_image(time, wavelength)
    # Add batch and channel axes, then cast to float32 for the network.
    batched = raw[np.newaxis, np.newaxis, :, :].astype(np.float32)
    return F.sigmoid(Variable(batched) / 100)
def plot_sun_image(img, filename, wavelength=image_wavelength, title = '', vmin=0.5, vmax = 1.0):
    """Render *img* with the SDO/AIA colormap for *wavelength*, set *title*,
    save the figure to *filename*, then close all open figures."""
    colormap = plt.get_cmap('sdoaia{}'.format(wavelength))
    plt.title(title)
    plt.imshow(img, cmap=colormap, origin='lower', vmin=vmin, vmax=vmax)
    plt.savefig(filename)
    plt.close("all")
# Forecast model with the convolution stack deepened to six layers.
class SunPredictor(chainer.Chain):
    """Encoder/decoder network mapping one solar image to a predicted one.

    Six stride-2 convolutions shrink the input; six stride-2 deconvolutions
    expand it back to a single-channel map squashed into (0, 1).
    """

    def __init__(self):
        super(SunPredictor, self).__init__(
            # the size of the inputs to each layer will be inferred
            c1=L.Convolution2D(None, 2, 3, stride=2),
            c2=L.Convolution2D(None, 4, 3, stride=2),
            c3=L.Convolution2D(None, 8, 3, stride=2),
            c4=L.Convolution2D(None, 16, 3, stride=2),
            c5=L.Convolution2D(None, 32, 3, stride=2),
            c6=L.Convolution2D(None, 64, 3, stride=2),
            d6=L.Deconvolution2D(None, 32, 3, stride=2),
            d5=L.Deconvolution2D(None, 16, 3, stride=2),
            d4=L.Deconvolution2D(None, 8, 3, stride=2),
            d3=L.Deconvolution2D(None, 4, 3, stride=2),
            d2=L.Deconvolution2D(None, 2, 3, stride=2),
            d1=L.Deconvolution2D(None, 1, 3, stride=2)
        )

    def __call__(self, x):
        hidden = x
        # ReLU after every layer except the final deconvolution.
        for layer in (self.c1, self.c2, self.c3, self.c4, self.c5, self.c6,
                      self.d6, self.d5, self.d4, self.d3, self.d2):
            hidden = F.relu(layer(hidden))
        # The last layer maps to one channel; sigmoid keeps pixels in (0, 1).
        return F.sigmoid(self.d1(hidden))
model = SunPredictor()
opt = chainer.optimizers.Adam()
opt.use_cleargrads()
opt.setup(model)
# Input/target pair: the Sun at time t and 24 hours later.
t = datetime.datetime(2014,5,25,19,00,00)
dt = datetime.timedelta(hours = 24)
# Train endlessly on this single image pair (deliberate single-sample demo).
img_input = get_normalized_image_variable(t)
plot_sun_image(img_input.data[0,0], "image-input.png", title = 'before')
img_observed = get_normalized_image_variable(t+dt)
plot_sun_image(img_observed.data[0,0], "image-future-observed.png", title = 'after')
epoch = 0
while True:
    img_predicted = model(img_input)
    if epoch%25 ==0:
        # Snapshot the current prediction every 25 epochs.
        plot_sun_image(img_predicted.data[0,0], "image-future-predicted.png", title = '{}th epoch'.format(epoch))
    # L2 (Frobenius) distance between prediction and observation.
    loss = F.sqrt(F.sum((img_predicted - img_observed)**2))
    model.cleargrads()
    loss.backward()
    opt.update()
    epoch+=1
| [
"muranushi@gmail.com"
] | muranushi@gmail.com |
59abb941173fc174e7c1871a202d8b4af137e040 | 68ee9027d4f780e1e5248a661ccf08427ff8d106 | /extra/unused/qgisRasterColorscale.py | 1ced8315150eba200810e34a79bad3ffd8fa1c6c | [
"MIT"
] | permissive | whyjz/CARST | 87fb9a6a62d39fd742bb140bddcb95a2c15a144c | 4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b | refs/heads/master | 2023-05-26T20:27:38.105623 | 2023-04-16T06:34:44 | 2023-04-16T06:34:44 | 58,771,687 | 17 | 4 | MIT | 2021-03-10T01:26:04 | 2016-05-13T20:54:42 | Python | UTF-8 | Python | false | false | 1,429 | py | #!/usr/bin/python
# qgisRasterColorscale.py
# Author: Andrew Kenneth Melkonian
# All rights reserved
def qgisRasterColorscale(qgs_path, qml_path):
    """Splice the <rasterrenderer> block from the QML style at *qml_path*
    into the QGIS project at *qgs_path*, writing the merged project to a
    file named "temp" in the current directory.

    Fixes over the original: file handles are closed via `with` even on
    error, and `os`/`re` are imported locally (the module previously only
    imported `os` under the __main__ guard, so calling this function from
    another module raised NameError).
    """
    import os
    import re

    assert os.path.exists(qgs_path), "\n***** ERROR: " + qgs_path + " does not exist\n"
    assert os.path.exists(qml_path), "\n***** ERROR: " + qml_path + " does not exist\n"

    with open(qml_path) as infile:
        raster_renderer = infile.read()

    # Keep only the <rasterrenderer ...> ... </rasterrenderer> span (the
    # leading \s* preserves the original's whitespace-inclusive match).
    raster_renderer = raster_renderer[re.search(r"\s*<raster", raster_renderer).start(0):
                                      re.search("</rasterrenderer>", raster_renderer).end(0)]

    raster_section = False
    with open("temp", "w") as outfile, open(qgs_path, "r") as infile:
        for line in infile:
            if line.find("<rasterrenderer") > -1:
                # Replace the project's renderer block with the QML's.
                raster_section = True
                outfile.write(raster_renderer + "\n")
            elif line.find("</rasterrenderer") > -1:
                raster_section = False
            elif not raster_section:
                outfile.write(line)
    return
if __name__ == "__main__":
import os;
import sys;
assert len(sys.argv) > 2, "\n***** ERROR: qgisRasterColorscale.py requires 2 arguments, " + str(len(sys.argv) - 1) + " given\n";
assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
assert os.path.exists(sys.argv[2]), "\n***** ERROR: " + sys.argv[2] + " does not exist\n";
qgisRasterColorscale(sys.argv[1], sys.argv[2]);
exit();
| [
"wz278@cornell.edu"
] | wz278@cornell.edu |
06dc48c81124fea793ef637fde3fec4caa144662 | 8f5ee885986e9a0ec8816c32a9ad2966fb747f7d | /src/aido_schemas/estimation_demo.py | eb7931f53cb1b8c34e57b5ea740582f2ec41d9cf | [] | no_license | duckietown/aido-protocols | 3cca7564738d645785a5cc242bb39fd53936af0a | 47b551d80151a76aba05f76a13e516f9fa06749c | refs/heads/daffy | 2023-04-13T08:57:28.079004 | 2022-11-29T13:18:35 | 2022-11-29T13:18:35 | 169,989,925 | 1 | 1 | null | 2021-10-31T22:48:30 | 2019-02-10T15:00:05 | Python | UTF-8 | Python | false | false | 523 | py | from .basics import InteractionProtocol
__all__ = ["protocol_simple_predictor"]
protocol_simple_predictor = InteractionProtocol(
description="""
An estimator receives a stream of values and must predict the next value.
""".strip(),
inputs={"observations": float, "seed": int, "get_prediction": type(None)},
outputs={"prediction": float},
language="""
in:seed? ;
(in:observations |
(in:get_prediction ; out:prediction)
)*
""",
)
| [
"acensi@ethz.ch"
] | acensi@ethz.ch |
d7546ddca3ea3c4970e80b72cc378b3eb2bc535b | 663d807308b64c52c6c2ad93bda5d462540a36d3 | /mylib.py | 94073e5a1b7705cc571c43b409ace4b47b05a81c | [] | no_license | alex-bulgakov/pftp | 95e6ee7aa62589bc61975123b58b42dde20ce2e9 | 58b9e12aab4b290f4a19a527747e25a57d9d427c | refs/heads/master | 2023-04-25T02:07:39.904759 | 2021-05-05T05:27:09 | 2021-05-05T05:27:09 | 362,436,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def get_ls(ftp_handle, name):
result = {}
if (name != ''):
ftp_handle.cwd(name);
for i in ftp_handle.mlsd():
if (i[1]['type'] == 'dir'):
result[i[0]] = True
else:
result[i[0]] = False
return result
def print_list(list):
    """Write every element of *list* to stdout, one per line."""
    for element in list:
        print(element)
"master8423@gmail.com"
] | master8423@gmail.com |
7dfccc7d6a73e02339111529b2e6ddc4f91d759c | 4f856a87be2ca95330416d8a1d461a03b8590674 | /Experiments/vstca_vstnca_2.py | 00eb35a3d2d296e279283817f25cce20f6121112 | [] | no_license | oscarcorreag/PhD-code | ea71f3b7cdbd0e42f9f0a141790f73b1bfdd13bb | 2a1a9bb22f5cd0332f6cf8491be9fa801966e89a | refs/heads/master | 2021-06-26T12:47:20.497517 | 2020-11-04T12:03:56 | 2020-11-04T12:03:56 | 143,695,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,469 | py | import csv
import getopt
import sys
import time
import numpy as np
import math
from mpi4py import MPI
from grid_digraph_generator import GridDigraphGenerator
from link_performance import bpr
from utils import distribute_pois_in_queries
from vst_rs import VST_RS
MASTER_RANK = 0
def print_usage():
    """Print the command-line usage help for this script."""
    usage_lines = (
        'usage is: vstca_vstnca_2.py -m <parallelisation_method> where:',
        '    <parallelisation_method> can be: [pp|mpi|n]',
        '        pp: Parallel Python',
        '        mpi: MPI',
        '        n: No parallelization',
    )
    for usage_line in usage_lines:
        print(usage_line)
def main(argv):
    """Benchmark VST-NCA against VST-CA (mixed and pure variants) on a 30x30
    capacitated grid, with POIs either zipfian- or uniformly-distributed,
    and dump every measurement row to a timestamped CSV under files/."""
    # -m selects the parallelisation backend (pp | mpi | n).
    p_method = "pp"
    try:
        opts, args = getopt.getopt(argv, "hm:")
    except getopt.GetoptError as error:
        print(error)
        print_usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print_usage()
            sys.exit(0)
        elif opt == "-m":
            p_method = arg
            break
    comm = None
    rank = MASTER_RANK
    if p_method == "mpi":
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
    if rank != MASTER_RANK:
        # MPI worker ranks just echo whatever the master sends; they never
        # return from this loop.
        while True:
            res = comm.recv(source=MASTER_RANK)
            print res
    # Experiment parameters.
    num_samples = 5
    num_queries = [16, 32]
    num_users_query = [16]
    prop_pois_users = 0.1
    m = n = 30
    N = m * n
    graph = GridDigraphGenerator().generate(m, n, edge_weighted=True)
    merge_users = False
    max_iter = 50
    alpha = 1.0
    beta = 4.0
    results = []
    for nq in num_queries:
        for nu in num_users_query:
            num_pois = max(int(prop_pois_users * nu), 1)
            graph.capacitated = True
            # Edge capacity scales with the expected total demand (nu/4 * nq).
            capacity = int(math.ceil((nu / 4.0 * nq) / 12.0))
            graph.set_capacities({e: capacity for e in graph.get_edges()})
            print "(nq, nu, np, cap):", (nq, nu, num_pois, capacity)
            for sample in range(num_samples):
                print "\tsample:", sample
                ppq = distribute_pois_in_queries((m, n), nq, num_pois, seed=0)
                # queries_z use the zipfian POI placement from ppq; queries_u
                # share the same terminals but draw POIs uniformly.
                queries_u = []
                queries_z = []
                #
                all_pois = []
                for ps in ppq.values():
                    all_pois.extend(ps)
                free_nodes = set(range(m * n)).difference(all_pois)
                #
                occupied_t = set()
                occupied_p = set()
                for i, pois_z in ppq.iteritems():
                    np.random.seed(sample * i)
                    # Terminals (users) drawn from nodes not already taken.
                    where_t = set(free_nodes).difference(occupied_t)
                    terminals = np.random.choice(a=list(where_t), size=nu, replace=False)
                    queries_z.append((terminals, pois_z))
                    occupied_t.update(terminals)
                    occupied_p.update(terminals)
                    # Uniform POIs avoid every node already used as a
                    # terminal or POI.
                    where_p = set(range(m * n)).difference(occupied_p)
                    pois_u = np.random.choice(a=list(where_p), size=num_pois, replace=False)
                    queries_u.append((terminals, pois_u))
                    occupied_p.update(pois_u)
                #
                # VST-NCA **********************************************************************************************
                # POIs Zipfian distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy = \
                    vst_rs.non_congestion_aware(queries_z, 4, 8, bpr, merge_users=merge_users, alpha=alpha, beta=beta,
                                                p_method=p_method, verbose=False)
                et = time.clock() - st
                line = ["VST-NCA", "N/A", "zipfian", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, 0, et, alpha, beta, entropy]
                print line
                results.append(line)
                # POIs Uniformly distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy = \
                    vst_rs.non_congestion_aware(queries_u, 4, 8, bpr, merge_users=merge_users, alpha=alpha, beta=beta,
                                                p_method=p_method, verbose=False)
                et = time.clock() - st
                line = ["VST-NCA", "N/A", "uniform", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, 0, et, alpha, beta, entropy]
                print line
                results.append(line)
                # VST-NCA **********************************************************************************************
                # VST-CA ***********************************************************************************************
                # MIXED (randomize=True); ni is the iteration count, marked
                # "(*)" when the max_iter budget was exhausted.
                # POIs Zipfian distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy, ni = \
                    vst_rs.congestion_aware(queries_z, 4, 8, bpr, merge_users=merge_users, max_iter=max_iter,
                                            alpha=alpha, beta=beta, verbose=False, randomize=True, p_method=p_method)
                et = time.clock() - st
                ni_ = str(ni)
                if ni == max_iter:
                    ni_ += "(*)"
                line = ["VST-CA", "mixed", "zipfian", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, ni_, et, alpha, beta, entropy]
                print line
                results.append(line)
                # POIs Uniformly distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy, ni = \
                    vst_rs.congestion_aware(queries_u, 4, 8, bpr, merge_users=merge_users, max_iter=max_iter,
                                            alpha=alpha, beta=beta, verbose=False, randomize=True, p_method=p_method)
                et = time.clock() - st
                ni_ = str(ni)
                if ni == max_iter:
                    ni_ += "(*)"
                line = ["VST-CA", "mixed", "uniform", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, ni_, et, alpha, beta, entropy]
                print line
                results.append(line)
                # PURE (randomize=False)
                # POIs Zipfian distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy, ni = \
                    vst_rs.congestion_aware(queries_z, 4, 8, bpr, merge_users=merge_users, max_iter=max_iter,
                                            alpha=alpha, beta=beta, verbose=False, randomize=False, p_method=p_method)
                et = time.clock() - st
                ni_ = str(ni)
                if ni == max_iter:
                    ni_ += "(*)"
                line = ["VST-CA", "pure", "zipfian", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, ni_, et, alpha, beta, entropy]
                print line
                results.append(line)
                # POIs Uniformly distributed.
                vst_rs = VST_RS(graph)
                st = time.clock()
                _, c, warl, mwrl, mrl1, mrl2, entropy, ni = \
                    vst_rs.congestion_aware(queries_u, 4, 8, bpr, merge_users=merge_users, max_iter=max_iter,
                                            alpha=alpha, beta=beta, verbose=False, randomize=False, p_method=p_method)
                et = time.clock() - st
                ni_ = str(ni)
                if ni == max_iter:
                    ni_ += "(*)"
                line = ["VST-CA", "pure", "uniform", N, capacity, merge_users, sample, nq, nu,
                        prop_pois_users, num_pois, c, warl, mwrl, mrl1, mrl2, ni_, et, alpha, beta, entropy]
                print line
                results.append(line)
                # VST-CA ***********************************************************************************************
    # Persist every measurement row to a timestamped CSV.
    result_file = open("files/vstca_vstnca_2_" + time.strftime("%d%b%Y_%H%M%S") + ".csv", 'wb')
    wr = csv.writer(result_file)
    wr.writerows(results)
main(sys.argv[1:])
| [
"oscarcorreag@gmail.com"
] | oscarcorreag@gmail.com |
55115fcc0c3e7a1a0da991f74b56743011deafc8 | 79d6bf8f380a758285fc06fc7bc390bd30f8349b | /build/urg_node-indigo-devel/catkin_generated/pkg.installspace.context.pc.py | 15b28753234a568c1b4129fdae64f1ea02aad184 | [] | no_license | 18744012771/jrc_agv_ws | 77b1b4fbe9733c6826ddc2e1fd1c9b98a70b4aa8 | 8feb380f3b57869b56706c557f85849572968d48 | refs/heads/master | 2020-04-23T06:12:15.130615 | 2019-01-03T07:01:49 | 2019-01-03T07:01:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context; these values feed pkg-config (.pc)
# file generation for the urg_node package.
CATKIN_PACKAGE_PREFIX = ""
# Install-space include directories (';'-separated string, or [] when empty).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/brucechen/nav_ws/install/include".split(';') if "/home/brucechen/nav_ws/install/include" != "" else []
# Catkin packages this package depends on, space-separated for pkg-config.
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;laser_proc;message_runtime;nodelet;rosconsole;roscpp;sensor_msgs;std_msgs;std_srvs;urg_c".replace(';', ' ')
# Linker flags for the libraries this package exports.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lurg_c_wrapper;-lurg_node_driver".split(';') if "-lurg_c_wrapper;-lurg_node_driver" != "" else []
PROJECT_NAME = "urg_node"
# Root of the install space this context was generated for.
PROJECT_SPACE_DIR = "/home/brucechen/nav_ws/install"
PROJECT_VERSION = "0.1.11"
"cbbsjtu@126.com"
] | cbbsjtu@126.com |
c4ac861f2ee0b8e2fc382f3d37a11fd699b479ca | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /plugins/lookup/cyberarkpassword.py | 79e855c22d4b5573ba40e8c231017a3b2e10e868 | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | # (c) 2017, Edward Nunez <edward.nunez@cyberark.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
lookup: cyberarkpassword
short_description: get secrets from CyberArk AIM
requirements:
- CyberArk AIM tool installed
description:
- Get secrets from CyberArk AIM.
options :
_command:
description: Cyberark CLI utility.
env:
- name: AIM_CLIPASSWORDSDK_CMD
default: '/opt/CARKaim/sdk/clipasswordsdk'
appid:
description: Defines the unique ID of the application that is issuing the password request.
required: True
query:
description: Describes the filter criteria for the password retrieval.
required: True
output:
description:
- Specifies the desired output fields separated by commas.
- "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
default: 'password'
_extra:
description: for extra_parms values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
note:
- For Ansible on windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe
'''
EXAMPLES = """
- name: passing options to the lookup
debug: msg={{ lookup("cyberarkpassword", cyquery)}}
vars:
cyquery:
appid: "app_ansible"
query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
- name: used in a loop
debug: msg={{item}}
with_cyberarkpassword:
appid: 'app_ansible'
query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
"""
RETURN = """
password:
description:
- The actual value stored
passprops:
description: properties assigned to the entry
type: dictionary
passwordchangeinprocess:
description: did the password change?
"""
import os
import subprocess
from subprocess import PIPE
from subprocess import Popen
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.parsing.splitter import parse_kv
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes, to_text, to_native
from ansible.utils.display import Display
display = Display()
CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk')
class CyberarkPassword:
    """Retrieve secrets from CyberArk AIM by shelling out to the
    ``clipasswordsdk`` command-line utility (path taken from the
    AIM_CLIPASSWORDSDK_CMD environment variable, see module top).

    Raises AnsibleError at construction time when ``appid`` or ``query``
    is missing.
    """

    def __init__(self, appid=None, query=None, output=None, **kwargs):
        self.appid = appid
        self.query = query
        self.output = output

        # Support for Generic parameters to be able to specify
        # FailRequestOnPasswordChange, Queryformat, Reason, etc.
        self.extra_parms = []
        for key, value in kwargs.items():
            self.extra_parms.append('-p')
            self.extra_parms.append("%s=%s" % (key, value))

        if self.appid is None:
            raise AnsibleError("CyberArk Error: No Application ID specified")

        if self.query is None:
            raise AnsibleError("CyberArk Error: No Vault query specified")

        if self.output is None:
            # If no output is specified, return at least the password
            self.output = "password"
        else:
            # To avoid reference issues/confusion to values, all
            # output 'keys' will be in lowercase.
            self.output = self.output.lower()

        self.b_delimiter = b"@#@"  # Known delimiter to split output results

    def get(self):
        """Invoke clipasswordsdk once and parse its delimited stdout.

        Returns a one-element list containing a dict keyed by the requested
        output field names; ``passprops.*`` fields are nested under a
        ``passprops`` sub-dict.  Raises AnsibleError when the tool writes
        to stderr or cannot be executed.
        """
        result_dict = {}

        try:
            # Build the full command line; the generic '-p' parameters
            # collected in __init__ are appended at the end.
            all_parms = [
                CLIPASSWORDSDK_CMD,
                'GetPassword',
                '-p', 'AppDescs.AppID=%s' % self.appid,
                '-p', 'Query=%s' % self.query,
                '-o', self.output,
                '-d', self.b_delimiter]

            all_parms.extend(self.extra_parms)

            b_credential = b""
            # Everything is handed to Popen as bytes.
            b_all_params = [to_bytes(v) for v in all_parms]

            tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate()

            if tmp_output:
                b_credential = to_bytes(tmp_output)

            if tmp_error:
                # Any stderr output is treated as a hard failure.
                raise AnsibleError("ERROR => %s " % (tmp_error))

            if b_credential and b_credential.endswith(b'\n'):
                # Strip the single trailing newline emitted by the tool.
                b_credential = b_credential[:-1]

            # Field names and values are positionally aligned: the tool
            # emits one value per requested output field, joined by the
            # known delimiter.
            output_names = self.output.split(",")
            output_values = b_credential.split(self.b_delimiter)

            for i in range(len(output_names)):
                if output_names[i].startswith("passprops."):
                    if "passprops" not in result_dict:
                        result_dict["passprops"] = {}
                    output_prop_name = output_names[i][10:]  # drop 'passprops.' prefix
                    result_dict["passprops"][output_prop_name] = to_native(output_values[i])
                else:
                    result_dict[output_names[i]] = to_native(output_values[i])

        except subprocess.CalledProcessError as e:
            raise AnsibleError(e.output)
        except OSError as e:
            # Typically: AIM not installed, or the SDK binary is missing
            # from the expected location.
            raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))

        return [result_dict]
class LookupModule(LookupBase):
    """Ansible lookup plugin entry point.

    Accepts either a single term dict or a list of term dicts (each dict is
    passed through as keyword arguments to CyberarkPassword) and returns
    the retrieved secret dict(s).
    """

    def run(self, terms, variables=None, **kwargs):
        display.vvvv("%s" % terms)
        if isinstance(terms, list):
            # One lookup per term; results are collected in order.
            return_values = []
            for term in terms:
                display.vvvv("Term: %s" % term)
                cyberark_conn = CyberarkPassword(**term)
                return_values.append(cyberark_conn.get())
            return return_values
        else:
            # Single term dict: return its result list directly.
            cyberark_conn = CyberarkPassword(**terms)
            result = cyberark_conn.get()
            return result
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
8da4424e9f6b851a48964db0046f00d0ae6d4274 | 36c1a54444e8dfa1808700b7b6df48cd83785b63 | /forms.py | 14ad66b65b43367ee089640093424e77fc098e64 | [] | no_license | agrawalkaran/ASSIGNMENT-SUBMISSION | eba82634d265bfe4d99303ce35dfc8a5efe42325 | 36e8e7ac271dc8d773488229b9363a5395b8b472 | refs/heads/main | 2023-03-22T03:35:25.539215 | 2021-03-07T12:16:40 | 2021-03-07T12:16:40 | 345,307,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField,RadioField,SelectField,TextAreaField,TextField
from wtforms.validators import DataRequired, Length, Email, EqualTo,ValidationError
from wtforms.fields.html5 import DateField
from wtforms import validators
from flask import Flask, render_template,request,flash,session,url_for,redirect,session
class RegistrationForm(FlaskForm):
    """Student sign-up form; all server-side validation is declared on the
    WTForms fields below.

    NOTE(review): the Regexp validators (e.g. '[0-9]{11}' for Enrollment,
    '[a-zA-Z]' for name) are unanchored, so they only require a match at
    the start of the input -- longer or mixed input can still pass.
    Confirm this is intended before tightening with ^...$ anchors.
    """
    name = StringField("Fullname",validators=[validators.DataRequired("Please enter your Full name."),validators.Regexp(regex="[a-zA-Z]",message="Fullname Should Only Contain Letters")])
    email = StringField('Email',validators=[DataRequired("Please enter your Email."), Email()])
    password = PasswordField('Password', validators=[DataRequired("Please enter your Password.")])
    confirm_password = PasswordField('Confirm Password',validators=[DataRequired("Please enter your Confirm Password."), EqualTo('password')])
    Enrollment = StringField('Enrollment No',validators=[DataRequired("Please enter your Enrollment Number."),validators.Regexp(regex='[0-9]{11}',message="Enrollment Should Only Contain Eleven Numbers")])
    Gender = RadioField('Gender', choices = [('M','Male'),('F','Female')])
    birth = DateField("Date Of Birth", format='%Y-%m-%d', validators=[DataRequired(message="Please Select the Date Of Birth")],)
    contact = StringField('Mobile Number',validators=[DataRequired("Please enter your Mobile Number."),validators.Regexp(regex='(((\+){1}91){1})? ?-?[0-9]{10}',message="Please Enter Valid Mobile Number")])
    semester=SelectField('Your Semester:', coerce=int,choices=[(0, 'Please Select...'), (1, '1'),(2, '2'),(3, '3'),(4, '4'),(5, '5'),(6, '6'),(7, '7'),(8, '8')],validators=[DataRequired("Please enter your Semester.")])
    city=SelectField('Your City:', choices=[('0', 'Please Select...'), ('Ahmedabad','Ahmedabad')],validators=[DataRequired("Please enter your City.")])
    state=SelectField('Your State:', choices=[('0', 'Please Select...'), ('Gujarat','Gujarat')],validators=[DataRequired()])
    Address = TextAreaField('Address:',validators=[DataRequired("Please enter your Address.")])
    pincode = StringField('Pincode',validators=[DataRequired("Please enter your Pincode."),validators.Regexp(regex='[0-9]{6}',message="Pincode Should Only Contain Six Numbers") ])
    submit = SubmitField('Sign Up')
class EditProfileForm(FlaskForm):
    """Student profile-edit form; field-for-field duplicate of
    RegistrationForm except for the submit button label.

    NOTE(review): consider deriving from a shared base form to avoid
    keeping these near-identical field lists in sync by hand.
    """
    name = StringField("Fullname",validators=[validators.DataRequired("Please enter your Full name."),validators.Regexp(regex="[a-zA-Z]",message="Fullname Should Only Contain Letters")])
    email = StringField('Email',validators=[DataRequired("Please enter your Email."), Email()])
    password = PasswordField('Password', validators=[DataRequired("Please enter your Password.")])
    confirm_password = PasswordField('Confirm Password',validators=[DataRequired("Please enter your Confirm Password."), EqualTo('password')])
    Enrollment = StringField('Enrollment No',validators=[DataRequired("Please enter your Enrollment Number."),validators.Regexp(regex='[0-9]{11}',message="Enrollment Should Only Contain Eleven Numbers")])
    Gender = RadioField('Gender', choices = [('M','Male'),('F','Female')])
    birth = DateField("Date Of Birth", format='%Y-%m-%d', validators=[DataRequired(message="Please Select the Date Of Birth")],)
    contact = StringField('Mobile Number',validators=[DataRequired("Please enter your Mobile Number."),validators.Regexp(regex='(((\+){1}91){1})? ?-?[0-9]{10}',message="Please Enter Valid Mobile Number")])
    semester=SelectField('Your Semester:', coerce=int,choices=[(0, 'Please Select...'), (1, '1'),(2, '2'),(3, '3'),(4, '4'),(5, '5'),(6, '6'),(7, '7'),(8, '8')],validators=[DataRequired("Please enter your Semester.")])
    city=SelectField('Your City:',choices=[('0', 'Please Select...'), ('Ahmedabad','Ahmedabad')],validators=[DataRequired("Please enter your City.")])
    state=SelectField('Your State:', choices=[('0', 'Please Select...'), ('Gujarat','Gujarat')],validators=[DataRequired()])
    Address = TextAreaField('Address:',validators=[DataRequired("Please enter your Address.")])
    pincode = StringField('Pincode',validators=[DataRequired("Please enter your Pincode."),validators.Regexp(regex='[0-9]{6}',message="Pincode Should Only Contain Six Numbers") ])
    submit = SubmitField('Update Profile')
class TeacherRegistrationForm(FlaskForm):
    """Teacher sign-up form.

    NOTE(review): the 'department' SelectField uses an integer 0 for the
    placeholder choice while the other SelectFields use the string '0';
    confirm this inconsistency does not break placeholder validation.
    """
    name = StringField("Fullname",validators=[validators.DataRequired("Please enter your Full name."),validators.Regexp(regex="[a-zA-Z]",message="Fullname Should Only Contain Letters")])
    email = StringField('Email',validators=[DataRequired("Please enter your Email."), Email()])
    password = PasswordField('Password', validators=[DataRequired("Please enter your Password.")])
    confirm_password = PasswordField('Confirm Password',validators=[DataRequired("Please enter your Confirm Password."), EqualTo('password')])
    Tid = StringField('Teacher Id',validators=[DataRequired("Please enter your Enrollment Number."),validators.Regexp(regex='[0-9]{11}',message="Enrollment Should Only Contain Eleven Numbers")])
    Gender = RadioField('Gender', choices = [('M','Male'),('F','Female')])
    birth = DateField("Date Of Birth", format='%Y-%m-%d', validators=[DataRequired(message="Please Select the Date Of Birth")],)
    contact = StringField('Mobile Number',validators=[DataRequired("Please enter your Mobile Number."),validators.Regexp(regex='(((\+){1}91){1})? ?-?[0-9]{10}',message="Please Enter Valid Mobile Number")])
    department=SelectField('Your Department:', choices=[(0, 'Please Select...'), ('CSE(Computer Science and Engineering)', 'CSE(Computer Science and Engineering)'),('ICT(Information Communication Technology)', 'ICT(Information Communication Technology)')],validators=[DataRequired("Please enter your Semester.")])
    qualifications=SelectField('Your Qualifications:', choices=[('0', 'Please Select...'), ('B.TECH','B.TECH'),('M.TECH','M.TECH')],validators=[DataRequired("Please enter your City.")])
    designation=SelectField('Your Designation:', choices=[('0', 'Please Select...'), ('Head Of Department(HOD)','Head Of Department(HOD)'),('Professor','Professor'),('Assistant Professor','Assistant Professor')],validators=[DataRequired()])
    Address = TextAreaField('Address:',validators=[DataRequired("Please enter your Address.")])
    pincode = StringField('Pincode',validators=[DataRequired("Please enter your Pincode."),validators.Regexp(regex='[0-9]{6}',message="Pincode Should Only Contain Six Numbers") ])
    submit = SubmitField('Sign Up')
class EditTeacherProfile(FlaskForm):
    """Teacher profile-edit form; field-for-field duplicate of
    TeacherRegistrationForm except for the submit button label.

    NOTE(review): consider a shared base form to avoid maintaining these
    duplicated field lists by hand.
    """
    name = StringField("Fullname",validators=[validators.DataRequired("Please enter your Full name."),validators.Regexp(regex="[a-zA-Z]",message="Fullname Should Only Contain Letters")])
    email = StringField('Email',validators=[DataRequired("Please enter your Email."), Email()])
    password = PasswordField('Password', validators=[DataRequired("Please enter your Password.")])
    confirm_password = PasswordField('Confirm Password',validators=[DataRequired("Please enter your Confirm Password."), EqualTo('password')])
    Tid = StringField('Teacher Id',validators=[DataRequired("Please enter your Enrollment Number."),validators.Regexp(regex='[0-9]{11}',message="Enrollment Should Only Contain Eleven Numbers")])
    Gender = RadioField('Gender', choices = [('M','Male'),('F','Female')])
    birth = DateField("Date Of Birth", format='%Y-%m-%d', validators=[DataRequired(message="Please Select the Date Of Birth")],)
    contact = StringField('Mobile Number',validators=[DataRequired("Please enter your Mobile Number."),validators.Regexp(regex='(((\+){1}91){1})? ?-?[0-9]{10}',message="Please Enter Valid Mobile Number")])
    department=SelectField('Your Department:', choices=[(0, 'Please Select...'), ('CSE(Computer Science and Engineering)', 'CSE(Computer Science and Engineering)'),('ICT(Information Communication Technology)', 'ICT(Information Communication Technology)')],validators=[DataRequired("Please enter your Semester.")])
    qualifications=SelectField('Your Qualifications:', choices=[('0', 'Please Select...'), ('B.TECH','B.TECH'),('M.TECH','M.TECH')],validators=[DataRequired("Please enter your City.")])
    designation=SelectField('Your Designation:', choices=[('0', 'Please Select...'), ('Head Of Department(HOD)','Head Of Department(HOD)'),('Professor','Professor'),('Assistant Professor','Assistant Professor')],validators=[DataRequired()])
    Address = TextAreaField('Address:',validators=[DataRequired("Please enter your Address.")])
    pincode = StringField('Pincode',validators=[DataRequired("Please enter your Pincode."),validators.Regexp(regex='[0-9]{6}',message="Pincode Should Only Contain Six Numbers") ])
    submit = SubmitField('Update Profile')
class LoginForm(FlaskForm):
    """Login form: e-mail + password with a 'remember me' checkbox."""
    email = StringField('Email',validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
| [
"noreply@github.com"
] | noreply@github.com |
c87aa7b331421cf19d7fc2a8bbc3abc5a955c02c | 9dda882a68cc7e16550e25b12917fc1649f3a868 | /app.py | 4dfe2fe40db21995e9db5d661acc6b7118909701 | [] | no_license | thedhamale/Loan-Prediction- | 8cc88a5020104e7ecb13bfe2bb25bad551ef0dce | dda4b18129cd9a9bd534cbed81f23d1c71f66cfc | refs/heads/master | 2023-01-07T10:18:10.717126 | 2020-10-25T09:19:47 | 2020-10-25T09:19:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | from flask import Flask, render_template, request
import pickle
import numpy as np
app = Flask(__name__)

# Load the trained classifier once at import time.  Use a context manager
# so the pickle file handle is closed instead of leaked (the original
# passed an unclosed open() directly to pickle.load).
with open('XGBoost4.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/',methods=['GET'])
def Home():
    """Render the landing page containing the loan-prediction form."""
    return render_template('html.html')
@app.route("/predict", methods=['POST'])
def predict():
    """Encode the submitted form fields into the model's feature vector and
    render the page again with an approve/deny message.

    NOTE(review): the feature order below is assumed to match the order
    used at training time -- not verifiable from this file.
    """
    Property_Area_Semiurban = 0
    if request.method == 'POST':
        # One-hot style encoding: each categorical answer becomes 0/1.
        Credit_History = request.form['Credit_History']
        if(Credit_History == 'Yes'):
            Credit_History = 1
        else:
            Credit_History = 0

        Gender_Male = request.form.get("Gender_Male", False)
        if(Gender_Male == 'Male'):
            Gender_Male = 1
        else:
            Gender_Male = 0

        Married_Yes = request.form.get('Married_Yes', False)
        if(Married_Yes == 'Yes'):
            Married_Yes = 1
        else:
            Married_Yes = 0

        Property_Area_Urban = request.form.get('Property_Area_Urban', False)
        if(Property_Area_Urban == 'Urban'):
            Property_Area_Urban = 1
            Property_Area_Semiurban = 0
        else:
            # NOTE(review): any non-'Urban' value (including a missing
            # field) is encoded as Semiurban=1 -- confirm this matches the
            # training-data encoding for 'Rural'.
            Property_Area_Urban = 0
            Property_Area_Semiurban = 1

        Total_Income = float(request.form['Total_Income'])
        EMI = float(request.form['EMI'])

        prediction=model.predict([[Credit_History, Gender_Male, Married_Yes,Property_Area_Semiurban, Property_Area_Urban, Total_Income,EMI]])

        # Class 0 = rejected, otherwise approved.
        if (prediction == 0):
            return render_template('html.html',prediction_text="Opps !!...Sorry you cannot get the loan")
        else:
            return render_template('html.html', prediction_text="Hurrah !!...you can get the loan")
if __name__=="__main__":
    # NOTE(review): debug=True is for local development only.
    app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
819d1ba8a7aa2a192c6361bb3a15ee40dfc7f970 | 8dd6367a2762092c16abb981870d677f1de47823 | /my_predict.py | f2ba19de87b619824f22e0bac8483b4cc543936e | [] | no_license | gouyl/-AI- | 93ec358e1032066c9920efcd8faea5d84cc34a5b | dda66055d49354538549ac0580d1f1a026d80218 | refs/heads/master | 2020-06-15T18:35:39.419097 | 2019-07-05T08:01:26 | 2019-07-05T08:01:26 | 195,366,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | from keras.models import load_model
import numpy as np
import os, time, random, re
from PIL import Image
from tqdm import tqdm_notebook
from keras.preprocessing.image import img_to_array, load_img
from scipy import misc
# Predict per-pixel land-use labels for the two large test images by running
# a U-Net over their pre-cut 256x256 tiles, then stitch the tile predictions
# back into full-resolution label maps.
model = load_model("model_unet_2w.h5")

# Tile geometry: 256x256 RGB tiles.
im_height = 256
im_width = 256
im_chan = 3

dir_test_3 = "../data/jingwei_round1_test_a_20190619/256_test_3/"
dir_test_4 = "../data/jingwei_round1_test_a_20190619/256_test_4/"

ids_test_3 = next(os.walk(dir_test_3))[2]
ids_test_4 = next(os.walk(dir_test_4))[2]
print(len(ids_test_3))
print(len(ids_test_4))

# Load every tile into memory as uint8 arrays.
X_test_3 = np.zeros((len(ids_test_3), im_height, im_width, im_chan), dtype=np.uint8)
X_test_4 = np.zeros((len(ids_test_4), im_height, im_width, im_chan), dtype=np.uint8)

for n, id_ in tqdm_notebook(enumerate(ids_test_3), total=len(ids_test_3)):
    img = load_img(dir_test_3 + id_)
    x = img_to_array(img)[:, :, :]
    X_test_3[n] = x
print("Done,X_test_3")

for n, id_ in tqdm_notebook(enumerate(ids_test_4), total=len(ids_test_4)):
    img = load_img(dir_test_4 + id_)
    x = img_to_array(img)[:, :, :]
    X_test_4[n] = x
print("Done,x_test_4")

# Predict and reduce the per-class scores to a single label per pixel.
pred_test_3 = model.predict(X_test_3, verbose=2)
pred_test_3 = np.argmax(pred_test_3, axis=2)
num, w_h = pred_test_3.shape  # FIX: was 'pred_tes_3' (NameError typo)
print(num)
pred_test_3 = pred_test_3.reshape((num, 256, 256)).astype(np.uint8)
print(np.unique(pred_test_3))

# Per-class pixel ratios for a quick sanity check.
zero_ratio = len(pred_test_3[pred_test_3 == 0]) / (num * w_h)
one_ratio = len(pred_test_3[pred_test_3 == 1]) / (num * w_h)
two_ratio = len(pred_test_3[pred_test_3 == 2]) / (num * w_h)
print(zero_ratio, one_ratio, two_ratio)

pred_test_4 = model.predict(X_test_4, verbose=2)
pred_test_4 = np.argmax(pred_test_4, axis=2)
num, w_h = pred_test_4.shape
pred_test_4 = pred_test_4.reshape((num, 256, 256)).astype(np.uint8)
print(np.unique(pred_test_4))

# Stitch image 3: the tile index encoded in the filename gives its
# row-major position in the full image grid.
img_3_width = 37241
img_3_height = 19903
number_row = int(img_3_height / 256)
number_col = int(img_3_width / 256)
Label_3_img = np.zeros((img_3_height, img_3_width), dtype=np.uint8)

for n, id_ in tqdm_notebook(enumerate(ids_test_3), total=len(ids_test_3)):
    num = re.findall(r"\d+", id_)
    num = int(num[0])
    row = int(num / number_col)
    col = int(num % number_col)
    img = pred_test_3[n]
    Label_3_img[256 * row:256 * (row + 1), 256 * col:256 * (col + 1)] = img
print(np.unique(Label_3_img))
# NOTE(review): scipy.misc.imsave was removed in modern SciPy; consider
# migrating to imageio.imwrite when dependencies are next updated.
misc.imsave("image_3_predict.png", Label_3_img)
print("Done: image 3")

# Stitch image 4 the same way.
img_4_height = 28832
img_4_width = 25936
number_row = int(img_4_height / 256)
number_col = int(img_4_width / 256)
Label_4_img = np.zeros((img_4_height, img_4_width), dtype=np.uint8)

# FIX: progress-bar total was len(ids_test_3); this loop walks ids_test_4.
for n, id_ in tqdm_notebook(enumerate(ids_test_4), total=len(ids_test_4)):
    num = re.findall(r"\d+", id_)
    num = int(num[0])
    row = int(num / number_col)
    col = int(num % number_col)
    img = pred_test_4[n]
    Label_4_img[256 * row:256 * (row + 1), 256 * col:256 * (col + 1)] = img
print(np.unique(Label_4_img))
misc.imsave("image_4_predict.png", Label_4_img)
print("Done: image 4")
"noreply@github.com"
] | noreply@github.com |
d309e15758a8bb9586bf1fb4764ac853cb5ff95c | 3602b714233a795e16d001d37c55461916aff486 | /client.py | 124967f5484e3cd19290f9a2f010b1077bfc4405 | [] | no_license | teknus/AsyncChat | a037ee57242e6bf9fed075c30b2934607961533c | 74e0284ac298d74781937d08099c5398458d1887 | refs/heads/master | 2021-01-20T13:54:44.946924 | 2017-05-07T16:43:40 | 2017-05-07T16:43:40 | 90,531,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # telnet program example
from __future__ import print_function
import socket, select, string, sys, os
def prompt(name):
    """Display the chat prompt (the user's nickname) on stdout without a
    trailing newline, flushing immediately so it appears at once."""
    print(name, end="", file=sys.stdout, flush=True)
#main function
if __name__ == "__main__":

    # Require host and port on the command line.
    if(len(sys.argv) < 3) :
        print ('Usage : python telnet.py hostname port')
        sys.exit()

    # NOTE(review): the __future__ print import suggests Python 2/3
    # compatibility was intended, but input() under Python 2 evaluates the
    # typed text -- raw_input() would be needed there.
    name = input("say my name: ")
    name = "<{}>".format(name)
    host = sys.argv[1]
    port = int(sys.argv[2])

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(2)

    # connect to remote host
    try :
        s.connect((host, port))
    except :
        print ('Unable to connect')
        sys.exit()

    # The first message announces our nickname to the server.
    s.send(name.encode())
    print( 'Connected to remote host. Start sending messages')
    prompt(name)

    while 1:
        # Watch both the keyboard and the server socket.
        socket_list = [sys.stdin, s]

        # Get the list sockets which are readable
        read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])

        for sock in read_sockets:
            #incoming message from remote server
            if sock == s:
                data = sock.recv(4096)
                if not data :
                    # Empty recv means the server closed the connection.
                    print ('\nDisconnected from chat server')
                    sys.exit()
                else :
                    #print data
                    sys.stdout.write(data.decode())
                    prompt(name)

            #user entered a message
            else :
                msg = sys.stdin.readline()
                s.send(msg.encode())
                prompt(name)
| [
"mateusteknus@gmail.com"
] | mateusteknus@gmail.com |
27f10dff9fe70eb67bbbd8be5e27c8ee089b46f9 | 65dce36be9eb2078def7434455bdb41e4fc37394 | /454 4Sum II.py | c5d654a137c6a70d3df07a7fcec921b7407065cd | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | '''
Given four lists A, B, C, D of integer values, compute how many tuples (i, j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where 0 ≤ N ≤ 500. All integers are in the range of -228 to 228 - 1 and the result is guaranteed to be at most 231 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
'''
class Solution(object):
    def fourSumCount(self, A, B, C, D):
        """
        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int

        Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

        Meet-in-the-middle: tally every pairwise sum a + b in a dict, then
        for each pair (c, d) add the number of A/B pairs summing to
        -(c + d).  O(n^2) time and space instead of the naive O(n^4).
        """
        pair_sums = {}
        for a in A:
            for b in B:
                # dict.get replaces the membership-test-then-index idiom.
                pair_sums[a + b] = pair_sums.get(a + b, 0) + 1
        res = 0
        for c in C:
            for d in D:
                res += pair_sums.get(-c - d, 0)
        return res
"yiyun.tan@uconn.edu"
] | yiyun.tan@uconn.edu |
0372aded2d6c264a7e9cce586bc00655b1517d7c | 89a7a78580fcf786c7a054ccf69adbd385510efe | /lojban/main/feeds.py | 06a855ee7591d8edb7a4154f7e58eb30d5cb7535 | [] | no_license | lagleki/lojban-website | 0effacfd458724d489ec8a3f35d5cbd667813e2f | f2e1b4765bf918f295537e511fe870af3e5f8134 | refs/heads/master | 2021-01-24T22:52:24.851614 | 2008-08-02T11:07:26 | 2008-08-02T11:07:26 | 24,986,613 | 0 | 3 | null | 2020-10-02T08:01:56 | 2014-10-09T11:41:39 | null | UTF-8 | Python | false | false | 501 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.syndication.feeds import Feed
from django.utils.feedgenerator import Atom1Feed
from lojban.main.models import NewsItem
class NewsFeed(Feed):
    """Atom feed exposing the five most recent Lojban news items."""
    feed_type = Atom1Feed
    title = "News from Lojbanistan"
    link = "/news/"
    subtitle = "News about Lojban, the logical language."

    def items(self):
        # Newest first, capped at five entries.
        return NewsItem.objects.order_by('-pub_date')[:5]

    def item_pubdate(self, item):
        return item.pub_date
| [
"jim@git.dabell.name"
] | jim@git.dabell.name |
e97e4caa02a91f4185685942cc774181c4259b6c | caaf56727714f8c03be38710bc7d0434c3ec5b11 | /homeassistant/components/avri/__init__.py | 3165b6ee87a77f41cca449f635f51943bbe62923 | [
"Apache-2.0"
] | permissive | tchellomello/home-assistant | c8db86880619d7467901fd145f27e0f2f1a79acc | ed4ab403deaed9e8c95e0db728477fcb012bf4fa | refs/heads/dev | 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 | Apache-2.0 | 2023-01-13T06:02:03 | 2016-07-06T04:13:49 | Python | UTF-8 | Python | false | false | 1,572 | py | """The avri component."""
import asyncio
from datetime import timedelta
import logging
from avri.api import Avri
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import (
CONF_COUNTRY_CODE,
CONF_HOUSE_NUMBER,
CONF_HOUSE_NUMBER_EXTENSION,
CONF_ZIP_CODE,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(hours=4)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Avri component: just prepare the shared per-entry storage."""
    hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Avri from a config entry."""
    # Build the API client from the entry's stored address data; the house
    # number extension is optional, hence .get().
    client = Avri(
        postal_code=entry.data[CONF_ZIP_CODE],
        house_nr=entry.data[CONF_HOUSE_NUMBER],
        house_nr_extension=entry.data.get(CONF_HOUSE_NUMBER_EXTENSION),
        country_code=entry.data[CONF_COUNTRY_CODE],
    )
    hass.data[DOMAIN][entry.entry_id] = client

    # Forward the entry to every platform (currently just 'sensor').
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload all platforms in parallel; only drop the stored client when
    # every platform reports a successful unload.
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in PLATFORMS
            ]
        )
    )
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
| [
"noreply@github.com"
] | noreply@github.com |
2bfb834c61e5fd67368ad0fbc61cdbb04f3ac348 | 1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2 | /lab/lab09/tests/q3_2.py | ca45e1c7feda06578903f5453e3fdb3f09c5adcf | [] | no_license | taylorgibson/ma4110-fa21 | 201af7a044fd7d99140c68c48817306c18479610 | a306e1b6e7516def7de968781f6c8c21deebeaf5 | refs/heads/main | 2023-09-05T21:31:44.259079 | 2021-11-18T17:42:15 | 2021-11-18T17:42:15 | 395,439,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | test = { 'name': 'q3_2',
'points': None,
'suites': [ { 'cases': [ { 'code': ">>> # Make sure your column labels are correct.\n>>> set(faithful_predictions.labels) == set(['duration', 'wait', 'predicted wait'])\nTrue",
'hidden': False,
'locked': False},
{'code': '>>> abs(1 - np.mean(faithful_predictions.column(2))/100) <= 0.35\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"taylorgibson@gmail.com"
] | taylorgibson@gmail.com |
ae604020c0ed94084fd173b76d055203e2f7b813 | 015518a4c80704d5cebbb49907f5b2df610cb5d7 | /shotNoiseCharacterizations/characterizationStudy/detectorCharacterization2.py | bc71833e57a78ec8ca5517914e2bf178316a18d8 | [] | no_license | danielpereiraUA/offlineQRNG | 4da2078558b85e80028e975ed80b19972b15d7d1 | dc8b2f68ba9eed9d0fe3caf1de8608ab8e250757 | refs/heads/master | 2021-09-29T03:44:41.201003 | 2018-11-23T14:47:58 | 2018-11-23T14:47:58 | 154,479,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | import numpy as np
import matplotlib.pyplot as plt
# Load the comma-separated detector-characterization samples.  Passing the
# path directly lets NumPy open and close the file itself; the original
# handed loadtxt an open() handle that was never closed.
variance = np.loadtxt("variance.txt", delimiter=",")

plt.figure()
# Scatter plot: column 0 on x, column 1 on y, dot markers only.
plt.plot(variance[:, 0], variance[:, 1], '.')
plt.show()
| [
"danielfpereira@ua.pt"
] | danielfpereira@ua.pt |
758f2f2c19abcdc277d1a86dcb42bee7f08ee51e | 0b2cf106297d2e8afd18629d164d3260ebad9dca | /kit/migrations/0002_auto_20210205_0645.py | 5793c5afe49d862286331420d5f331cd0f5e4e9f | [] | no_license | Kuljeet1998/timekit-clone | c3474d173779b34a9457cc7bfe8f74d0c77ceb4d | a8c92177c9daeb3efd7827e36a8c499c9d71f62b | refs/heads/master | 2023-03-04T16:04:47.605886 | 2021-02-09T13:20:34 | 2021-02-09T13:20:34 | 336,971,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | # Generated by Django 2.2 on 2021-02-05 06:45
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; normally regenerated rather
    # than hand-maintained.
    #
    # NOTE(review): the DateTimeField defaults below are timestamps frozen
    # at generation time (2021-02-05), which usually means the model default
    # was an eagerly-evaluated datetime call; confirm the model uses a
    # callable (e.g. timezone.now) instead.

    dependencies = [
        ('kit', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='booking',
            name='state',
            field=models.CharField(choices=[('tentative', 'tentative'), ('confirmed', 'confirmed'), ('error', 'error'), ('declined', 'declined'), ('completed', 'completed'), ('cancelled_by_owner', 'cancelled_by_owner'), ('cancelled_by_customer', 'cancelled_by_customer'), ('rescheduled_by_customer', 'rescheduled_by_customer')], default='tentative', max_length=23),
        ),
        migrations.AlterField(
            model_name='slot',
            name='end_time',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 5, 7, 45, 42, 967802, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='slot',
            name='max_seats',
            field=models.PositiveIntegerField(default=3),
        ),
        migrations.AlterField(
            model_name='slot',
            name='name',
            field=models.CharField(default='name', max_length=50),
        ),
        migrations.AlterField(
            model_name='slot',
            name='slot_duration',
            field=models.PositiveIntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='slot',
            name='start_time',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 5, 6, 45, 42, 967746, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='widget',
            name='button_text',
            field=models.CharField(default='Book it', max_length=100),
        ),
        migrations.AlterField(
            model_name='widget',
            name='calendar_type',
            field=models.CharField(choices=[('week', 'week'), ('list', 'list')], default='week', max_length=15),
        ),
        migrations.AlterField(
            model_name='widget',
            name='success_message',
            field=models.CharField(default='We have received your booking and sent a confirmation', max_length=300),
        ),
        migrations.AlterField(
            model_name='widget',
            name='time_format',
            field=models.PositiveIntegerField(choices=[(24, '24'), (12, '12')], default=24),
        ),
    ]
| [
"kbhengura@gmail.com"
] | kbhengura@gmail.com |
3ebade0b69d6d4005c76fc444966770f2e952710 | c932c53d3004853f5328caed01438ab69005bd9f | /utils/logger.py | 8711f3afa7ed08c17431f3123ad4734499f6e488 | [] | no_license | russ0616/NCTU_VRDL_HW2 | 8e84f55005bb66daea1b30b8ff672048bf586d4d | 3ea83fe2bf15f132173f9a5b940e08e624533fd2 | refs/heads/main | 2023-01-21T07:59:41.140397 | 2020-11-26T05:43:03 | 2020-11-26T05:43:03 | 315,958,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # import tensorflow as tf
from torch.utils.tensorboard import SummaryWriter
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
self.writer = SummaryWriter(log_dir = log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
self.writer.add_scalar(tag, value, step)
def list_of_scalars_summary(self, tag_value_pairs, step):
"""Log scalar variables."""
for tag, value in tag_value_pairs:
self.writer.add_scalar(tag, value, step)
| [
"noreply@github.com"
] | noreply@github.com |
b474bc294dfdd65f1ee2cc101dd843636e5bccc2 | 9d5e5cbf9b11891f4cde70f115c82eba9220cee6 | /hw4_git.py | afff454eaaaf0fcdec999f0ff3be68a065be0122 | [] | no_license | xuanathon/hw4-git-practice | 6d091cbab0157b1130f658da3a82798c7a63cdbf | bf88dfd7a4040609d96ff72ddff334d6c2abf4b8 | refs/heads/master | 2021-01-03T00:15:22.373505 | 2020-02-11T18:14:07 | 2020-02-11T18:14:07 | 239,830,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | def sayHello():
return "Hello World!"
print(sayHello()) | [
"email.xhuang@gmail.com"
] | email.xhuang@gmail.com |
3bf974b01905f2a97f8d3506c3f812fdc4c90e10 | 7324417b008227587bb11e708196daa8e5540e16 | /unittests/TestCase3.py | b6acdc56c1879e7ca2bb3721ad106e89f7f9978a | [] | no_license | ajrichards/cytostream | f5bfd4beebe17ec388a0b73eea140ac3780b1cc6 | 17aa5cb5da1f20691bfb8c9c414222b8c3900abf | refs/heads/master | 2016-08-12T07:12:34.369476 | 2013-04-11T09:37:50 | 2013-04-11T09:37:50 | 36,160,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | #!/usr/bin/env python
import sys,os,unittest,time,re
from cytostream import NoGuiAnalysis
'''
description - Shows the user how to run an original set of files using one set of parameters. Then
the model is run again this time using a reference file---referred to in the software
as 'onefit'. This means that the model is run on a single reference file then all other
files in the project are fit using the results from that model run.
A. Richards
'''
class TestCase3(unittest.TestCase):
    """End-to-end check of cytostream's 'onefit' workflow.

    setUp builds a two-file project, runs the model with
    '3FITC_4PE_004' as the reference ('onefit' mode) and renders the
    result figures; tests() then verifies the on-disk artefacts.
    """
    def setUp(self):
        # Locate the package root whether we run from 'unittests' or
        # from the 'cytostream' directory itself.
        cwd = os.getcwd()
        if os.path.split(cwd)[1] == 'unittests':
            BASEDIR = os.path.split(cwd)[0]
        elif os.path.split(cwd)[1] == 'cytostream':
            BASEDIR = cwd
        else:
            # NOTE(review): BASEDIR stays unbound on this path; the
            # print does not abort the test.
            print "ERROR: Model test cannot find home dir -- cwd", cwd
        ## run the no gui analysis
        filePathList = [os.path.join(BASEDIR,"cytostream","example_data", "3FITC_4PE_004.fcs"),
                        os.path.join(BASEDIR,"cytostream","example_data", "duplicate.fcs")]
        projectID = 'utest'
        homeDir = os.path.join(BASEDIR,"cytostream","projects", projectID)
        ## run the initial model for all files
        self.nga = NoGuiAnalysis(homeDir,filePathList,useSubsample=True,makeQaFigs=False,record=False)
        self.nga.set('num_iters_mcmc', 1200)
        self.nga.set('model_mode', 'onefit')
        self.nga.set('model_reference', "3FITC_4PE_004")
        self.nga.set('model_reference_run_id', 'run1')
        self.nga.set('thumbnail_results_default','components')
        self.nga.run_model()
        ## create all pairwise figs for all files
        fileNameList = self.nga.get_file_names()
        for fileName in fileNameList:
            self.nga.make_results_figures(fileName,'run1')
    def tests(self):
        """Assert project files, events, model results and figures exist."""
        ## ensure project was created
        self.assertTrue(os.path.isfile(os.path.join(self.nga.controller.homeDir,"%s.log"%self.nga.controller.projectID)))
        self.failIf(len(os.listdir(os.path.join(self.nga.controller.homeDir,"data"))) < 2)
        ## get file names
        fileNameList = self.nga.get_file_names()
        self.assertEqual(len(fileNameList),2)
        ## get events
        events = self.nga.get_events(fileNameList[0],subsample=self.nga.controller.log.log['subsample_qa'])
        self.assertEqual(events.shape[0], int(float(self.nga.controller.log.log['subsample_qa'])))
        ## check that model results can be retrieved
        modelRunID = 'run1'
        componentModel, componentClasses = self.nga.get_model_results(fileNameList[0],modelRunID,'components')
        self.assertEqual(componentClasses.size,int(float(self.nga.controller.log.log['subsample_analysis'])))
        modesModel, modesClasses = self.nga.get_model_results(fileNameList[0],modelRunID,'modes')
        self.assertEqual(modesClasses.size,int(float(self.nga.controller.log.log['subsample_analysis'])))
        ## check that information can be retrieved from model log file
        modelLog = self.nga.get_model_log(fileNameList[0],modelRunID)
        self.assertEqual('utest',modelLog['project id'])
        ## check that analysis figs were made
        self.failIf(len(os.listdir(os.path.join(self.nga.controller.homeDir,'figs', modelRunID))) != 2)
        self.assertTrue(os.path.isdir(os.path.join(self.nga.controller.homeDir,'figs',modelRunID,'3FITC_4PE_004_thumbs')))
        ## check that model file used 'onefit' and that the reference is nonzero
### Run the tests
if __name__ == '__main__':
    # Allow running this file directly; unittest discovers TestCase3 above.
    unittest.main()
| [
"ajrichards@users.noreply.github.com"
] | ajrichards@users.noreply.github.com |
9b782c688e0dd74223de5b199c0bc92e6fa39895 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.1/server/openldap/actions.py | cb88cc11802e70a1cca7ea9961bec056dfadb4c4 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Configure the OpenLDAP source tree (pisi build action).

    Hardcodes the slapd IPC socket path into the headers, then runs
    ./configure with the full server/backend/overlay feature set.
    """
    # Append the LDAPI socket location before configuring.
    shelltools.echo("include/ldap_defaults.h", "#define LDAPI_SOCK \"/var/run/openldap/slapd.sock\"")
    autotools.configure("--prefix=/usr \
                         --enable-bdb \
                         --with-ldbm-api=berkeley \
                         --enable-hdb=mod \
                         --enable-slapd \
                         --enable-slurpd \
                         --enable-ldbm \
                         --enable-passwd=mod \
                         --enable-phonetic=mod \
                         --enable-dnssrv=mod \
                         --enable-ldap \
                         --enable-wrappers \
                         --enable-meta=mod \
                         --enable-monitor=mod \
                         --enable-null=mod \
                         --enable-shell=mod \
                         --enable-rewrite \
                         --enable-rlookups \
                         --enable-aci \
                         --enable-modules \
                         --enable-cleartext \
                         --enable-lmpasswd \
                         --enable-spasswd \
                         --enable-slapi \
                         --enable-dyngroup \
                         --enable-proxycache \
                         --enable-perl \
                         --enable-syslog \
                         --enable-dynamic \
                         --enable-local \
                         --enable-proctitle \
                         --enable-overlay \
                         --with-tls \
                         --with-cyrus-sasl \
                         --enable-crypt \
                         --enable-ipv6")
def build():
    """Compile the configured tree (pisi build action)."""
    autotools.make()
def install():
    """Install into the package image and lay out runtime directories."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("ANNOUNCEMENT", "CHANGES", "COPYRIGHT", "README", "LICENSE")
    # Runtime state and TLS certificate directories expected by slapd.
    pisitools.dodir("/var/run/openldap")
    pisitools.dodir("/var/run/openldap/slapd")
    pisitools.dodir("/etc/openldap/ssl")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
7b8466f0376f6de64cf039644fc1465308b1e644 | e1c5b001b7031d1ff204d4b7931a85366dd0ce9c | /EMu/2016/plot_fake/check_data.py | 285e253d884fcbc8e17661669330414a85534585 | [] | no_license | fdzyffff/IIHE_code | b9ff96b5ee854215e88aec43934368af11a1f45d | e93a84777afad69a7e63a694393dca59b01c070b | refs/heads/master | 2020-12-30T16:03:39.237693 | 2020-07-13T03:06:53 | 2020-07-13T03:06:53 | 90,961,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import ROOT
try:
    # Build a ROOT chain over the tag-and-probe ('tap') tree.
    tchain=ROOT.TChain('tap')
    tchain.Add('data_2016B_DoubleEG.root')
except:
    # NOTE(review): only a message is printed; the script then crashes
    # at GetEntries() below if the chain could not be built.
    print "errors!"
run_list = []
n_passed1 = 0
totalEntry = tchain.GetEntries()
for iEntry in range(0, tchain.GetEntries()):
    tchain.GetEntry(iEntry)
    # Collect the distinct run numbers seen in the tree.
    if tchain.ev_run_out not in run_list:run_list.append(tchain.ev_run_out)
    if iEntry%50000==0 and iEntry > 0:
        print '%d / %d Prossed'%(iEntry,totalEntry)
    # Z-mass window; region codes 1/3 are detector categories of the
    # tag and probe electrons -- presumably barrel/endcap, TODO confirm.
    if 60<=tchain.M_ee and tchain.M_ee<=120 :
        if (tchain.t_region == 1 and tchain.heep2_region == 1) or (tchain.t_region == 3 and tchain.heep2_region == 3) or (tchain.t_region == 1 and tchain.heep2_region == 3) or (tchain.t_region == 3 and tchain.heep2_region == 1):
            n_passed1+=tchain.w_PU_combined
print 'n total : ', n_passed1
run_list.sort()
for run in run_list:
    print run
| [
"1069379433@qq.com"
] | 1069379433@qq.com |
ff7282c609559212ee211ecf3f1df66bdfce0a0c | c8507d1eb884807c10995af25fe8712d56ec2ff7 | /accounts/migrations/0001_initial.py | d60bbeddc96f5e13af488e818f5b64147f75ca48 | [] | no_license | eunzz/happymoon | 16d4b8bb291cca151e68c7fe8e5a2b2ecedd7c1f | 1ec42fd1b0f3ff254b8ea18d91c8215a6d415530 | refs/heads/master | 2020-04-08T23:10:31.904539 | 2018-08-24T07:48:17 | 2018-08-24T07:48:17 | 159,814,426 | 1 | 0 | null | 2018-11-30T11:32:06 | 2018-11-30T11:32:06 | null | UTF-8 | Python | false | false | 1,021 | py | # Generated by Django 2.0.7 on 2018-08-16 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema: creates the 'Information' sign-up
    # table (name/email/birth date/marketing channel/referral code with
    # automatic created/updated timestamps).  Do not edit by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Information',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('birth_year', models.IntegerField(blank=True)),
                ('birth_month', models.IntegerField(blank=True)),
                ('birth_day', models.IntegerField(blank=True)),
                ('channel', models.CharField(max_length=100)),
                ('referral_code', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"dajasin245@naver.com"
] | dajasin245@naver.com |
8b4b2b904f9a127a18c7de5caeafb89569c9e117 | 106a3e0c5688a867e90b6dba92e32b0d970d71c8 | /Class/ACME_Volume_2-Python/CVXOPT/cvxopt_intro.py | 377b2bfee524615ab74e2f4e7b2963246ba8cbce | [] | no_license | scj1420/Class-Projects-Research | 48cde615c650e2816665254c4676e646255fecb5 | 6e969de3a8337b0bd9bb4ba7abac722ab5c065ab | refs/heads/master | 2018-12-20T15:44:41.090235 | 2018-09-17T18:26:58 | 2018-09-17T18:26:58 | 113,921,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,557 | py | # cvxopt_intro.py
"""Volume 2: Intro to CVXOPT.
<Name>
<Class>
<Date>
"""
import cvxopt as cvx
import numpy as np
def prob1():
    """Solve the linear program

        minimize    2x + y + 3z
        subject to  x + 2y        >= 3
                    2x + 10y + 3z >= 10
                    x, y, z       >= 0

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective'])
    """
    c = cvx.matrix([2.,1.,3.])
    # cvxopt matrices are column-major: each inner list below is one
    # *column* of G.  The solver expects Gx <= h, so the >= constraints
    # above appear negated.
    G = cvx.matrix([[-1.,-2.,-1.,0.,0.],[-2.,-10.,0.,-1.,0.],[0.,-3.,0.,0.,-1.]])
    h = cvx.matrix([-3.,-10.,0.,0.,0.])
    sol = cvx.solvers.lp(c, G, h)
    return np.ravel(sol['x']), sol['primal objective']
# Problem 2
def l1Min(A, b):
    """Calculate the solution to the optimization problem

        minimize    ||x||_1
        subject to  Ax = b

    via the standard LP reformulation with slack variables u >= |x|
    (the decision vector is [u; x]).

    Parameters:
        A ((m,n) ndarray)
        b ((m, ) ndarray)
    Returns:
        The optimizer x (ndarray), without any slack variables u
        The optimal value (sol['primal objective'])
    """
    m = len(A)
    n = len(A[0])
    # Objective: sum of the slack variables u (zero cost on the x block).
    c = cvx.matrix(np.concatenate([np.ones(n), np.zeros(n)]))
    iden = np.eye(n)
    # Inequalities  x - u <= 0  and  -x - u <= 0  encode u >= |x|.
    upper = np.column_stack([-iden, iden])
    lower = np.column_stack([-iden, -iden])
    # np.vstack: np.row_stack was removed in NumPy 2.0.
    G = cvx.matrix(np.vstack([upper, lower]))
    h = cvx.matrix(np.zeros(2 * n))
    # The equality constraint acts on the x block only.
    Am = cvx.matrix(np.column_stack([np.zeros((m, n)), A]))
    # (Leftover debugging prints of c, G, h, Am and b removed.)
    sol = cvx.solvers.lp(c, G, h, Am, cvx.matrix(b))
    return np.ravel(sol['x'][n:]), sol['primal objective']
# Problem 3
def prob3():
    """Solve the transportation problem, converting the final equality
    constraint into a pair of opposing inequality constraints.

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective'])
    """
    c = cvx.matrix([4.,7.,6.,8.,8.,9.])
    # Non-negativity rows (-I) plus the equality written as <= and >=.
    g1 = -1*np.eye(6)
    g2 = np.array([0.,1.,0.,1.,0.,1.])
    # NOTE(review): np.row_stack was removed in NumPy 2.0; np.vstack is
    # the drop-in replacement.
    g = np.row_stack([g1, g2, -g2])
    G = cvx.matrix(g)
    h = cvx.matrix(np.concatenate([np.zeros(6), [8.,-8.]]))
    A = cvx.matrix(np.array([[1.,1.,0.,0.,0.,0.],
                             [0.,0.,1.,1.,0.,0.],
                             [0.,0.,0.,0.,1.,1.],
                             [1.,0.,1.,0.,1.,0.]]))
    b = cvx.matrix([7.,2.,4.,5.])
    sol = cvx.solvers.lp(c,G,h,A,b)
    return np.ravel(sol['x']), sol['primal objective']
# Problem 4
def prob4():
    """Find the minimizer and minimum of
        g(x,y,z) = (3/2)x^2 + 2xy + xz + 2y^2 + 2yz + (3/2)z^2 + 3x + z

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective'])
    """
    # cvxopt minimizes (1/2) x^T P x + q^T x, so P holds twice the
    # quadratic-form coefficients.
    P = cvx.matrix(np.array([[3.,2.,1.],[2.,4.,2.],[1.,2.,3.]]))
    q = cvx.matrix([3.,0.,1.])
    sol = cvx.solvers.qp(P,q)
    return np.ravel(sol['x']), sol['primal objective']
# Problem 5
def l2Min(A, b):
    """Calculate the solution to the optimization problem

        minimize    ||x||_2
        subject to  Ax = b

    Minimizing ||x||_2 is equivalent to minimizing x^T x, i.e. the QP
    with P = 2I and q = 0.

    Parameters:
        A ((m,n) ndarray)
        b ((m, ) ndarray)
    Returns:
        The optimizer x (ndarray)
        The optimal value (sol['primal objective'])
    """
    n = len(A[0])
    P = cvx.matrix(2 * np.eye(n))
    q = cvx.matrix(np.zeros(n))
    # (Leftover debugging print of the problem dimensions removed.)
    sol = cvx.solvers.qp(P, q, A=cvx.matrix(A), b=cvx.matrix(b))
    return np.ravel(sol['x']), sol['primal objective']
# Problem 6
def prob6():
    """Solve the forest-allocation model stored in 'ForestData.npy'.

    Rows come in groups of three (one per prescription) per analysis
    area.  Columns used: 1 = area acreage (s), 3 = profit (p),
    4 = timber (t), 5 = grazing (g), 6 = wildlife (w).

    Returns (in order):
        The optimizer x (ndarray)
        The optimal value (sol['primal objective']*-1000)
    """
    data = np.load('ForestData.npy')
    s = data[:,1]
    b = cvx.matrix(s[::3].astype(np.float))
    p = data[:,3]
    c = cvx.matrix(-p)  # maximize profit == minimize -profit
    t = data[:,4]
    g = data[:,5]
    w = data[:,6]
    # NOTE(review): np.float (removed in NumPy 1.24) and np.row_stack
    # (removed in NumPy 2.0) will fail on modern NumPy.
    G1 = np.row_stack([t,g,w])
    h1 = np.array([40000., 5., 55160.])
    G2 = np.eye(21).astype(np.float)
    h2 = np.zeros(21).astype(np.float)
    G = cvx.matrix(np.row_stack([-G1, -G2]))
    h = cvx.matrix(np.concatenate([-h1, -h2]))
    # One equality row per analysis area: its three prescriptions must
    # sum to the area's acreage.
    R = [np.concatenate([np.array([0]*3*i), np.ones(3), np.array([0]*3*(6-i))]) for i in range(7)]
    A = cvx.matrix(np.row_stack(R))
    sol = cvx.solvers.lp(c, G, h, A, b)
    return np.ravel(sol['x']), sol['primal objective']*-1000
p,q = prob6()
print(p,q) | [
"scj1420@gmail.com"
] | scj1420@gmail.com |
68ebf0be7d954d7382efd358034e2d409192a457 | 597354b124d70e86bcce25551ebb6c63e5e01154 | /LeetCode Graph/venv/LeetCode 200 Number of Islands.py | c0a80241b76c9e5395965dc492a26f58951d7149 | [] | no_license | HHonoka/LeetCode- | acf361a15f1af8becf2a7225cc738d91bfda5ded | cd751a26fd097f542ab6fe2386a179ddf70de79b | refs/heads/master | 2020-05-25T00:49:20.001811 | 2019-05-19T23:59:52 | 2019-05-19T23:59:52 | 187,539,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
count = 0
if not grid:
return 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == '1':
self.dfs(grid, i, j)
count += 1
return count
    def dfs(self, grid, i, j):
        # Flood-fill helper: if (i, j) is in bounds and still land
        # ('1'), sink it in place and recurse into the four neighbours.
        if 0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == '1':
            grid[i][j] = '0'
            self.dfs(grid, i - 1, j)
            self.dfs(grid, i + 1, j)
            self.dfs(grid, i, j - 1)
            self.dfs(grid, i, j + 1) | [
"shangshanghan16@gmail.com"
] | shangshanghan16@gmail.com |
463c64b2a11dd3a5346f3222bf3bb3927143f553 | 25dbee4b914a268ec99f05043cd33f5351cddc71 | /lib/python2.6/site-packages/twisted/web2/http_headers.py | 1db89f913b32c93b6fc9827c5625edad0bffbfe5 | [] | no_license | bmelton/CLAIM | cd6deb9bee2e43b4abe527d34ad6851228f129a3 | 5f220b81d44739cbf63e0ecb3862dbee3b1dfa13 | refs/heads/master | 2021-01-01T18:42:43.584449 | 2010-11-04T18:41:03 | 2010-11-04T18:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | /usr/share/pyshared/twisted/web2/http_headers.py | [
"barry.melton@gmail.com"
] | barry.melton@gmail.com |
ea7860e1c15b5ad3ae2fab8fa8abb3312cc5c4aa | dc24716e7edebdb4af4e60bbcda752efc2d74799 | /cprop.py | d86b7384aaa9dfa0cd8c8300bc9f0e47c8db500e | [] | no_license | hitesh4/OpenCV-Python-Implementation | 1a188e45f166c1d8777f6f5f7e92162667c8fd54 | 77ab7829152bdfa6aee1ed0c90f36493d13bce5a | refs/heads/master | 2021-08-23T12:02:05.003661 | 2017-12-05T07:38:04 | 2017-12-05T07:38:04 | 113,092,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(0)  # default webcam
while(cap.isOpened()):
    ret, frame = cap.read();
    if ret == True:
        framegray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        # Fixed binary threshold at 127 (type flag 0 == THRESH_BINARY).
        ret1, thresh = cv2.threshold(framegray,127,255,0)
        # NOTE(review): the 3-value findContours return is OpenCV 3.x;
        # OpenCV 4 returns only (contours, hierarchy).
        image,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        framecnt = frame
        im = framecnt
        for c in contours:
            # Rasterise each contour into its own mask and list the
            # pixel coordinates belonging to it.
            mask = np.zeros(framegray.shape,np.uint8)
            im = cv2.drawContours(mask, [c],-1,255,-1)
            pixelpoints = np.transpose(np.nonzero(mask))
            print pixelpoints
        cv2.imshow('frame',im)
        # Quit on 'q'.
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
| [
"saini.hitesh4@gmail.com"
] | saini.hitesh4@gmail.com |
44d2c91af62c4397eec5ddcda1e60e02cd58e9f2 | 2ea31e038b000b4262e636ca291cce5cd13776dd | /TIMS_GUI/TIMS_functions.py | 7935e9b0a976bccbb9257565e0b86155428ec3f6 | [
"MIT"
] | permissive | okdpetrology/TIMS_GUI | 9395629b780da13251a67357097f1163246ce87a | 9960e0c842310cd6e951dea3e89bf459b35803d0 | refs/heads/main | 2023-04-17T05:02:20.167801 | 2021-04-22T05:51:21 | 2021-04-22T05:51:21 | 360,406,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,554 | py | import pandas as pd
import os
import re
import copy
# Functions
def get_raw_tables(file_name):
    """Split one TIMS .dat export into its per-block tables.

    Finds the 'Baselines', 'Block', 'Individual Ratios' and 'Grand'
    section markers, slices the raw file into one DataFrame per section
    per block, tidies Block/Grand/Individual tables via
    grand_dataframe()/indiv_dataframe(), and returns a dict keyed
    'Baselines:<n>', 'Block:<n>', ... plus 'Machine Parameters:' and
    'Date'.
    """
    df = pd.read_csv(file_name, header=None, skip_blank_lines=False)
    # df = dat_dataframe
    # date_ind = list(df.loc[df[0] == 'Date'].index.values)
    # Row indices of each section's header line.
    baselines_ind = list(df.loc[df[0] == 'Baselines for this Block'].index.values)
    baselines_ind2 = list(
        df.loc[df[0] == 'Block "Function" "Mean Bf" "%SdErrB" "Mean Af" "%SdErrA" "No After" "No Before"'].index.values)
    individualRatios_ind = list(df.loc[df[0] == 'Individual Ratios for this Block:'].index.values)
    grandFunction_ind = list(
        df.loc[df[0] == 'Grand "Function" "Mean Bf" "%SdErrB" "Mean Af" "%SdErrA" "No After" "No Before"'].index.values)
    # print('Date: ', date_ind)
    # print('Baselines: ', baselines_ind)
    # print('Blocks: ',baselines_ind2)
    # print('Individual Ratios: ',individualRatios_ind)
    # print('Grand: ',grandFunction_ind)
    date_dict = {}
    # file_name = 'TEST1'
    # Keep the (last) 'Date' line split on ':'; its final element is
    # consumed below when the per-block tables are built.
    for idx in range(len(df)):
        if 'Date' in str(df.iloc[idx][0]):
            unicode_line = str(df.iloc[idx][0])
            uni_list = unicode_line.split(':')
            # date_dict[file_name] = uni_list.pop().strip()
    ### Important grand, block, individual ratios stuff
    chopped_df = []
    chopped_df_dict = {}
    for i in range(len(grandFunction_ind)):
        df_crop1 = df[baselines_ind[i]:baselines_ind2[i]]
        df_crop1b = df[baselines_ind2[i]:individualRatios_ind[i]]
        df_crop2 = df[individualRatios_ind[i]:grandFunction_ind[i]]
        # print(i)
        try:
            df_crop3 = df[grandFunction_ind[i]:baselines_ind[i + 1]]
        except:
            # Last block: no following 'Baselines' marker, so take a
            # fixed 14-row window plus the machine-parameter rows after.
            final = grandFunction_ind[i] + 14
            df_crop3 = df[grandFunction_ind[i]:final]
            df_crop4 = df[(final + 1): (final + 8)]
            chopped_df_dict['Machine Parameters:'] = df_crop4
        chopped_df.append(df_crop1)
        chopped_df.append(df_crop1b)
        chopped_df.append(df_crop2)
        chopped_df.append(df_crop3)
        chopped_df_dict['Baselines:' + str(i + 1)] = df_crop1
        chopped_df_dict['Block:' + str(i + 1)] = df_crop1b
        chopped_df_dict['Individual Ratios:' + str(i + 1)] = df_crop2
        chopped_df_dict['Grand:' + str(i + 1)] = df_crop3
    # Making these tables look nice
    for idx in range(len(grandFunction_ind)):
        string = 'Block:' + str(idx + 1)
        df_block = grand_dataframe(chopped_df_dict[string], file_name, string)
        chopped_df_dict[string] = df_block
        string2 = 'Grand:' + str(idx + 1)
        df_block = grand_dataframe(chopped_df_dict[string2], file_name, string2)
        chopped_df_dict[string2] = df_block
        string3 = 'Individual Ratios:' + str(idx + 1)
        df_block = indiv_dataframe(chopped_df_dict[string3], file_name, string3)
        chopped_df_dict[string3] = df_block
        # NOTE(review): pop() runs once per block iteration, mutating
        # uni_list each time, and raises NameError if no 'Date' row was
        # found above -- verify this is intended.
        chopped_df_dict['Date'] = uni_list.pop().strip()
        # print(file_name, "  :  ", string2)
    return chopped_df_dict
def grand_dataframe(grand_df, file_name, df_name):
    """Parse a raw 'Grand'/'Block' text table into a tidy DataFrame.

    Each data line looks like:  <prefix> "<function>" <6 numbers>.
    The first row (the section title) is skipped, as is any row that
    does not contain a quoted function name.
    """
    columns = ['File Name',
               'Dataframe Name',
               'Function',
               'Mean Bf',
               '%SdErrB',
               'Mean Af',
               '%SdErrA',
               'No After',
               'No Before']
    parsed_rows = []
    body = grand_df[1:]  # drop the section-title row
    for _, raw_row in body.iterrows():
        pieces = re.split('"', str(raw_row[0]))[1:]
        if len(pieces) <= 1:
            continue  # no quoted function name on this line
        numbers = pieces[1].split()
        parsed_rows.append([file_name, df_name, pieces[0]] + numbers)
    return pd.DataFrame(parsed_rows, columns=columns)
def indiv_dataframe(indiv_dataframe, file_name, df_name):
    """Parse an 'Individual Ratios' text block into a tidy DataFrame.

    The first two rows (title + column header) are skipped; remaining
    rows are split on single spaces into the 13 per-function columns.
    """
    columns = ['File Name', 'Dataframe Name', 'F0', 'FG', 'FH', 'FI', 'FK', 'FL', 'FM', 'FN', 'FO', 'FP', 'FQ', 'FR',
               'FS']
    parsed_rows = []
    for position in range(2, len(indiv_dataframe)):
        tokens = str(indiv_dataframe.iloc[position][0]).split(' ')
        if len(tokens) <= 1:
            continue  # blank / single-token row carries no ratio data
        parsed_rows.append([file_name, df_name] + tokens)
    return pd.DataFrame(parsed_rows, columns=columns)
def multi_file_get_tables(list_filenames):
    """Parse every .dat file in *list_filenames*; return {filename: tables}."""
    return {data_file: get_raw_tables(data_file) for data_file in list_filenames}
def combine_all_df(data_dict):
    """Concatenate the per-file parsed tables into three combined frames.

    Parameters
    ----------
    data_dict : dict
        {file name: {table name: DataFrame, ...}} as produced by
        multi_file_get_tables().  Table names are matched by substring
        ('Grand', 'Block', 'Individual'); other entries such as 'Date'
        or 'Machine Parameters:' are ignored here.

    Returns
    -------
    dict with keys 'Grand', 'Block' and 'Individual Ratios', each one
    DataFrame stacking that table from every file (original row indices
    are kept, matching the old append() behaviour).
    """
    grand_frames = []
    block_frames = []
    indiv_frames = []
    for tables in data_dict.values():
        for table_name, table in tables.items():
            if 'Grand' in table_name:
                grand_frames.append(table)
            elif 'Block' in table_name:
                block_frames.append(table)
            elif 'Individual' in table_name:
                indiv_frames.append(table)

    # pd.concat replaces the repeated DataFrame.append loop (deprecated
    # and removed in pandas 2.0, and quadratic in copies); an empty
    # category now yields an empty frame instead of a NameError.
    def _stack(frames):
        return pd.concat(frames) if frames else pd.DataFrame()

    return {
        'Grand': _stack(grand_frames),
        'Block': _stack(block_frames),
        'Individual Ratios': _stack(indiv_frames),
    }
def format_grand12(data_dict):
    """Build one wide summary row per file from its 'Grand:12' table.

    For every file, the final-block grand table ('Grand:12' -- runs are
    assumed to have 12 blocks) is pivoted so each Function F yields two
    columns: 'F Mean Af' and 'F %SdErrA'.  Files without a 'Grand:12'
    entry (e.g. aborted runs) are reported and skipped.

    Returns a DataFrame indexed by base file name (directory components
    stripped) with every column coerced to numeric.
    """
    wide_rows = {}
    for file in data_dict:
        # .get() instead of the original bare 'except:', which silently
        # swallowed *any* error, not just a missing key.
        grand = data_dict[file].get('Grand:12')  # NOTE: hardcoded to 12-block runs
        if grand is None:
            print(file, ': Probably aborted during run.')
            continue
        row = {}
        for idx in range(len(grand)):
            function = grand.loc[idx]['Function']
            row[function + ' Mean Af'] = grand.loc[idx]['Mean Af']
            row[function + ' %SdErrA'] = grand.loc[idx]['%SdErrA']
        # Index by the bare file name, dropping any directory prefix.
        wide_rows[file.split('/')[-1]] = row
    summary = pd.DataFrame(wide_rows).transpose()
    for col in summary.columns:
        summary[col] = pd.to_numeric(summary[col])
    return summary
def format_machine(data_dict):
    """Flatten each file's 'Machine Parameters:' rows into one row.

    Quote characters are stripped, then two kinds of entries are
    harvested from the raw lines: (a) the row following a 'Source ...'
    header is zipped with that header into key/value pairs, and (b) any
    token containing ':' becomes a key whose value is the next token
    ('HT:' keys get None; tokens with two ':' are skipped -- presumably
    timestamps, TODO confirm).  Returns a DataFrame with one row per
    file (path stripped) plus the stored 'Date'.
    """
    big_dict = {}
    file_list = list(data_dict.keys())
    for file in file_list:
        # print(file)
        test_z = data_dict[file]['Machine Parameters:']
        # Define new Machine Parameters df based on file name
        test_dict = {}
        test_dict['Date'] = data_dict[file]['Date']
        uni_list2 = []
        for row in range(len(test_z)):
            # print(row)
            unicode_line = str(test_z.iloc[row][0])
            unicode_line = unicode_line.translate({ord(c): None for c in '""'})
            uni_list = unicode_line.split()
            # print(uni_list)
            if uni_list[0] == 'Source':
                uni_list2 = uni_list
                # Pair the 'Source' header tokens with the next row's values.
                unicode_line = str(test_z.iloc[(row + 1)][0])
                unicode_line = unicode_line.translate({ord(c): None for c in '""'})
                uni_list3 = unicode_line.split()
                for val in range(len(uni_list2)):
                    test_dict[uni_list2[val]] = uni_list3[val]
            for idx in range(len(uni_list)):
                if ':' in uni_list[idx]:
                    # print(uni_list[idx])
                    # NOTE(review): 'filter' shadows the builtin.
                    filter = uni_list[idx].split(':')
                    # print('filter= ', filter)
                    if len(filter) == 3:
                        continue
                    if filter[0] == 'HT':
                        test_dict[uni_list[idx]] = None
                        continue
                    test_dict[uni_list[idx]] = uni_list[(idx + 1)]
        if '/' in file:
            new_name = file.split('/')
            name = new_name.pop()
        else:
            name = file
        big_dict[name] = test_dict
    df_1 = pd.DataFrame(big_dict)
    df_flip = pd.DataFrame.transpose(df_1)
    # columns = list(df_flip.columns)
    # for col in columns:
    #     df_flip[col] = pd.to_numeric(df_flip[col])
    return df_flip
def mega_format(data_files):
    """Run the full pipeline over *data_files* and package the results.

    Returns {'Combine': combined raw tables, 'Important': one summary
    row per file (Grand:12 values + machine parameters, Date first)}.
    """
    parsed = multi_file_get_tables(data_files)
    combined = combine_all_df(parsed)
    summary = pd.concat([format_grand12(parsed), format_machine(parsed)], axis=1)
    # Move the Date column to the front.
    date_column = summary.pop("Date")
    summary.insert(0, "Date", date_column)
    return {'Combine': combined, 'Important': summary}
def files_process_toEXCEL(processed_dict, path, excel_name='TIMS_mega_output.xlsx'):
    # Write the mega_format() output to one Excel workbook under *path*:
    # the per-file summary on sheet 'Output' (its index is the file
    # name, hence index=True) and the three stacked raw tables on their
    # own sheets (positional indices dropped).
    with pd.ExcelWriter(os.path.join(path, excel_name)) as writer:
        processed_dict['Important'].to_excel(writer, sheet_name='Output', index=True)
        processed_dict['Combine']['Grand'].to_excel(writer, sheet_name='Grand', index=False)
        processed_dict['Combine']['Block'].to_excel(writer, sheet_name='Block', index=False)
        processed_dict['Combine']['Individual Ratios'].to_excel(writer, sheet_name='Individual Ratios', index=False) | [
"noreply@github.com"
] | noreply@github.com |
8801dc22bff5d819007bbb28597f482585ce99c7 | 2a6306cc6dbd32412b4c45c6d0c240e26271793a | /design/mechanical/calc/belt_drive.py | 142e01e6e5f7dbebd6df73c732c36523684c8b9b | [] | no_license | hidmic/airi-hw | 550151c776d04310359873a28c18e2649c52f6ce | 3e27914a299eac699a9ef7c1c11266a76831aa14 | refs/heads/master | 2023-06-14T04:33:05.639178 | 2021-04-30T19:00:50 | 2021-07-09T23:01:53 | 178,717,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | import pint
import numpy as np
def belt_drive():
    """
    Belt-drive transmission sizing (comments translated from Spanish).
    Quantities carry physical units via pint.
    """
    units = pint.UnitRegistry()
    # Minimum required motor torque
    alpha = np.arctan(1.0/5.0) # Maximum ramp slope (Law 24.314)
    Pmax = 8 * units.kgf # Nominal weight
    D = 9.6 * units.cm # Wheel diameter
    Cmin = Pmax * np.sin(alpha) * D / 2
    # Minimum nominal motor speed
    vmin = 1 * units.m / units.s
    Nmin = (vmin / (D / 2)).to(units.rpm)
    # Characteristics of the MR08D 24 V 24:1 DC gearmotor
    # (with a Mobuchi RS-555SH-2670 DC motor)
    N = 266 * units.rpm # Nominal speed
    C = 6.8 * units.kgf * units.cm # Torque at maximum efficiency
    assert 2 * C > Cmin
    assert N > Nmin
    sf = 1.5 # Nominal service factor
    C_peak = (C * sf).to(units.N * units.m)
    # A GT3 belt with 3 mm pitch and 6 mm width is used
    p = 3 * units.mm # Belt pitch
    n1 = n2 = 20 # Teeth per pulley
    pd = n1 * p / np.pi # Pitch diameter
    d_left = 100 * units.mm
    d_right = 80 * units.mm
    W = 6 * units.mm
    h = 2.41 * units.mm - 1.14 * units.mm
    C_rated = 0.95 * 1.26 * units.N * units.m
    assert C_rated > C_peak # Torque check
    # Belt tension calculation
    T = 2 * (
        0.812 * C_peak.to(units.lbf * units.inch) / pd.to(units.inch) +
        0.077 * units.lbf * units.minute**2 / units.ft**2 * ((pd * N).to(units.ft / units.minute)/1000)**2
    ).to(units.N)
    # Tooth count and contact angle calculation
    EA = 30000 * units.lbf * (0.82 * W / (1 * units.inch))
    ε = (T / EA).to('dimensionless')
    n_left = np.floor((1 - 2 * ε) * ((n1 + n2)/2 + 2 * d_left/p))
    L_left = n_left * p
    n_right = np.floor((1 - 2 * ε) * ((n1 + n2)/2 + 2 * d_right/p))
    L_right = n_right * p
    # Dump every unit-carrying local for inspection.
    print('\n'.join('{} = {}'.format(name, value)
                    for name, value in locals().items()
                    if isinstance(value, units.Quantity)))
if __name__ == '__main__':
    # Run the sizing calculation when executed as a script.
    belt_drive()
| [
"hid.michel@gmail.com"
] | hid.michel@gmail.com |
b6744307ed25dda4c35d25313c54275955c76d98 | f97091116f61c6cfb5ffb442ce8351340696f443 | /script/TS4-AizuSpiderSA-ROS.py | ca98834545848ec30d7c407c3777c769f1a3d82f | [
"CC0-1.0"
] | permissive | WRS-TDRRC/WRS-TDRRC-2020-Practice | 0e4942b629b53ff6bed19bccae4bd181a359bad3 | 5bb66f8494d8998549b87047c3501cc3241c9388 | refs/heads/master | 2023-08-29T08:02:16.202376 | 2021-10-01T16:02:00 | 2021-10-01T16:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import WRSUtil
# Load the TS4 task: "MultiSceneViews" layout, AGX physics simulator,
# Aizu Spider SA robot, vision simulation on, ROS remote interface.
WRSUtil.loadProject(
    "MultiSceneViews", "TS4", "AGXSimulator", "AizuSpiderSA",
    enableVisionSimulation = True, remoteType = "ROS")
| [
"shimizu@sist.chukyo-u.ac.jp"
] | shimizu@sist.chukyo-u.ac.jp |
5a647f04533220fc84f5a02065135607beb9b7ae | 870949dcecf17bac1f8ea5f09ce264454ef420ef | /mission5.py | e63db5b76a435477fe2ee9caeea8bd697060e46a | [] | no_license | jackiechen0708/PythonChallenge | f81caf3a64484f056f0eaba27b640f3b1caa7333 | c29db313ea198cd27dd129bdc49c947fee0aa482 | refs/heads/master | 2021-01-10T10:31:20.039868 | 2016-03-14T16:05:51 | 2016-03-14T16:05:51 | 53,776,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import pickle
# Python 2 script: unpickle the challenge data and decode the message.
p=pickle.load(file('mission5data'))
for item in p:
    # print item
    # Each item is a list of (character, repeat-count) pairs; expanding
    # them run-length decodes one line of output.
    print "".join(map(lambda p: p[0]*p[1], item))
| [
"12307130250@fudan.edu.cn"
] | 12307130250@fudan.edu.cn |
7e51003812a04763942a01c01a8888c80f6f2dc8 | 789a3a72c9cce5011df8082cf9c4226152767fe2 | /preprocessing-test.py | b93d94b360c1902af9fa91ac88235093709529a2 | [] | no_license | ExTee/LanguageDetection | fd748df72eaf55a03c2bec2d6c33c0db7d2a6bf0 | 0eca58445360c5f285683e74cf1ffe1ab5c6ad61 | refs/heads/master | 2021-04-29T15:59:16.991397 | 2018-02-16T22:13:54 | 2018-02-16T22:13:54 | 121,805,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | # -*- coding: utf-8 -*-
import csv
from sklearn.preprocessing import LabelEncoder, StandardScaler
import re
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
import keras.optimizers
from keras.utils import plot_model
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sets import Set
import numpy as np
def get_x_train():
    # Load the training sentences (Python 2: bytes in, unicode out).
    train_file = open('train_set_x.csv', 'rb')
    reader = csv.reader(train_file, delimiter = ',')
    rows = []
    for line in reader:
        rows.append(line)
    del(rows[0])  # drop the CSV header row
    for row in rows:
        del(row[0])  # drop the leading Id column
    # Lowercase and decode each sentence, then strip URLs.
    rows = map(lambda x: x[0].lower().decode('utf-8'), rows)
    rows = map(lambda x: re.sub(r'(\s)http\w+','',x), rows)
    return rows
def get_x_test():
    # Load the test sentences; unlike the training data, all whitespace
    # is removed so each entry becomes one contiguous character string.
    test_file = open('test_set_x.csv', 'rb')
    reader = csv.reader(test_file, delimiter = ',')
    entries = []
    for a,b in reader:
        entries.append(b)
    del(entries[0])  # drop the CSV header row
    rows = []
    for string in entries:
        s1 = ''.join(string.split())  # remove all whitespace
        rows.append(s1)
    rows = map(lambda x: x.lower().decode('utf-8'), rows)
    # NOTE(review): with whitespace already removed, this URL regex
    # (anchored on a preceding space) can no longer match anything.
    rows = map(lambda x: re.sub(r'(\s)http\w+','',x), rows)
    return rows
#takes as input a SCALED X, and ONE-HOT encoded Y
def output_train_test_files(X,Y,x_t):
    # Persist the prepared matrices so later runs can skip
    # preprocessing.  The commented-out lines are an earlier random
    # train/test split that was replaced by the provided test set.
    #seed to randomize
    #seed = 216
    #X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=seed)
    #save our train/test data
    #np.savez_compressed('train_test_data.npz',X_train=X_train,Y_train=Y_train,X_test=X_test,Y_test=Y_test)
    np.savez_compressed('train_test_data-allfeatures-tfidf-nohttp.npz',X_train=X,Y_train=Y,X_test=x_t)
    print("Data has been saved.")
# returns one-hot encoded y data
def preprocess_y():
    # Load the integer labels (second CSV column) of the training set.
    train_file = open('train_set_y.csv', 'rb')
    reader = csv.reader(train_file, delimiter = ',')
    rows = []
    for a,b in reader:
        rows.append(b)
    del(rows[0])  # drop the CSV header row
    #transform string to int
    rows = map(lambda x: int(x), rows)
    #one-hot encoding with keras
    y = keras.utils.to_categorical(rows, num_classes=5)
    return y
# Character-level TF-IDF features over the 200 most frequent characters.
vectorizer = TfidfVectorizer(analyzer='char', lowercase = False, max_features=200)
#print(get_x_train())
#print(rows)
X_train = vectorizer.fit_transform(get_x_train())
X_test = vectorizer.transform(get_x_test())
Y_train = preprocess_y()
output_file = open('nn-results-allfeatures-tfidf-nohttp.csv', 'wb')
# Three-layer sigmoid MLP with dropout; softmax over the 5 classes.
model = Sequential()
model.add(Dense(1000,input_dim=200,kernel_initializer="glorot_uniform",activation="sigmoid"))
model.add(Dropout(0.5))
model.add(Dense(600,kernel_initializer="glorot_uniform",activation="sigmoid"))
model.add(Dropout(0.5))
model.add(Dense(200,kernel_initializer="glorot_uniform",activation="sigmoid"))
model.add(Dropout(0.5))
#we have 5 categories
categories = 5
model.add(Dense(categories,kernel_initializer="glorot_uniform",activation="softmax"))
model_optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#model_optimizer = 'rmsprop'
model.compile(loss='categorical_crossentropy',
              optimizer=model_optimizer,
              metrics=['accuracy'])
history = model.fit(X_train,Y_train,
                    epochs=12,
                    validation_split=0.10,
                    batch_size=32,
                    verbose=2,
                    shuffle=True)
results = model.predict_classes(X_test, batch_size=64, verbose=0)
# Write Kaggle-style predictions: one "Id,Category" line per test row.
output = []
for i in range(len(results)):
    output.append(('{},{}'.format(i,results[i])))
output_file.write('Id,Category\n')
for line in output:
    output_file.write(line + '\n')
output_file.close()
#print(vectorizer.get_feature_names())
#print(rows[5], x[5])
| [
"xintong.wang1995@gmail.com"
] | xintong.wang1995@gmail.com |
af56f50878c6deef38a151cf456a1a89359011fc | 7bd7f24a3ddb8ba62eab0594cc11fdcc93d1d4aa | /wk1/singleLL.py | 523c9bbd6189ca5c5b681a879f204212a1233a61 | [] | no_license | mjso7660/Blockchain-Simulation | 806b84afd55f57a0f1c1657b9c829476b3d3656c | 081697d1c83a0c91eb558817608eaff09b20f530 | refs/heads/master | 2021-09-04T20:11:19.523076 | 2018-01-22T03:09:51 | 2018-01-22T03:09:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,438 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 22:35:14 2018
@author: Min Joon So, Shailesh Patro
Blockchain wk1 assignment
"""
class Node:
    '''
    A node of a singly linked list.  (Only a forward `next` link is
    kept, so the list is singly -- not doubly -- linked.)
    '''
    def __init__(self,key,value):
        self.key = key
        self.value = value
        self.next = None
class LinkedList:
    """A singly linked list of (key, value) nodes with unique keys."""
    def __init__(self):
        self.head = None  # first node; None means the list is empty
    def get_value(self, key):
        '''
        returns the value stored under the given key (None if absent)
        '''
        temp = self.head
        while temp is not None:
            # bug fix: compare with '==' instead of 'is' - identity only
            # works for interned objects (small ints / short strings) and
            # silently fails for general keys such as 1000 or floats.
            if temp.key == key:
                return temp.value
            temp = temp.next
        return None
    def push(self, key, value):
        '''
        adds a new node at the head (no-op when the key already exists)
        '''
        if not self.check_key(key):
            return
        new_node = Node(key, value)
        new_node.next = self.head
        self.head = new_node
    def append(self, key, value):
        '''
        appends a new node at the tail (no-op when the key already exists)
        '''
        if not self.check_key(key):
            return
        new_node = Node(key, value)
        if self.head is None:
            # bug fix: appending to an empty list used to raise
            # AttributeError (None has no attribute 'next')
            self.head = new_node
            return
        temp = self.head
        while temp.next is not None:
            temp = temp.next
        temp.next = new_node
    def delete(self, key):
        '''
        deletes the node with the given key (no-op when the key is absent)
        '''
        if self.head is None:
            # bug fix: deleting from an empty list used to crash
            return
        if self.head.key == key:
            # bug fix: return immediately; the old code kept scanning and
            # crashed when the removed head was the only node
            self.head = self.head.next
            return
        temp = self.head
        while temp.next is not None:
            if temp.next.key == key:
                temp.next = temp.next.next
                break
            temp = temp.next
    def insert_after_key(self, loc, key, value = None):
        '''
        searches for a given key and inserts a new node with 'key' and 'value' after
        loc: key of the node after which a new node will be inserted
        '''
        if not self.check_key(key):
            return
        temp = self.head
        while temp is not None:
            if temp.key == loc:
                new_node = Node(key, value)
                new_node.next = temp.next
                temp.next = new_node
                return
            temp = temp.next
        print("not found")
    def insert_after_node(self, original_node, node_insert):
        '''
        inserts node_insert (which must not already link onward) right
        after original_node
        '''
        if node_insert.next is not None:
            print("Error")
            return
        if original_node.next is None:
            original_node.next = node_insert
            return
        node_insert.next = original_node.next
        original_node.next = node_insert
        return
    def traversal(self):
        '''
        prints all keys and values, one node per line
        '''
        temp = self.head
        while temp is not None:
            print(temp.key,temp.value)
            temp = temp.next
        return
    def reverse(self):
        '''
        reverses the list in place (old tail becomes the new head)
        '''
        current = self.head
        previous = None  # the new tail must point at None
        while (current != None):
            nextnode = current.next  # remember the rest of the list
            current.next = previous  # flip this node's link
            previous = current       # advance previous
            current = nextnode       # advance current
        self.head = previous
    def check_key(self, new_key):
        '''
        new_key: key of a new node to be inserted
        returns True if new_key doesn't overlap with any other key; prints a
        warning and returns False if the key already exists
        '''
        temp = self.head
        while temp is not None:
            if temp.key == new_key:
                print("key alread exists")
                return False
            temp = temp.next
        return True
# End of class definition
# Start of public functions
def deep_copy(llist):
    '''
    llist: linked list to copy
    returns a node-by-node copy of the given linked list

    Rewritten as a single forward pass that builds the copy directly.
    The old implementation temporarily interleaved the copied nodes into
    the INPUT list and unwove them in a second pass; any failure midway
    would have left the caller's list corrupted, for no benefit here.
    '''
    new_llist = LinkedList()
    tail = None  # last node of the copy built so far
    temp = llist.head
    while temp is not None:
        new_node = Node(temp.key, temp.value)
        if tail is None:
            new_llist.head = new_node
        else:
            tail.next = new_node
        tail = new_node
        temp = temp.next
    return new_llist
def check_same(llist1, llist2):
    '''
    returns True when both linked lists hold identical (key, value)
    sequences, False otherwise (including different lengths)
    '''
    a, b = llist1.head, llist2.head
    while a is not None and b is not None:
        if (a.key, a.value) != (b.key, b.value):
            return False
        a, b = a.next, b.next
    # Both cursors must have run off the end together.
    return a is None and b is None
if __name__ == '__main__':
    # Demo / smoke test for the linked-list module.
    llist = LinkedList()
    llist.push(1,'Min Joon')
    llist.push(3,'Shailesh')
    llist.push(2,'Blockchain')
    llist.push(2,'error') #key '2' already exists - push prints a warning and skips
    llist.push(5,'CooperUnion')
    llist.push(4,1)
    llist.push(6,3.141)
    llist.insert_after_key(2, 8, 7)
    llist.insert_after_key(5, 7, '*')
    llist.append(0,'a')
    llist.append(9, 4)
    # print keys and values
    llist.traversal()
    print("----")
    # deep-copy the list, then reverse the copy (original stays intact)
    new_list = deep_copy(llist)
    new_list.reverse()
    new_list.traversal()
    print("----")
    # check if they match (False: one is reversed relative to the other)
    print(check_same(new_list,llist))
| [
"noreply@github.com"
] | noreply@github.com |
017b5e90f7909b8addc97becfd474488ecf4c16e | 78fac3c0ffbb4859ac7ac16eefc696f43b0780ef | /proj09/cards.py | 41f46ed7bf523737ddb8daae4b0ebd367fc44117 | [] | no_license | merylmerylmeryl/Intro-to-Python | 7337836d6a63d78ca343cd3da5e32e801add6a6b | 31b988f7213dfe555059008a6eb8d8718339bd2e | refs/heads/master | 2021-09-03T08:35:43.585875 | 2018-01-07T15:58:51 | 2018-01-07T15:58:51 | 116,578,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,161 | py | import random # needed for shuffling a Deck
class Card():
    """Denote a card with rank and suit.

    Protocol:
    1. 'no card' (the blank card) is represented by BOTH rank 0 and suit ''.
    2. set_rank and set_suit are development/debug helpers only and should
       be commented out after development and debugging.
    3. rank is an int: 1=Ace, 2-10 face value, 11=Jack, 12=Queen, 13=King.
    """
    def __init__(self, r=0, s=''):
        """Build a card from rank r (1-13, or one of 'AaJjQqKk') and suit s
        (one of 'CcHhDdSs'); any invalid argument leaves the blank default."""
        self.__rank = 0
        self.__suit = ''  # blank card by default unless valid args fix it below
        # type() checks (not isinstance) are kept on purpose: isinstance
        # would let bools through as ints, so Card(True) must stay blank.
        if type(r) == str:
            if r in 'Jj':
                self.__rank = 11  # Jack
            elif r in 'Qq':
                self.__rank = 12  # Queen
            elif r in 'Kk':
                self.__rank = 13  # King
            elif r in 'aA':
                self.__rank = 1  # Ace
            # else: str rank not in the approved set, keep the default rank of 0
        elif type(r) == int:
            # BUG FIX: the upper bound used to be 14, but valid ranks stop
            # at 13 (King); a rank of 14 later crashed __str__ with an
            # IndexError on the 14-entry name list.
            if 1 <= r <= 13:
                self.__rank = r
            # else: int rank out of range, keep the default rank of 0
        # else: rank is neither str nor int, keep the default rank of 0
        if type(s) == str and s:
            if s in 'Cc':
                self.__suit = 'C'
            elif s in 'Hh':
                self.__suit = 'H'
            elif s in 'Dd':
                self.__suit = 'D'
            elif s in 'Ss':
                self.__suit = 'S'
            # else: suit not in the approved set, keep the default suit of ''
        # else: suit not a (non-empty) string, keep the default suit of ''
    def set_rank(self, r):
        """For Development and Debugging only: Set the rank of the card: 0-13"""
        self.__rank = r
    def set_suit(self, s):
        """For Development and Debugging only: Set the suit of the card: C,S,D,H"""
        self.__suit = s
    def get_rank(self):
        """Return rank of the card as int: 0-13"""
        return self.__rank
    def get_suit(self):
        """Return suit of the card as string: C,S,D,H ('' for the blank card)"""
        return self.__suit
    def get_value(self):
        """Get the value of the card: Jack/Queen/King = 10, Ace = 1,
        others are face value 2-10 (blank card = 0)."""
        if self.__rank <= 10:
            return self.__rank
        else:
            return 10  # only Jack, Queen or King remain; their value is 10
    def __str__(self):
        """String representation of card for printing: rank + suit,
        e.g. 7S or JD, 'blk' for 'no card'"""
        nameString = "blk A 2 3 4 5 6 7 8 9 10 J Q K"  # 'blk' for blank, i.e. no card
        nameList = nameString.split()  # index by rank to get the display name
        # put name and suit in a 3-character-wide field, right-justified
        return (nameList[self.__rank] + self.__suit).rjust(3)
    def __repr__(self):
        """Representation of card: rank + suit"""
        return self.__str__()
class Deck():
    """Denote a deck to play cards with; index 0 is the TOP of the deck."""
    def __init__(self):
        """Initialize deck as a list of all 52 cards: 13 cards in each of 4 suits"""
        self.__deck = [Card(j, i) for i in "CSHD" for j in range(1,14)] # list comprehension
    def shuffle(self):
        """Shuffle the deck"""
        random.shuffle(self.__deck) # random.shuffle() randomly rearranges a sequence
    def deal(self):
        """Deal a card by returning the card that is removed off the top of the deck"""
        if len(self.__deck) == 0: # deck is empty
            return None
        else:
            return self.__deck.pop(0) # remove card (pop it) and then return it
    def discard(self, n):
        """Remove n cards from the top of the deck"""
        del self.__deck[:n] # delete an n-card slice from the FRONT (top) of the deck list
    def top(self):
        """Return the value of the top card -- do not remove from deck."""
        if len(self.__deck) == 0: # deck is empty
            return None
        else:
            return self.__deck[0]
    def bottom(self):
        """Return the value of the bottom card -- do not remove from deck."""
        if len(self.__deck) == 0: # deck is empty
            return None
        else:
            return self.__deck[-1]
    def add_card_top(self, c):
        """Place card c on top of deck"""
        self.__deck= [c] + self.__deck
    def add_card_bottom(self,c):
        """ Place card c on the bottom of the deck"""
        self.__deck.append(c)
    def cards_left(self):
        """Return number of cards in deck"""
        return len(self.__deck)
    def empty(self):
        """Return True if the deck is empty, False otherwise"""
        return len(self.__deck) == 0
    def __str__(self):
        """Represent the whole deck as a string for printing -- very useful during code development"""
        s = ""
        for index, card in enumerate(self.__deck):
            if index%13 == 0: # insert newline: print 13 cards per line
                s += "\n"
            s += str(card) + " "
        return s
    def __repr__(self):
        """Representation of deck"""
        return self.__str__()
| [
"noreply@github.com"
] | noreply@github.com |
961cc26729281f013409614ad41160edb6caace8 | a97f789530412fc1cb83170a11811f294b139ee8 | /疯狂Python讲义/codes/10/10.8/dict_vs_defaultdict.py | 271ff0662c308adfa488ad5cc5242ed3fdbd5820 | [] | no_license | baidongbin/python | 3cebf2cc342a15b38bf20c23f941e6887dac187a | 1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f | refs/heads/master | 2021-07-21T19:23:32.860444 | 2020-03-07T11:55:30 | 2020-03-07T11:55:30 | 195,909,272 | 0 | 1 | null | 2020-07-21T00:51:24 | 2019-07-09T01:24:31 | Python | UTF-8 | Python | false | false | 244 | py | from collections import defaultdict
my_dict = {}
# Use int as the defaultdict's default_factory:
# when a key is missing, the value returned by calling int() (i.e. 0) is used.
my_defaultdict = defaultdict(int)
print(my_defaultdict['a'])
# NOTE: this last lookup raises KeyError on purpose - a plain dict has no
# default_factory, which is exactly the contrast being demonstrated.
print(my_dict['a'])
| [
"baidongbin@thunisoft.com"
] | baidongbin@thunisoft.com |
6a48c072b3f836446cbbaea3197cd6142ed8820a | 90c90c9be8ff3dedd6a0c6c055e10ed47d588ae7 | /put_ufile.py | 07b38379dd8590f7e9c4dfde7f1f3c83e59e8e32 | [] | no_license | ouyangxudu/bakfile_to_ufile | d6c8cc17013feaa220cb8a2fbc1419336ea72986 | 15cd4ff600254a13244235a2048829dfbdbe5f7c | refs/heads/master | 2021-09-10T16:22:23.123221 | 2018-03-29T07:06:27 | 2018-03-29T07:06:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
import os
from ucloud.ufile import putufile
from ucloud.compact import b
from ucloud.logger import logger, set_log_file
import ucloud.ufile.config as config
from ucloud.compact import BytesIO
from config import *
# Route the ucloud SDK's log output to its default log file before any upload runs.
set_log_file()
def putfile(dir,file):
    """Upload ``file`` from local directory ``dir`` to the configured UCloud
    bucket, storing it under the same name.

    NOTE(review): the parameters shadow the builtins ``dir`` and ``file``;
    renaming them would break keyword callers, so they are kept as-is.
    """
    # Build the upload handler with the account's public/private key pair.
    handler = putufile.PutUFile(public_key, private_key)
    # upload small file to public bucket
    logger.info('start upload file to public bucket')
    # Target bucket (from config).
    bucket = bucketname
    # Object name to store the file under in the bucket.
    key = file
    # Local path of the file to upload.
    # NOTE(review): backslash join is Windows-only; os.path.join would be portable.
    local_file = dir + r'\{}'.format(file)
    print(local_file)
    # Issue the upload request.
    ret, resp = handler.putfile(bucket, key, local_file)
    # NOTE(review): 'assert' is stripped under 'python -O'; an explicit check
    # raising an exception would verify the 200 status more robustly.
    assert resp.status_code == 200
| [
"root@10-13-181-13.(none)"
] | root@10-13-181-13.(none) |
f721745c59dfa425155103c807994cc344f7ce31 | 93039551fbdef0a112a9c39181d30b0c170eb3a6 | /day03/day03HomeWork.py | a399b93ad6e0b969a53525cf311c62598023889e | [] | no_license | wenzhe980406/PythonLearning | 8714de8a472c71e6d02b6de64efba970a77f6f4a | af0e85f0b11bf9d2f8e690bac480b92b971c01bb | refs/heads/master | 2020-07-14T20:46:45.146134 | 2020-05-28T12:16:21 | 2020-05-28T12:16:21 | 205,398,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,561 | py | # _*_ coding : UTF-8 _*_
# 开发人员 : ChangYw
# 开发时间 : 2019/7/17 17:33
# 文件名称 : day03HomeWork.PY
# 开发工具 : PyCharm
#1
# #1)
# if __name__ == "__main__":
# score = []
#
# #2)
# if __name__ == "__main__":
# score.append(68)
# score.append(87)
# score.append(92)
# score.append(100)
# score.append(76)
# score.append(88)
# score.append(54)
# score.append(89)
# score.append(76)
# score.append(61)
#
# #3)
# if __name__ == "__main__":
# print(score[2])
#
# #4)
# if __name__ == "__main__":
# print(score[:6])
#
# #5)
# if __name__ == "__main__":
# score.insert(3,59)
# print(score)
#
# #6)
# if __name__ == "__main__":
# num = score.count(76)
# print(num)
#
# #7)
# if __name__ == "__main__":
# print(55 in score)
#
# #8)
# if __name__ == "__main__":
# print(score.index(68)+19000100)
# print(score.index(87)+19000100)
# print(score.index(92)+19000100)
# print(score.index(100)+19000100)
#
# #9)
# if __name__ == "__main__":
# score[3] = score[3] + 1
# print(score[3])
#
# #10)
# if __name__ == "__main__":
# del score[0]
# print(score)
#11)
# if __name__ == "__main__":
# print(score.__len__())
# print(len(score))
# #12)
# if __name__ == "__main__":
# score.sort()
# print(score)
# print(min(score))
# print(max(score))
# #13)!!
# if __name__ == "__main__":
# print(list(reversed(score)))
#14)
# if __name__ == "__main__":
# del score[-1]
# print(score)
# #15)???如何定位第一个值为88的字符?
# if __name__ == "__main__":
# score.append(88)
# del score[6]
# #16)
# if __name__ == "__main__":
# score1 = [80,61]
# score2 = [71,95,82]
# score = score1.append(score2)
# print(score)
#17)
# if __name__ == "__main__":
# score1 = [80,61]
# score2 = score1 *5
# print(score2)
#2)
import random
# if __name__ == "__main__":
# #1)入栈(先入后出,后入先出)
# score1 = [70,45,15,48,25,70,75,35,76,88]
# score2 = [22,84,63]
# score = score1 + score2
# # 2)出栈
# del score[0:2]
# print(score)
# #3)查看栈顶的元素
# print(score[-1])
# print(score.pop())
# #4)查看栈的长度
# print(len(score))
# #5)判断栈是否为空
# if score is None :
# print("score is null")
# else:
# print("score is not null")
# #6)退出程序。
# exit(0)
#3)
# if __name__ == "__main__":
# comm_list = ["T恤", "长裤", "鞋子", "饮料", "餐巾纸", "手机", "电脑", "防晒霜", "疯狂Python书", "椅子"]
# comm_price = ["88", "108", "168", "38", "18", "6288", "5288", "108", "68", "228"]
#
# money = input("请输入你的余额:")
# if (not money.isdigit()):
# print("请输入一个正整数:")
# print("输入成功,即将进入主界面")
# # money = int(money)
# while True :
# print("--------------")
# print("您的余额为:",money)
# print("--------------")
# print("1.显示余额")
# print("2.充值")
# print("3.显示商品")
# print("4.显示商品价格")
# print("5.购买商品")
# print("6.退出程序")
#
# choice = input("请输入你的选择:")
# if (not choice.isdigit()):
# print("请输入一个正整数:")
# choice = int(choice)
# if choice <0 and choice > 5 :
# print("请正确输入界面选项!")
#
# if choice == 1 :
# continue
# elif choice == 2 :
# money_invest = input("请输入充值金额:(充值金额为正整数)")
# if (not money_invest.isdigit()):
# print("请正确输入您的充值金额,金额为正整数:")
# else:
# # money_invest = int(money_invest)
# money = int(money)
# money_invest = int(money_invest)
# money = money + money_invest
# print("本次充值金额为:", money_invest, "元", "充值后的金额为:", money, "元")
# elif choice == 3:
# print("商品列表为:")
# for i in comm_list :
# print(i,end=" ")
# print()
# elif choice == 4:
# print("商品列表对应价格为:")
# for i in comm_price :
# print(i,end=" ")
# print()
# elif choice == 5:
# comm_choice = input("请输入要购买的商品:")
# money = int(money)
# if not comm_choice in comm_list:
# print("没有",comm_choice,"这款的商品,请按照列表重新输入")
# for i in comm_list:
# print(i, end=" ")
# print()
# elif comm_choice in comm_list:
# print("您选中了", comm_choice)
# comm_buy_num = int(comm_list.index(comm_choice))
# comm_buy_money = int(comm_price[comm_buy_num])
# if int(money) < int(comm_buy_money) :
# print("您的余额不足,请及时充值!")
# else:
# print("您的余额为:", money, "购买", comm_choice, "即将扣除", comm_buy_money, "元,请稍后")
# money = int(money - comm_buy_money)
# print("购买成功,您的余额还剩余:",money,"元")
# elif comm_choice is None :
# print("既然选择要买了,那可就要买一个哟!")
# elif (comm_choice.isdigit()):
# print("请正确输入您想要购买的商品:")
# elif choice == 6 :
# print("谢谢光临,欢迎下次光临!")
# exit(0)
#
#4
# if __name__ == "__main__":
# #7.3 True
# print('abc' in ('abcdefg'))
# #7.4 True
# print('abc' in ('abcdefg'))
# #7.5 True
# print('\x41'=='A')
# #7.6 hello world!
# print(''.join(list('hello world!')))
# #7.7 换行
# # print('\n')
# #7.8 为啥是3
# x = ['11','2','3']
# print(max(x))
# #7.9 11
# print(min(['11','2','3']))
#7.10 11
# x = ['11', '2', '3']
# print(max(x,key=len))
# #7.11 c:\test.htm
# path = r'c:\test.html'
# print(path[:-4]+'htm')
# #7.12 False
# print(list(str([1,2,3])) == [1,2,3])
# #7.13 [1,2,3]
# print(str([1,2,3]))
# #7.14 (1,2,3)
# print(str((1,2,3)))
# #7.15 1+3+5+7+9=25
# print(sum(range(1,10,2)))
# #7.16 1+2+3+4+5+6+7+8+9=45
# print(sum(range(1,10)))
# #7.17 A
# print('%c'%65)
# #7.18 65
# print('%s'%65)
# #7.19 65,A
# print('%d,%c'%(65,65))
# #7.20 The first:97,the second is 65
# print('The first:{1},the second is {0}'.format(65,97))
# #7.21 65,0x41,0o101
# print('{0:#d},{0:#x},{0:#o}'.format(65))
# #7.22 True
# print(isinstance('abcdefg',str))
# #7.23 True
# print(isinstance('abcdefg',object))
# #7.24 True
# print(isinstance(3,object))
# #7.25 6
# print('abcabcabc'.rindex('abc'))
# #7.26 ab:efg
# print(':'.join('abcdefg'.split('cd')))
# #7.27 -1
# print('Hello world.I like Python.'.rfind('python'))
# #7.28 3
# print('abcabcabc'.count('abc'))
# #7.29 1
# print('apple.peach,banana,pear'.find('p'))
# #7.30 -1
# print('apple.peach,banana,pear'.find('ppp'))
# #7.31 ['abcdefg']
# print('abcdefg'.split(','))
# #7.32 1:2:3:4:5
# print(':'.join('1,2,3,4,5'.split(',')))
# #7.33 a,b,ccc,ddd
# print(','.join('a b ccc\n\n\nddd '.split()))
# #7.34 ??? 345
# x = {i:str(i+3) for i in range(3)}
# print(''.join([item[1] for item in x.items()]))
# #7.35 HELLO WORLD
# print('Hello world'.upper())
# #7.36 hello world
# print('Hello world'.lower())
# #7.37 HELLO WORLD
# print('Hello world'.lower().upper())
# #7.38 Hello world
# print('Hello world'.swapcase().swapcase())
# #7.39 True
# print(r'c:\windows\notepad.exe'.endswith('.exe'))
# #7.40
# print(r'c:\windows\notepad.exe'.endswith('.jpg','.exe'))
# #7.41 True
# print(r'C:\\Windows\\notepad.exe'.startswith('C:'))
# #7.42 20
# print(len('Hello world!'.ljust(20))) | [
"noreply@github.com"
] | noreply@github.com |
8b0b8a90a7800f75c7f3754a8069e53ce9b2bf41 | 9ad4200223a12e19538cfc4ae385a22ae6d5510b | /utils/queue.py | 11042a72ecc444b7516b92f84c0e05d9b812d6a9 | [] | no_license | pi-tau/fun-with-algorithms | 29037266426940bc651a158eb94aa61e4291b8b9 | 341bdc7d144d18b49917453006461d670109e706 | refs/heads/master | 2023-03-16T01:38:17.240160 | 2021-01-15T08:16:16 | 2021-01-15T08:16:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | """ A queue is a collection of objects that are inserted and removed
according to the first-in, first-out (FIFO) principle.
The queue data structure supports the following accessor methods:
    Q.first(): Return the element at the front of the queue Q.
Q.is_empty(): Return True if the queue Q is empty.
len(Q): Return the total number of elements in the queue Q.
The queue data structure supports the following mutator methods:
Q.enqueue(elem): Add an element to the back of the queue Q.
Q.dequeue(): Remove and return the first element from the queue Q.
"""
from .linked_list import DoublyLinkedList
class Queue:
    """FIFO queue backed by a doubly linked list: enqueue at the back,
    dequeue from the front. (The module docstring calls the front accessor
    'front()'; the implemented name is 'first()'.)"""
    #--------------- queue initializer ----------------#
    def __init__(self):
        """ Initialize an empty queue. """
        self._container = DoublyLinkedList()
    #---------------- public accessors ----------------#
    def first(self):
        """ Return the element at the front of the queue.
        NOTE(review): behaviour on an empty queue is whatever
        DoublyLinkedList.first() does - confirm it raises rather than
        returning None. """
        return self._container.first().elem()
    def is_empty(self):
        """ Return True if the queue is empty. """
        return self._container.is_empty()
    def __len__(self):
        """ Return the total number of elements in the queue. """
        return len(self._container)
    #---------------- public mutators ----------------#
    def enqueue(self, elem):
        """ Add an element to the back of the queue. """
        self._container.add_last(elem)
    def dequeue(self):
        """ Remove and return the first element from the queue. """
        return self._container.delete(self._container.first())
# | [
"pavel.z.tashev@protonmail.com"
] | pavel.z.tashev@protonmail.com |
91f2e18c0ffd9f7e62905e02ed08b8fbe643972a | 455656d51975be13614fd2f7a577afb9d203bc02 | /Aula 06/Exemplos_aula06.py | b2d7a2ee1112ee45d441f91ded93e252f47eb7c1 | [] | no_license | wandersonDeve/Blue_T3C5_mod1 | e75a8af489bac0e188dc2b719ca142d9052c1b7e | cc91363011e63b5f31b654511ae18f54d097595d | refs/heads/main | 2023-06-19T05:11:23.872156 | 2021-07-18T14:42:55 | 2021-07-18T14:42:55 | 375,507,328 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from time import sleep
nome = 'Wanderson'
# "Typewriter" effect: print one letter at a time on the same line,
# flushing immediately and pausing 200 ms between letters.
for letra in nome:
    print(f'{letra}', end='',flush=True)
    sleep(0.2)
| [
"81826043+wandersonDeve@users.noreply.github.com"
] | 81826043+wandersonDeve@users.noreply.github.com |
5b54be2d8925014bf36e0946272f750a96fdcdf4 | 09d3a1687182125444fe508eaab580fd4c95512e | /Problem 4.py | db12228d792c2328104660d601122c6e5a5d0f95 | [] | no_license | atulyaatul1999/project-2 | cfb002b0945d0527890d7abe15f9da67da57ee83 | de60765b4f0d4b0c31e076cca8e8cfa9b1604e22 | refs/heads/master | 2022-06-12T03:51:28.519970 | 2020-05-03T22:26:58 | 2020-05-03T22:26:58 | 260,627,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | class Group():
    def __init__(self, _name):
        """Create a group with the given display name and no members yet."""
        self.name = _name
        self.groups = []  # nested sub-Group objects
        self.users = []   # user identifiers directly in this group
    def add_group(self, group):
        """Attach ``group`` as an immediate subgroup."""
        self.groups.append(group)
    def add_user(self, user):
        """Register ``user`` as a direct member of this group."""
        self.users.append(user)
    def get_groups(self):
        """Return the list of immediate subgroups."""
        return self.groups
    def get_users(self):
        """Return the list of direct users (subgroup members excluded)."""
        return self.users
    def get_name(self):
        """Return this group's display name."""
        return self.name
def is_user_in_group(user, group):
    """Return True if ``user`` belongs to ``group`` or to any of its
    (recursively nested) subgroups, False otherwise.
    """
    # Check the group's immediate, directly visible users first.
    if user in group.get_users():
        return True
    # Recurse through the subgroups; any() stops at the first hit.
    # (Cleanup: the old code bound them to a variable named 'list',
    # shadowing the builtin.)
    return any(is_user_in_group(user, subgroup) for subgroup in group.get_groups())
# Demo: parent -> child -> subchild hierarchy.
parent = Group("parent")
child = Group("child")
sub_child = Group("subchild")
sub_child.add_user("sub_child_user")
child.add_group(sub_child)
parent.add_group(child)
parent.add_user("shaktiman")
print(is_user_in_group("sub_child_user",parent))
# True: "sub_child_user" is a user of sub_child, which is nested (via child) under parent
print(is_user_in_group("sub_child_user",sub_child))
# True: "sub_child_user" is a direct user of sub_child
print(is_user_in_group(child,parent))
# False: the Group object 'child' is looked up as a *user*; membership only
# matches entries of users lists, and child appears only as a subgroup
sub_child2=Group("sub_child2")
sub_child2.add_user("sub_child_user2")
parent.add_group(sub_child2)
print(is_user_in_group("sub_child_user2",child))
# False: "sub_child_user2" lives in sub_child2, which hangs off parent, not off child
print(is_user_in_group("sub_child_user2",parent))
# True: "sub_child_user2" is a user of sub_child2, which is in parent's group list
| [
"59124269+atulyaatul1999@users.noreply.github.com"
] | 59124269+atulyaatul1999@users.noreply.github.com |
c9772b396e6939e1731613d1b33ef2a3156f7d45 | 5fa5fca593f74a71964003096e7a62f556e5045b | /content/migrations/0002_auto_20210324_1446.py | 9f4c566028c42b3d6e2b4b10fa7abea972e26c08 | [] | no_license | ibrahimciftci/Python-Django-DernekProjesi | 91981db6db69662bdd8e46efe0f08bef05155e43 | 182f4eacb3b8d69dde13a879ecbe95d73ce31575 | refs/heads/main | 2023-05-09T00:58:47.930029 | 2021-06-02T15:39:32 | 2021-06-02T15:39:32 | 349,974,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # Generated by Django 3.1.7 on 2021-03-24 11:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the self-referencing Category
    model and drops the obsolete 'user' model."""
    dependencies = [
        ('content', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('keywords', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('status', models.CharField(choices=[('True', 'Evet'), ('False', 'Hayır')], max_length=10)),
                ('slug', models.SlugField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Self-referencing FK: a null parent marks a top-level category.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='content.category')),
            ],
        ),
        migrations.DeleteModel(
            name='user',
        ),
    ]
| [
"ibrhm.cftci@hotmail.com"
] | ibrhm.cftci@hotmail.com |
02b39ec4aaf52e26f7c63601e86ed3348320ec60 | 836be3cde86848810ec221254f0c934f09b6062c | /array_sum/array_sum_sol1.py | 109ca8e69dd7178dabab5fd746a9c9d2e0ead341 | [] | no_license | kvijayenderreddy/2020_Devops_automation | c8b26af009dc0b4972fa970600beb6877ae11b56 | d5bc5ec831d6fe25f05acde325482fb95e25ffdb | refs/heads/master | 2022-04-08T17:15:23.991803 | 2020-03-11T00:46:07 | 2020-03-11T00:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # Time Complexity: O(n)
# Space Complexity: O(1)
def arr_sum(arr, sum):
    """Sort ``arr`` in place, then print every pair of values (low high)
    whose sum equals ``sum``, using two converging pointers."""
    arr.sort()
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        pair_total = arr[lo] + arr[hi]
        if pair_total > sum:
            hi -= 1      # pair too large: step the high pointer down
        elif pair_total < sum:
            lo += 1      # pair too small: step the low pointer up
        else:
            print(arr[lo], arr[hi])
            lo += 1      # report the pair, then keep scanning from the left
| [
"devops.everyday.challenge@gmail.com"
] | devops.everyday.challenge@gmail.com |
d14b0abe4d34150707d282e67e01ee046841b36f | a4c9cd65c69ba6dcb90d16beeb5e3daf3f04fc55 | /trees/binary_trees/threaded_binary_tree.py | d030d5d2d3c8be4e6355ec89af123e327a87643b | [
"MIT"
] | permissive | Hackerman272/python-sample-code | ee9e41728eff9f792362b8b8e03cbd95671a072b | 5cea78b30b4b8f1ad3dcedc952fca56103638892 | refs/heads/main | 2023-08-22T12:16:26.378368 | 2021-07-04T06:36:38 | 2021-07-04T06:36:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,161 | py | # Copyright © 2021 by Shun Huang. All rights reserved.
# Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Threaded Binary Search Trees."""
from dataclasses import dataclass
from typing import Any, Optional
from trees import tree_exceptions
from trees.binary_trees import binary_tree
@dataclass
class SingleThreadNode(binary_tree.Node):
    """Single threaded tree node: when ``isThread`` is True, ``right`` holds
    an in-order successor thread instead of a child link."""
    left: Optional["SingleThreadNode"] = None
    right: Optional["SingleThreadNode"] = None  # right child, or successor thread
    parent: Optional["SingleThreadNode"] = None
    isThread: bool = False  # True -> ``right`` is a thread, not a child
@dataclass
class DoubleThreadNode(binary_tree.Node):
    """Double threaded tree node: each side carries its own thread flag, so
    ``left``/``right`` act as predecessor/successor threads when the
    corresponding flag is True."""
    left: Optional["DoubleThreadNode"] = None  # left child, or predecessor thread
    right: Optional["DoubleThreadNode"] = None  # right child, or successor thread
    parent: Optional["DoubleThreadNode"] = None
    leftThread: bool = False  # True -> ``left`` is a thread
    rightThread: bool = False  # True -> ``right`` is a thread
class RightThreadedBinaryTree(binary_tree.BinaryTree):
"""Right Threaded Binary Tree.
Attributes
----------
root: `Optional[SingleThreadNode]`
The root node of the right threaded binary search tree.
empty: `bool`
`True` if the tree is empty; `False` otherwise.
Methods
-------
search(key: `Any`)
Look for a node based on the given key.
insert(key: `Any`, data: `Any`)
Insert a (key, data) pair into the tree.
delete(key: `Any`)
Delete a node based on the given key from the tree.
inorder_traverse()
In-order traversal by using the right threads.
preorder_traverse()
Pre-order traversal by using the right threads.
get_leftmost(node: `SingleThreadNode`)
Return the node whose key is the smallest from the given subtree.
get_rightmost(node: `SingleThreadNode`)
Return the node whose key is the biggest from the given subtree.
get_successor(node: `SingleThreadNode`)
Return the successor node in the in-order order.
get_predecessor(node: `SingleThreadNode`)
Return the predecessor node in the in-order order.
get_height(node: `Optional[SingleThreadNode]`)
Return the height of the given node.
Examples
--------
>>> from trees.binary_trees import threaded_binary_tree
>>> tree = threaded_binary_tree.RightThreadedBinaryTree()
>>> tree.insert(key=23, data="23")
>>> tree.insert(key=4, data="4")
>>> tree.insert(key=30, data="30")
>>> tree.insert(key=11, data="11")
>>> tree.insert(key=7, data="7")
>>> tree.insert(key=34, data="34")
>>> tree.insert(key=20, data="20")
>>> tree.insert(key=24, data="24")
>>> tree.insert(key=22, data="22")
>>> tree.insert(key=15, data="15")
>>> tree.insert(key=1, data="1")
>>> [item for item in tree.inorder_traverse()]
[(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
(22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
>>> [item for item in tree.preorder_traverse()]
[(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
(22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
>>> tree.get_leftmost().key
1
>>> tree.get_leftmost().data
'1'
>>> tree.get_rightmost().key
34
>>> tree.get_rightmost().data
    '34'
>>> tree.get_height(tree.root)
4
>>> tree.search(24).data
    '24'
>>> tree.delete(15)
"""
    def __init__(self):
        """Create an empty right-threaded binary search tree."""
        binary_tree.BinaryTree.__init__(self)
# Override
def search(self, key: Any) -> SingleThreadNode:
"""Look for a node by a given key.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.search`.
"""
current = self.root
while current:
if key == current.key:
return current # type: ignore
elif key < current.key:
current = current.left
else: # key > current.key
if current.isThread is False:
current = current.right
else:
break
raise tree_exceptions.KeyNotFoundError(key=key)
    # Override
    def insert(self, key: Any, data: Any):
        """Insert a (key, data) pair into the right threaded binary tree.
        Walks down like a plain BST insert while maintaining the invariant
        that a node without a real right child keeps, in ``right``, a thread
        to its in-order successor (``isThread`` True).
        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.insert`.
        """
        node = SingleThreadNode(key=key, data=data)
        if self.root is None:
            self.root = node
        else:
            temp = self.root
            while temp:
                # Move to left subtree
                if node.key < temp.key:
                    if temp.left:
                        temp = temp.left
                        continue
                    else:
                        # Become temp's left child; temp is the new node's
                        # in-order successor, so thread right to it.
                        temp.left = node
                        node.right = temp
                        node.isThread = True
                        node.parent = temp
                        break
                # Move to right subtree
                elif node.key > temp.key:
                    if temp.isThread is False and temp.right:
                        temp = temp.right
                        continue
                    else:
                        # Become temp's right child: inherit temp's thread
                        # (temp's old successor is now ours) and turn temp's
                        # right pointer into a real child link.
                        node.right = temp.right
                        temp.right = node
                        node.isThread = temp.isThread
                        temp.isThread = False
                        node.parent = temp
                        break
                else:
                    raise tree_exceptions.DuplicateKeyError(key=key)
    # Override
    def delete(self, key: Any):
        """Delete the node by the given key.
        Four cases, mirroring plain BST deletion, but each case must also
        repair any right thread that pointed at the removed node.
        (``self._transplant`` - not visible in this chunk - splices the
        replacing node into the deleted node's position.)
        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.delete`.
        """
        if self.root:
            deleting_node = self.search(key=key)
            # The deleting node has no child
            # (a threaded right pointer is a successor link, not a child).
            if deleting_node.left is None and (
                deleting_node.right is None or deleting_node.isThread
            ):
                self._transplant(deleting_node=deleting_node, replacing_node=None)
            # The deleting node has only one right child
            elif deleting_node.left is None and deleting_node.isThread is False:
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.right
                )
            # The deleting node has only one left child:
            # re-point the predecessor's thread past the removed node.
            elif deleting_node.left and deleting_node.isThread:
                predecessor = self.get_predecessor(node=deleting_node)
                if predecessor:
                    predecessor.right = deleting_node.right
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.left
                )
            # The deleting node has two children:
            # replace it with the leftmost node of its right subtree.
            elif (
                deleting_node.left
                and deleting_node.right
                and deleting_node.isThread is False
            ):
                predecessor = self.get_predecessor(node=deleting_node)
                replacing_node: SingleThreadNode = self.get_leftmost(
                    node=deleting_node.right
                )
                # the minimum node is not the direct child of the deleting node
                if replacing_node.parent != deleting_node:
                    if replacing_node.isThread:
                        self._transplant(
                            deleting_node=replacing_node, replacing_node=None
                        )
                    else:
                        self._transplant(
                            deleting_node=replacing_node,
                            replacing_node=replacing_node.right,
                        )
                    replacing_node.right = deleting_node.right
                    replacing_node.right.parent = replacing_node
                    replacing_node.isThread = False
                self._transplant(
                    deleting_node=deleting_node, replacing_node=replacing_node
                )
                replacing_node.left = deleting_node.left
                replacing_node.left.parent = replacing_node
                # The predecessor's thread must now target the node that
                # took the deleted node's place.
                if predecessor and predecessor.isThread:
                    predecessor.right = replacing_node
            else:
                raise RuntimeError("Invalid case. Should never happened")
# Override
def get_leftmost(self, node: SingleThreadNode) -> SingleThreadNode:
"""Return the leftmost node from a given subtree.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_leftmost`.
"""
current_node = node
while current_node.left:
current_node = current_node.left
return current_node
# Override
def get_rightmost(self, node: SingleThreadNode) -> SingleThreadNode:
"""Return the rightmost node from a given subtree.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_rightmost`.
"""
current_node = node
while current_node.isThread is False and current_node.right:
current_node = current_node.right
return current_node
# Override
def get_successor(self, node: SingleThreadNode) -> Optional[SingleThreadNode]:
"""Return the successor node in the in-order order.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_successor`.
"""
if node.isThread:
return node.right
else:
if node.right:
return self.get_leftmost(node=node.right)
# if node.right is None, it means no successor of the given node.
return None
# Override
def get_predecessor(self, node: SingleThreadNode) -> Optional[SingleThreadNode]:
"""Return the predecessor node in the in-order order.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_predecessor`.
"""
if node.left:
return self.get_rightmost(node=node.left)
parent = node.parent
while parent and node == parent.left:
node = parent
parent = parent.parent
return parent
# Override
def get_height(self, node: Optional[SingleThreadNode]) -> int:
"""Return the height of the given node.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_height`.
"""
if node is None:
return 0
if node.left is None and node.isThread:
return 0
return max(self.get_height(node.left), self.get_height(node.right)) + 1
def inorder_traverse(self) -> binary_tree.Pairs:
"""Use the right threads to traverse the tree in in-order order.
Yields
------
`Pairs`
The next (key, data) pair in the tree in-order traversal.
"""
if self.root:
current: Optional[SingleThreadNode] = self.get_leftmost(node=self.root)
while current:
yield (current.key, current.data)
if current.isThread:
current = current.right
else:
if current.right is None:
break
current = self.get_leftmost(current.right)
def preorder_traverse(self) -> binary_tree.Pairs:
"""Use the right threads to traverse the tree in pre-order order.
Yields
------
`Pairs`
The next (key, data) pair in the tree pre-order traversal.
"""
current = self.root
while current:
yield (current.key, current.data)
if current.isThread:
current = current.right.right
else:
current = current.left
    def _transplant(
        self,
        deleting_node: SingleThreadNode,
        replacing_node: Optional[SingleThreadNode],
    ):
        """Replace ``deleting_node`` with ``replacing_node`` in its parent.

        Internal helper for `delete`: rewires the parent's child pointer and
        patches the right-thread bookkeeping around the removed node.
        """
        if deleting_node.parent is None:
            # Deleting the root: the replacement (if any) becomes the new
            # root, and a root never carries a thread.
            self.root = replacing_node
            if self.root:
                self.root.isThread = False
        elif deleting_node == deleting_node.parent.left:
            deleting_node.parent.left = replacing_node
            if replacing_node:
                if deleting_node.isThread:
                    if replacing_node.isThread:
                        # NOTE(review): self-assignment is a no-op.  By
                        # symmetry with the left-threaded variant (which
                        # assigns from the deleted node), this was probably
                        # meant to be
                        # `replacing_node.right = deleting_node.right`;
                        # confirm against the test suite before changing.
                        replacing_node.right = replacing_node.right
        else:  # deleting_node == deleting_node.parent.right
            deleting_node.parent.right = replacing_node
            if replacing_node:
                if deleting_node.isThread:
                    if replacing_node.isThread:
                        # NOTE(review): same suspected no-op as above.
                        replacing_node.right = replacing_node.right
            else:
                # No replacement: the parent's right pointer becomes a
                # thread to the deleted node's in-order successor.
                deleting_node.parent.right = deleting_node.right
                deleting_node.parent.isThread = True
        if replacing_node:
            replacing_node.parent = deleting_node.parent
class LeftThreadedBinaryTree(binary_tree.BinaryTree):
    """Left Threaded Binary Tree.

    A binary search tree whose empty ``left`` pointers are reused as
    *threads* to the in-order predecessor (flagged by ``isThread``), so the
    tree can be walked in reversed in-order without a stack or recursion.

    Attributes
    ----------
    root: `Optional[SingleThreadNode]`
        The root node of the left threaded binary search tree.
    empty: `bool`
        `True` if the tree is empty; `False` otherwise.

    Methods
    -------
    search(key: `Any`)
        Look for a node based on the given key.
    insert(key: `Any`, data: `Any`)
        Insert a (key, data) pair into the tree.
    delete(key: `Any`)
        Delete a node based on the given key from the tree.
    reverse_inorder_traverse()
        Reversed In-order traversal by using the left threads.
    get_leftmost(node: `SingleThreadNode`)
        Return the node whose key is the smallest from the given subtree.
    get_rightmost(node: `SingleThreadNode`)
        Return the node whose key is the biggest from the given subtree.
    get_successor(node: `SingleThreadNode`)
        Return the successor node in the in-order order.
    get_predecessor(node: `SingleThreadNode`)
        Return the predecessor node in the in-order order.
    get_height(node: `Optional[SingleThreadNode]`)
        Return the height of the given node.

    Examples
    --------
    >>> from trees.binary_trees import threaded_binary_tree
    >>> tree = threaded_binary_tree.LeftThreadedBinaryTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in tree.reverse_inorder_traverse()]
    [(34, '34'), (30, '30'), (24, '24'), (23, '23'), (22, '22'),
    (20, '20'), (15, '15'), (11, '11'), (7, '7'), (4, '4'), (1, '1')]
    >>> tree.get_leftmost().key
    1
    >>> tree.get_leftmost().data
    '1'
    >>> tree.get_rightmost().key
    34
    >>> tree.get_rightmost().data
    '34'
    >>> tree.get_height(tree.root)
    4
    >>> tree.search(24).data
    '24'
    >>> tree.delete(15)
    """

    def __init__(self):
        binary_tree.BinaryTree.__init__(self)

    # Override
    def search(self, key: Any) -> SingleThreadNode:
        """Look for a node by a given key.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.search`.
        """
        current = self.root
        while current:
            if key == current.key:
                return current  # type: ignore
            elif key < current.key:
                # Descend left only over a real child; a thread would jump
                # back up to a predecessor, so the key cannot be below.
                if current.isThread is False:
                    current = current.left
                else:
                    break
            else:  # key > current.key:
                current = current.right
        raise tree_exceptions.KeyNotFoundError(key=key)

    # Override
    def insert(self, key: Any, data: Any):
        """Insert a (key, data) pair into the left threaded binary tree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.insert`.
        """
        node = SingleThreadNode(key=key, data=data)
        if self.root is None:
            self.root = node
        else:
            temp = self.root
            while temp:
                # Move to right subtree
                if node.key > temp.key:
                    if temp.right:
                        temp = temp.right
                        continue
                    else:
                        # New right child: thread its left pointer back to
                        # the parent, which is its in-order predecessor.
                        temp.right = node
                        node.left = temp
                        node.isThread = True
                        node.parent = temp
                        break
                # Move to left subtree
                elif node.key < temp.key:
                    if temp.isThread is False and temp.left:
                        temp = temp.left
                        continue
                    else:
                        # New left child: it inherits the parent's thread
                        # (or lack of one), and the parent's left pointer
                        # becomes a real child link.
                        node.left = temp.left
                        temp.left = node
                        node.isThread = temp.isThread
                        temp.isThread = False
                        node.parent = temp
                        break
                else:
                    raise tree_exceptions.DuplicateKeyError(key=key)

    # Override
    def delete(self, key: Any):
        """Delete the node by the given key.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.delete`.
        """
        if self.root:
            deleting_node = self.search(key=key)
            # The deleting node has no child
            if deleting_node.right is None and (
                deleting_node.left is None or deleting_node.isThread
            ):
                self._transplant(deleting_node=deleting_node, replacing_node=None)
            # The deleting node has only one right child,
            elif deleting_node.right and deleting_node.isThread:
                # Re-point the successor's left thread before splicing.
                successor = self.get_successor(node=deleting_node)
                if successor:
                    successor.left = deleting_node.left
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.right
                )
            # The deleting node has only one left child
            elif (deleting_node.right is None) and (deleting_node.isThread is False):
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.left
                )
            # The deleting node has two children
            elif deleting_node.right and deleting_node.left:
                # Replace with the smallest node of the right subtree.
                replacing_node: SingleThreadNode = self.get_leftmost(
                    node=deleting_node.right
                )
                successor = self.get_successor(node=replacing_node)
                # the minimum node is not the direct child of the deleting node
                if replacing_node.parent != deleting_node:
                    if replacing_node.isThread:
                        self._transplant(
                            deleting_node=replacing_node, replacing_node=None
                        )
                    else:
                        self._transplant(
                            deleting_node=replacing_node,
                            replacing_node=replacing_node.right,
                        )
                    replacing_node.right = deleting_node.right
                    replacing_node.right.parent = replacing_node
                self._transplant(
                    deleting_node=deleting_node, replacing_node=replacing_node
                )
                replacing_node.left = deleting_node.left
                replacing_node.left.parent = replacing_node
                replacing_node.isThread = False
                # Keep the successor's left thread pointing at the subtree
                # position the replacement now occupies.
                if successor and successor.isThread:
                    successor.left = replacing_node
            else:
                raise RuntimeError("Invalid case. Should never happened")

    # Override
    def get_leftmost(self, node: SingleThreadNode) -> SingleThreadNode:
        """Return the leftmost node from a given subtree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_leftmost`.
        """
        current_node = node
        # Stop when the left pointer is missing or is only a thread.
        while current_node.left and current_node.isThread is False:
            current_node = current_node.left
        return current_node

    # Override
    def get_rightmost(self, node: SingleThreadNode) -> SingleThreadNode:
        """Return the rightmost node from a given subtree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_rightmost`.
        """
        current_node = node
        if current_node:
            # Right pointers are always real child links in this variant.
            while current_node.right:
                current_node = current_node.right
        return current_node

    # Override
    def get_successor(self, node: SingleThreadNode) -> Optional[SingleThreadNode]:
        """Return the successor node in the in-order order.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_successor`.
        """
        if node.right:
            return self.get_leftmost(node=node.right)
        # No right subtree: climb until we come up from a left child; that
        # ancestor is the successor (None when the node is the rightmost).
        parent = node.parent
        while parent and node == parent.right:
            node = parent
            parent = parent.parent
        return parent

    # Override
    def get_predecessor(self, node: SingleThreadNode) -> Optional[SingleThreadNode]:
        """Return the predecessor node in the in-order order.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_predecessor`.
        """
        if node.isThread:
            # The left thread points straight at the predecessor.
            return node.left
        else:
            if node.left:
                return self.get_rightmost(node=node.left)
            # if node.left is None, it means no predecessor of the given node.
            return None

    # Override
    def get_height(self, node: Optional[SingleThreadNode]) -> int:
        """Return the height of the given node.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_height`.
        """
        if node is None:
            return 0
        # A node whose left pointer is only a thread and that has no right
        # child is a leaf.
        if node.isThread and node.right is None:
            return 0
        # NOTE(review): for a node with a right child whose left pointer is
        # a thread, the recursion below still follows that thread back up to
        # an ancestor -- confirm this cannot recurse endlessly.
        return max(self.get_height(node.left), self.get_height(node.right)) + 1

    def reverse_inorder_traverse(self) -> binary_tree.Pairs:
        """Use the left threads to traverse the tree in reversed in-order.

        Yields
        ------
        `Pairs`
            The next (key, data) pair in the tree reversed in-order traversal.
        """
        if self.root:
            current: Optional[SingleThreadNode] = self.get_rightmost(node=self.root)
            while current:
                yield (current.key, current.data)
                if current.isThread:
                    # Follow the thread straight to the predecessor.
                    current = current.left
                else:
                    if current.left is None:
                        # Leftmost node reached: traversal complete.
                        break
                    current = self.get_rightmost(current.left)

    def _transplant(
        self,
        deleting_node: SingleThreadNode,
        replacing_node: Optional[SingleThreadNode],
    ):
        # Internal helper for `delete`: rewire the parent's child pointer
        # and patch the left-thread bookkeeping around the removed node.
        if deleting_node.parent is None:
            # Deleting the root: a root never carries a thread.
            self.root = replacing_node
            if self.root:
                self.root.isThread = False
        elif deleting_node == deleting_node.parent.left:
            deleting_node.parent.left = replacing_node
            if replacing_node:
                if deleting_node.isThread:
                    if replacing_node.isThread:
                        # The replacement inherits the deleted node's
                        # predecessor thread.
                        replacing_node.left = deleting_node.left
            else:
                # No replacement: the parent's left pointer becomes a thread
                # to the deleted node's in-order predecessor.
                deleting_node.parent.left = deleting_node.left
                deleting_node.parent.isThread = True
        else:  # deleting_node == deleting_node.parent.right
            deleting_node.parent.right = replacing_node
            if replacing_node:
                if deleting_node.isThread:
                    if replacing_node.isThread:
                        replacing_node.left = deleting_node.left
        if replacing_node:
            replacing_node.parent = deleting_node.parent
class DoubleThreadedBinaryTree(binary_tree.BinaryTree):
    """Double Threaded Binary Tree.

    A binary search tree in which empty ``left`` pointers thread to the
    in-order predecessor and empty ``right`` pointers thread to the in-order
    successor (the ``leftThread``/``rightThread`` flags mark which pointers
    are threads), enabling stackless traversal in both directions.

    Attributes
    ----------
    root: `Optional[DoubleThreadNode]`
        The root node of the double threaded binary search tree.
    empty: `bool`
        `True` if the tree is empty; `False` otherwise.

    Methods
    -------
    search(key: `Any`)
        Look for a node based on the given key.
    insert(key: `Any`, data: `Any`)
        Insert a (key, data) pair into the tree.
    delete(key: `Any`)
        Delete a node based on the given key from the tree.
    inorder_traverse()
        In-order traversal by using the right threads.
    preorder_traverse()
        Pre-order traversal by using the right threads.
    reverse_inorder_traverse()
        Reversed In-order traversal by using the left threads.
    get_leftmost(node: `DoubleThreadNode`)
        Return the node whose key is the smallest from the given subtree.
    get_rightmost(node: `DoubleThreadNode`)
        Return the node whose key is the biggest from the given subtree.
    get_successor(node: `DoubleThreadNode`)
        Return the successor node in the in-order order.
    get_predecessor(node: `DoubleThreadNode`)
        Return the predecessor node in the in-order order.
    get_height(node: `Optional[DoubleThreadNode]`)
        Return the height of the given node.

    Examples
    --------
    >>> from trees.binary_trees import threaded_binary_tree
    >>> tree = threaded_binary_tree.DoubleThreadedBinaryTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in tree.inorder_traverse()]
    [(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
    (22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
    >>> [item for item in tree.preorder_traverse()]
    [(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
    (22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
    >>> [item for item in tree.reverse_inorder_traverse()]
    [(34, '34'), (30, '30'), (24, '24'), (23, '23'), (22, '22'),
    (20, '20'), (15, '15'), (11, '11'), (7, '7'), (4, '4'), (1, '1')]
    >>> tree.get_leftmost().key
    1
    >>> tree.get_leftmost().data
    '1'
    >>> tree.get_rightmost().key
    34
    >>> tree.get_rightmost().data
    '34'
    >>> tree.get_height(tree.root)
    4
    >>> tree.search(24).data
    '24'
    >>> tree.delete(15)

    NOTE(review): the pre-order example above is identical to the in-order
    output, but a BST pre-order starts at the root (23); these doctest
    lines look copy-pasted and should be regenerated.
    """

    def __init__(self):
        binary_tree.BinaryTree.__init__(self)

    # Override
    def search(self, key: Any) -> DoubleThreadNode:
        """Look for a node by a given key.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.search`.
        """
        current = self.root
        while current:
            if key == current.key:
                return current  # type: ignore
            elif key < current.key:
                # Only descend over real child links; a thread would jump
                # back to an already-compared ancestor.
                if current.leftThread is False:
                    current = current.left
                else:
                    break
            else:  # key > current.key
                if current.rightThread is False:
                    current = current.right
                else:
                    break
        raise tree_exceptions.KeyNotFoundError(key=key)

    # Override
    def insert(self, key: Any, data: Any):
        """Insert a (key, data) pair into the double threaded binary tree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.insert`.
        """
        node = DoubleThreadNode(key=key, data=data)
        if self.root is None:
            self.root = node
        else:
            temp = self.root
            while temp:
                # Move to left subtree
                if node.key < temp.key:
                    if temp.leftThread is False and temp.left:
                        temp = temp.left
                        continue
                    else:
                        # New left child: inherit the parent's predecessor
                        # thread and thread right back to the parent.
                        node.left = temp.left
                        temp.left = node
                        node.right = temp
                        node.rightThread = True
                        node.parent = temp
                        temp.leftThread = False
                        if node.left:
                            node.leftThread = True
                        break
                # Move to right subtree
                elif node.key > temp.key:
                    if temp.rightThread is False and temp.right:
                        temp = temp.right
                        continue
                    else:
                        # New right child: inherit the parent's successor
                        # thread and thread left back to the parent.
                        node.right = temp.right
                        temp.right = node
                        node.left = temp
                        node.leftThread = True
                        temp.rightThread = False
                        node.parent = temp
                        if node.right:
                            node.rightThread = True
                        break
                else:
                    raise tree_exceptions.DuplicateKeyError(key=key)

    # Override
    def delete(self, key: Any):
        """Delete the node by the given key.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.delete`.
        """
        if self.root:
            deleting_node = self.search(key=key)
            # The deleting node has no child
            if (deleting_node.leftThread or deleting_node.left is None) and (
                deleting_node.rightThread or deleting_node.right is None
            ):
                self._transplant(deleting_node=deleting_node, replacing_node=None)
            # The deleting node has only one right child
            elif (
                deleting_node.leftThread or deleting_node.left is None
            ) and deleting_node.rightThread is False:
                # Re-point the successor's left thread before splicing.
                successor = self.get_successor(node=deleting_node)
                if successor:
                    successor.left = deleting_node.left
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.right
                )
            # The deleting node has only one left child,
            elif (
                deleting_node.rightThread or deleting_node.right is None
            ) and deleting_node.leftThread is False:
                # Re-point the predecessor's right thread before splicing.
                predecessor = self.get_predecessor(node=deleting_node)
                if predecessor:
                    predecessor.right = deleting_node.right
                self._transplant(
                    deleting_node=deleting_node, replacing_node=deleting_node.left
                )
            # The deleting node has two children
            elif deleting_node.left and deleting_node.right:
                predecessor = self.get_predecessor(node=deleting_node)
                # Replace with the smallest node of the right subtree.
                replacing_node: DoubleThreadNode = self.get_leftmost(
                    node=deleting_node.right
                )
                successor = self.get_successor(node=replacing_node)
                # the minimum node is not the direct child of the deleting node
                if replacing_node.parent != deleting_node:
                    if replacing_node.rightThread:
                        self._transplant(
                            deleting_node=replacing_node, replacing_node=None
                        )
                    else:
                        self._transplant(
                            deleting_node=replacing_node,
                            replacing_node=replacing_node.right,
                        )
                    replacing_node.right = deleting_node.right
                    replacing_node.right.parent = replacing_node
                    replacing_node.rightThread = False
                self._transplant(
                    deleting_node=deleting_node, replacing_node=replacing_node
                )
                replacing_node.left = deleting_node.left
                replacing_node.left.parent = replacing_node
                replacing_node.leftThread = False
                # Keep the neighbours' threads pointing at the position the
                # replacement now occupies.
                if predecessor and predecessor.rightThread:
                    predecessor.right = replacing_node
                if successor and successor.leftThread:
                    successor.left = replacing_node
            else:
                raise RuntimeError("Invalid case. Should never happened")

    # Override
    def get_leftmost(self, node: DoubleThreadNode) -> DoubleThreadNode:
        """Return the leftmost node from a given subtree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_leftmost`.
        """
        current_node = node
        # Stop when the left pointer is missing or is only a thread.
        while current_node.left and current_node.leftThread is False:
            current_node = current_node.left
        return current_node

    # Override
    def get_rightmost(self, node: DoubleThreadNode) -> DoubleThreadNode:
        """Return the rightmost node from a given subtree.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_rightmost`.
        """
        current_node = node
        if current_node:
            # Stop when the right pointer is missing or is only a thread.
            while current_node.right and current_node.rightThread is False:
                current_node = current_node.right
        return current_node

    # Override
    def get_successor(self, node: DoubleThreadNode) -> Optional[DoubleThreadNode]:
        """Return the successor node in the in-order order.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_successor`.
        """
        if node.rightThread:
            # The right thread points straight at the successor.
            return node.right
        else:
            if node.right:
                return self.get_leftmost(node=node.right)
            # The rightmost node of the tree has no successor.
            return None

    # Override
    def get_predecessor(self, node: DoubleThreadNode) -> Optional[DoubleThreadNode]:
        """Return the predecessor node in the in-order order.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_predecessor`.
        """
        if node.leftThread:
            # The left thread points straight at the predecessor.
            return node.left
        else:
            if node.left:
                return self.get_rightmost(node=node.left)
            # The leftmost node of the tree has no predecessor.
            return None

    # Override
    def get_height(self, node: Optional[DoubleThreadNode]) -> int:
        """Return the height of the given node.

        See Also
        --------
        :py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_height`.
        """
        if node is None:
            return 0
        # A node is a leaf when each of its two pointers is either absent
        # or only a thread.
        if (
            (node.left is None and node.right is None)
            or (node.leftThread and node.right is None)
            or (node.left is None and node.rightThread)
            or (node.leftThread and node.rightThread)
        ):
            return 0
        # NOTE(review): for a non-leaf node with exactly one threaded
        # pointer, the recursion below still follows that thread back up to
        # an ancestor -- confirm this cannot recurse endlessly.
        return max(self.get_height(node.left), self.get_height(node.right)) + 1

    def preorder_traverse(self) -> binary_tree.Pairs:
        """Use the right threads to traverse the tree in pre-order order.

        Yields
        ------
        `Pairs`
            The next (key, data) pair in the tree pre-order traversal.
        """
        current = self.root
        while current:
            yield (current.key, current.data)
            if current.rightThread:
                # Hop through the (already visited) in-order successor into
                # its right subtree.
                current = current.right.right
            elif current.leftThread is False:
                current = current.left
            else:
                # NOTE(review): a node whose left pointer is a thread but
                # whose right pointer is a real child ends the traversal
                # here, skipping that right subtree -- verify.
                break

    def inorder_traverse(self) -> binary_tree.Pairs:
        """Use the right threads to traverse the tree in in-order order.

        Yields
        ------
        `Pairs`
            The next (key, data) pair in the tree in-order traversal.
        """
        if self.root:
            current: Optional[DoubleThreadNode] = self.get_leftmost(node=self.root)
            while current:
                yield (current.key, current.data)
                if current.rightThread:
                    # Follow the thread straight to the successor.
                    current = current.right
                else:
                    if current.right is None:
                        # Rightmost node reached: traversal complete.
                        break
                    current = self.get_leftmost(current.right)

    def reverse_inorder_traverse(self) -> binary_tree.Pairs:
        """Use the left threads to traverse the tree in reversed in-order.

        Yields
        ------
        `Pairs`
            The next (key, data) pair in the tree reversed in-order traversal.
        """
        if self.root:
            current: Optional[DoubleThreadNode] = self.get_rightmost(node=self.root)
            while current:
                yield (current.key, current.data)
                if current.leftThread:
                    # Follow the thread straight to the predecessor.
                    current = current.left
                else:
                    if current.left is None:
                        # Leftmost node reached: traversal complete.
                        break
                    current = self.get_rightmost(current.left)

    def _transplant(
        self,
        deleting_node: DoubleThreadNode,
        replacing_node: Optional[DoubleThreadNode],
    ):
        # Internal helper for `delete`: rewire the parent's child pointer
        # and patch the thread bookkeeping on both sides of the removed node.
        if deleting_node.parent is None:
            # Deleting the root: a root never carries threads.
            self.root = replacing_node
            if self.root:
                self.root.leftThread = False
                self.root.rightThread = False
        elif deleting_node == deleting_node.parent.left:
            deleting_node.parent.left = replacing_node
            if replacing_node:
                if deleting_node.leftThread:
                    if replacing_node.leftThread:
                        # Inherit the deleted node's predecessor thread.
                        replacing_node.left = deleting_node.left
                if deleting_node.rightThread:
                    if replacing_node.rightThread:
                        # NOTE(review): self-assignment is a no-op; by
                        # symmetry with the left-thread case above this was
                        # probably meant to be
                        # `replacing_node.right = deleting_node.right`.
                        replacing_node.right = replacing_node.right
            else:
                # No replacement: the parent's left pointer becomes a thread
                # to the deleted node's in-order predecessor.
                deleting_node.parent.left = deleting_node.left
                deleting_node.parent.leftThread = True
        else:  # deleting_node == deleting_node.parent.right
            deleting_node.parent.right = replacing_node
            if replacing_node:
                if deleting_node.leftThread:
                    if replacing_node.leftThread:
                        replacing_node.left = deleting_node.left
                if deleting_node.rightThread:
                    if replacing_node.rightThread:
                        # NOTE(review): same suspected no-op as above.
                        replacing_node.right = replacing_node.right
            else:
                # No replacement: the parent's right pointer becomes a
                # thread to the deleted node's in-order successor.
                deleting_node.parent.right = deleting_node.right
                deleting_node.parent.rightThread = True
        if replacing_node:
            replacing_node.parent = deleting_node.parent
| [
"shunsvineyard@protonmail.com"
] | shunsvineyard@protonmail.com |
305a1aebbb2dd4ca338ca97056069f55d581a16b | 7428592b0783ec231a94f3c0a717a744c67fdbed | /venv/bin/symilar | b5d8631b3ee70b947e9a6e0d714c5724a27e21a4 | [] | no_license | sagarkamthane/Code-with-harry | 468bc31f8b5f9bb2720c10635726caf4d35dcbb8 | 5f63547876f034374004a7ad03f2eab729adff62 | refs/heads/main | 2023-01-22T15:14:06.391951 | 2020-12-03T17:13:00 | 2020-12-03T17:13:00 | 318,263,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/Users/sagarkamthane/PycharmProjects/CODEWITHHARRY/venv/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
# Console-script entry point generated by setuptools: strip the
# "-script.pyw"/".exe" wrapper suffix from argv[0], then delegate to
# pylint's symilar tool and exit with its status code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(run_symilar())
| [
"sagarkamthane@sysadmins-MacBook-Pro-4.local"
] | sagarkamthane@sysadmins-MacBook-Pro-4.local | |
72bee9a42771f7552299c70b906ce7027a7a4d0e | 661e6ee4f6c078fae31b960abc87777ae0dfe3ab | /build_adafruit_circuitpython_bundle_3.x_mpy_20181222/examples/slideshow_touch.py | e7cbc7717bd1e1d3479020faea4d891d24c35a22 | [] | no_license | wangguanwu/human_detect_design | c22018a7c52518cb5f9c2277bdbf5190abf196cd | 52530b9eaedacf9ef811c33604590e2af7d882e1 | refs/heads/master | 2020-04-12T22:27:37.892743 | 2019-01-01T03:51:54 | 2019-01-01T03:51:54 | 162,790,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | import board
from adafruit_slideshow import SlideShow, PlayBackDirection
import touchio
import pulseio
# Capacitive touch pads: TOUCH4/TOUCH1 step forward/backward through the
# slides, TOUCH3/TOUCH2 raise and lower the backlight brightness.
next_pad = touchio.TouchIn(board.TOUCH4)
prev_pad = touchio.TouchIn(board.TOUCH1)
brighter_pad = touchio.TouchIn(board.TOUCH3)
dimmer_pad = touchio.TouchIn(board.TOUCH2)

# Show images from the drive root; advancing is manual (no auto timer).
show = SlideShow(board.DISPLAY, pulseio.PWMOut(board.TFT_BACKLIGHT), folder="/",
                 auto_advance=False, dwell=0)

# Poll the pads forever and mirror them onto the slideshow.
while True:
    if next_pad.value:
        show.direction = PlayBackDirection.FORWARD
        show.advance()
    if prev_pad.value:
        show.direction = PlayBackDirection.BACKWARD
        show.advance()
    if brighter_pad.value:
        show.brightness += 0.001
    elif dimmer_pad.value:
        show.brightness -= 0.001
| [
"2531507093@qq.com"
] | 2531507093@qq.com |
2557772b7d100f774d617492135465f035ea0c26 | 8698ddbe01fb67ecf8bffaea56eebbe206611cd6 | /model/model_3D/SegNet.py | ff561664393e2de824a9522a1d24735662dc4f92 | [] | no_license | hula-ai/organ_segmentation_analysis | ddc70bfc8cd16f7e744023e7dbb0095dc4a63946 | 5867e3ce21cc3bc7ee76dabcba032a3cd25d7223 | refs/heads/master | 2020-09-05T12:44:43.572185 | 2019-11-07T00:01:54 | 2019-11-07T00:01:54 | 220,108,637 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,208 | py | import tensorflow as tf
from model.base_model import BaseModel
from model.ops import conv_3d, deconv_3d, max_pool
class SegNet(BaseModel):
    """3D SegNet: a VGG16-style encoder/decoder for volumetric segmentation.

    The encoder is five convolution blocks (64-64 / 128-128 / 256-256-256 /
    512-512-512 / 512-512-512), each followed by 2x max-pooling; the decoder
    mirrors it with transposed convolutions.  When ``self.bayes`` is set
    (inherited from `BaseModel`), dropout layers are inserted before the
    deepest encoder and decoder stages -- presumably for MC-dropout style
    Bayesian inference (TODO confirm against `BaseModel`).
    """

    def __init__(self, sess, conf):
        """Build the graph.

        sess: active TensorFlow session (handled by `BaseModel`).
        conf: configuration object providing `filter_size`, `use_BN`,
              `num_cls`, etc.
        """
        super(SegNet, self).__init__(sess, conf)
        self.k_size = self.conf.filter_size
        self.build_network(self.inputs_pl)
        self.configure_network()

    def _mc_dropout(self, x, name):
        """Dropout used by the Bayesian variant: active whenever the
        `with_dropout_pl` placeholder is fed True, with drop rate
        `1 - keep_prob_pl`."""
        return tf.layers.dropout(x, rate=(1 - self.keep_prob_pl),
                                 training=self.with_dropout_pl, name=name)

    def build_network(self, x):
        # Building network...  The original duplicated each conv/deconv call
        # in both arms of every `if self.bayes:` branch; the calls were
        # identical, so only the dropout is conditional now.
        with tf.variable_scope('SegNet'):
            with tf.variable_scope('Encoder'):
                # Block 1: 64 channels.
                x = conv_3d(x, self.k_size, 64, 'conv1_1', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 64, 'conv1_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = max_pool(x, ksize=2, stride=2, name='pool_1')
                # Block 2: 128 channels.
                x = conv_3d(x, self.k_size, 128, 'conv2_1', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 128, 'conv2_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = max_pool(x, ksize=2, stride=2, name='pool_2')
                # Block 3: 256 channels.
                x = conv_3d(x, self.k_size, 256, 'conv3_1', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 256, 'conv3_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 256, 'conv3_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = max_pool(x, ksize=2, stride=2, name='pool_3')
                # Block 4: 512 channels (dropout first in the Bayesian variant).
                if self.bayes:
                    x = self._mc_dropout(x, "dropout1")
                x = conv_3d(x, self.k_size, 512, 'conv4_1', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'conv4_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'conv4_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = max_pool(x, ksize=2, stride=2, name='pool_4')
                # Block 5: 512 channels.
                if self.bayes:
                    x = self._mc_dropout(x, "dropout2")
                x = conv_3d(x, self.k_size, 512, 'conv5_1', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'conv5_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'conv5_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = max_pool(x, ksize=2, stride=2, name='pool_5')
            with tf.variable_scope('Decoder'):
                # Mirror of encoder block 5.
                if self.bayes:
                    x = self._mc_dropout(x, "dropout3")
                x = deconv_3d(x, 2, 512, 'deconv_5', 2, add_batch_norm=self.conf.use_BN, is_train=self.is_training_pl)
                x = conv_3d(x, self.k_size, 512, 'deconv5_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'deconv5_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'deconv5_4', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                # Mirror of encoder block 4.
                if self.bayes:
                    x = self._mc_dropout(x, "dropout4")
                x = deconv_3d(x, 2, 512, 'deconv_4', 2, add_batch_norm=self.conf.use_BN, is_train=self.is_training_pl)
                x = conv_3d(x, self.k_size, 512, 'deconv4_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 512, 'deconv4_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 256, 'deconv4_4', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                # Mirror of encoder block 3.
                if self.bayes:
                    x = self._mc_dropout(x, "dropout5")
                x = deconv_3d(x, 2, 256, 'deconv_3', 2, add_batch_norm=self.conf.use_BN, is_train=self.is_training_pl)
                x = conv_3d(x, self.k_size, 256, 'deconv3_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 256, 'deconv3_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 128, 'deconv3_4', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                # Mirror of encoder block 2.
                if self.bayes:
                    x = self._mc_dropout(x, "dropout6")
                x = deconv_3d(x, 2, 128, 'deconv_2', 2, add_batch_norm=self.conf.use_BN, is_train=self.is_training_pl)
                x = conv_3d(x, self.k_size, 128, 'deconv2_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 64, 'deconv2_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                # Mirror of encoder block 1.
                x = deconv_3d(x, 2, 64, 'deconv_1', 2, add_batch_norm=self.conf.use_BN, is_train=self.is_training_pl)
                x = conv_3d(x, self.k_size, 64, 'deconv1_2', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
                x = conv_3d(x, self.k_size, 64, 'deconv1_3', self.conf.use_BN, self.is_training_pl, activation=tf.nn.relu)
            with tf.variable_scope('Classifier'):
                # Per-voxel class logits; no activation is applied here.
                self.logits = conv_3d(x, 1, self.conf.num_cls, 'output', self.conf.use_BN, self.is_training_pl)
| [
"pyuan2@hula-13.ee.e.uh.edu"
] | pyuan2@hula-13.ee.e.uh.edu |
c3a24b760defa2b2c5eb127fae5db04954b0950f | 90cc2c3d7c9bcbba51273fc8cd402ace073f1169 | /venv/bin/flask | bffb845abee1c5f4ce45c58fabe578ccc6bc9bb8 | [
"MIT"
] | permissive | howtoosee/MooVieGo | 912e4329a23d09613969526e9324f37c84e0455a | 203dd454f4e7d5adbb615379febd3b1e5db8f149 | refs/heads/master | 2020-12-14T12:17:13.106024 | 2020-08-14T19:51:56 | 2020-08-14T19:51:56 | 234,739,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/Users/howtoosee/Documents/Enrichment/HnR2020/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
# Console-script entry point generated by pip/setuptools: normalise argv[0]
# (drop the "-script.pyw"/".exe" wrapper suffix) and hand off to the Flask
# CLI, exiting with its return code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
| [
"chenxihao0627@gmail.com"
] | chenxihao0627@gmail.com | |
a5e994b745288becf5f7c50b640bea1b03d4ad05 | ae88dd2493c2329be480030f87e6e2a91470e255 | /src/python/DQIS/Client/CommandLine.py | 15d7a3b7f6f2e7ddc217564c71ac050dab93013c | [] | no_license | dmwm/DQIS | a48da3841ab6a086247ae8e437e2b5eb9e1c5048 | bd861954c2531df1bd2e9dceb2585b9acd4cbbdc | refs/heads/master | 2021-01-23T08:15:06.804525 | 2010-05-11T19:30:33 | 2010-05-11T19:30:33 | 4,423,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | '''
Created on 7 May 2010
@author: metson
'''
from DQIS.API.Database import Database
from optparse import OptionParser
import json
D_DATABASE_NAME = 'dqis'
D_DATABASE_ADDRESS = 'localhost:5984'
def do_options():
    """Define the DQIS command-line interface and parse ``sys.argv``.

    Returns the ``(options, args)`` pair produced by
    ``optparse.OptionParser.parse_args``.
    """
    op = OptionParser(version="%prog 0.1")
    # CouchDB connection settings.
    op.add_option("-u", "--url",
                  type="string",
                  action="store",
                  dest="db_address",
                  help="Database url. Default address %s" % D_DATABASE_ADDRESS,
                  default=D_DATABASE_ADDRESS)
    op.add_option("-d", "--database",
                  type="string",
                  action="store",
                  dest="db_name",
                  help="Database name. Default: '%s'" % D_DATABASE_NAME,
                  default=D_DATABASE_NAME)
    # Repeatable key/value filter pairs; collected as a list of 2-tuples
    # in options.keys (None when the option is never used).
    op.add_option("-k", "--key",
                  action="append",
                  nargs=2,
                  type="string",
                  dest="keys",
                  help="Key Value pair (e.g.-k ecal True)")
    # Run-range / selection parameters.
    op.add_option("--startrun",
                  action="store",
                  type="int",
                  dest="start_run",
                  help="Run value")
    op.add_option("--endrun",
                  action="store",
                  type="int",
                  dest="end_run",
                  help="Run value")
    op.add_option("--lumi",
                  action="store",
                  type="int",
                  dest="lumi",
                  help="Lumi value")
    op.add_option("--dataset",
                  action="store",
                  type="string",
                  dest="dataset",
                  help="Dataset value")
    op.add_option("--bfield", "-b",
                  action="store",
                  type="int",
                  dest="bfield",
                  help="Magnetic field value")
    op.add_option("--id",
                  type="string",
                  action="store",
                  dest="doc_id",
                  help="Document ID",) #TODO: validate
    op.add_option("--crab",
                  "-c",
                  action="store_true",
                  dest='crab',
                  help='Create a CRAB lumi.json file in the current directory.',
                  default=False)
    return op.parse_args()
options, args = do_options()
db = Database(dbname = options.db_name, url = options.db_address, size = 1000)
map = {}
for k,v in options.keys:
map[k] = bool(v)
if options.crab:
data = db.crab(options.start_run, options.end_run, map, options.bfield)
f = open('lumi.json', 'w')
json.dump(data, f)
f.close()
elif options.doc_id:
print db.getDoc(doc_id)
else:
print db.search(options.start_run, options.end_run, map, options.bfield) | [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
a5230b855b505b17f14791a0061759b8f1b21930 | fa27b2e9668484959772c6ac37622a7442396347 | /sharing/app/api_1_0/register.py | 039c61f8e235d2d4e7f5478a4cc4115a74de729a | [] | no_license | tangxiangru/2017-sharing-backend | 5a3cc9ba6c22944046ae99221bee70245e326ffd | 0905e38c9a30296cf01950efa6eed2708807f957 | refs/heads/master | 2021-01-13T11:30:24.026822 | 2017-02-11T18:56:29 | 2017-02-11T18:56:29 | 81,680,679 | 1 | 0 | null | 2017-02-11T20:37:08 | 2017-02-11T20:37:08 | null | UTF-8 | Python | false | false | 994 | py | #coding:utf-8
from flask import jsonify, redirect, request, url_for, flash
from ..models import User
from .. import db
from . import api
# User registration endpoint
@api.route('/register/',methods = ['POST'])
def register():
    """Create a user from a JSON body {"email", "password", "username"}
    and return {"created": <new user id>}.
    """
    if request.method == 'POST':
        email = request.get_json().get("email")
        password = request.get_json().get("password")
        username = request.get_json().get("username")
        # NOTE(review): the raw password is handed to the User model here;
        # presumably the model hashes it in a setter -- confirm.
        user = User ( username= username,email=email ,password=password)
        #user = User.from_json(request.json)
        db.session.add(user)
        db.session.commit()
        # Re-query by e-mail to obtain the id generated on commit.
        user_id=User.query.filter_by(email=email).first().id
        # The e-mail confirmation flow is currently disabled:
        #token = user.generate_confirmation_token()
        #send_email(user.email,'请确认你的账户',
        #           'auth/email/confirm',user = user,token = token)
        #flash(u'确认邮件已经发往了你的邮箱')
        return jsonify({
            "created":user_id
        })
| [
"504490160@qq.com"
] | 504490160@qq.com |
60c7985f07a5d4bcb5f9176bf1114a6c1cad3a3f | 07be15341071074f4d32494219003003648153a9 | /GongshwPlanService.py | 9d9fb1d3236e330009ef4ced3017937bdaf8d522 | [
"MIT"
] | permissive | gongshw/gongshw-plan-service | 48a6eb21245e896374b889bb8c5e3ccebfe0a415 | 93d4b2192a2111d4067daa4450d9cf647e244880 | refs/heads/master | 2021-01-01T18:12:10.358870 | 2015-02-23T15:11:28 | 2015-02-23T15:11:28 | 30,861,727 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | # coding=utf-8
from flask import Flask, request, jsonify, g
from Plan import RequestException
import Plan
import traceback
app = Flask(__name__)
# NOTE(review): this module uses Python 2 print-statement syntax below
# (``print e``), so it targets Python 2.
def wrap_response(result):
    """Wrap *result* in the uniform envelope {"result": ..., "error": null}."""
    return jsonify(result=result, error=None)
def success():
    """Standard "it worked" response: result=true."""
    return wrap_response(True)
# Health check ("test the server").
@app.route('/')
def ping():
    return success()
# Fetch the plans of one period, addressed by unit and index.
@app.route('/plan/<unit>/<int:index>', methods=['GET'])
def get_plans(index, unit):
    return wrap_response(Plan.get_plans(index, unit))
# Fetch all currently active plans.
@app.route('/plan/active', methods=['GET'])
def get_current_plans():
    return wrap_response(Plan.get_current_plans())
# Add a new plan, or update the sort order of an existing one.
@app.route('/plan/<plan_id>', methods=['PUT', 'POST'])
def add_plan(plan_id):
    plan_to_save = request.get_json()
    if not plan_to_save['id'] == plan_id:
        raise Exception('id in url not matched with id in request body')
    plan_exist = Plan.get_plan(plan_id)
    if plan_exist:
        # Existing plan: only its numeric "sort" field may be updated.
        if 'sort' in plan_to_save and type(plan_to_save['sort']) in [float, int]:
            Plan.update_plan_filed(plan_id, 'sort', plan_to_save['sort'])
    else:
        Plan.add_plan(plan_to_save)
    return success()
# Delete a plan.
@app.route('/plan/<plan_id>', methods=['DELETE'])
def delete_plan(plan_id):
    Plan.delete_plan(plan_id)
    return success()
# Mark one occurrence (index) of a plan as done.
@app.route('/plan/<plan_id>/<index>/_done', methods=['PUT', 'POST'])
def finish_plan(plan_id, index):
    Plan.add_plan_record(plan_id, index)
    return success()
# Revert one occurrence (index) of a plan back to "not done".
@app.route('/plan/<plan_id>/<index>/_done', methods=['DELETE'])
def remove_finish_plan(plan_id, index):
    Plan.delete_plan_record(plan_id, index)
    return success()
# Turn the domain's RequestException into a JSON 400 response.
@app.errorhandler(RequestException)
def request_error_handler(error):
    traceback.print_exc()
    return jsonify({'error': error.message}), 400
# Close the per-request database handle, if one was opened (stored on g._db).
@app.teardown_appcontext
def close_connection(e):
    if e is not None:
        print e
    db = getattr(g, '_db', None)
    if db is not None:
        db.close()
if __name__ == '__main__':
    # Development server; 0.0.0.0 exposes it on all interfaces.
    app.run(host='0.0.0.0', port=5000, debug=True)
| [
"gongshw1992@gmail.com"
] | gongshw1992@gmail.com |
b176f452b4b04e1e896a1e8baad962ed8b125d50 | ed87ebb57f79f4f7efb51a8b08ebb0b49334ca1d | /apotos/make_folds.py | 6d2c5213e60504602e5928680a4db16973af9d2b | [] | no_license | Thagio/kaggle-aptos | 14aa9a5e78690540dc0ffc100221a54c0f99061d | f565335d34b46b7fa7ca925b7d325397df8e1fee | refs/heads/master | 2020-06-22T07:46:02.605062 | 2019-08-07T10:06:46 | 2019-08-07T10:06:46 | 197,674,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,380 | py |
# coding: utf-8
# In[2]:
# FIXME : this function depends on the format of the file it is defined in, so it cannot be moved into utils.
def is_env_notebook():
    """Return True only when running inside a Jupyter Notebook kernel."""
    shell_factory = globals().get('get_ipython')
    if shell_factory is None:
        # Plain CPython interpreter: IPython never injected get_ipython.
        return False
    # Terminal IPython reports TerminalInteractiveShell; any other shell
    # class (e.g. ZMQInteractiveShell) is treated as a notebook.
    return shell_factory().__class__.__name__ != 'TerminalInteractiveShell'
# In[3]:
#import sys
#sys.path.append('.')
import argparse
from collections import defaultdict, Counter
import random
import os
import pandas as pd
import tqdm
from IPython.core.debugger import Pdb
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
if ON_KAGGLE:
from .dataset import DATA_ROOT,EXTERNAL_ROOT
else:
from dataset import DATA_ROOT,EXTERNAL_ROOT
# In[11]:
# NOTE: the function below is the multi-label variant of make_folds.
def make_folds_for_multilabel(n_folds: int) -> pd.DataFrame:
    """Assign a fold (0..n_folds-1) to every training row, balancing the
    per-class counts across folds for multi-label targets (space-separated
    class ids in the ``attribute_ids`` column)."""
    df = pd.read_csv(DATA_ROOT / 'train.csv')
    # Global frequency of every class over all rows.
    cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
                         for cls in classes)
    fold_cls_counts = defaultdict(int)
    folds = [-1] * len(df)
    for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
                          total=len(df)):
        # Stratify on the rarest class of this row.
        cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
        fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
        min_count = min([count for _, count in fold_counts])
        random.seed(item.Index)
        # Break ties reproducibly (seeded on the row index).
        fold = random.choice([f for f, count in fold_counts
                              if count == min_count])
        folds[item.Index] = fold
        for cls in item.attribute_ids.split():
            fold_cls_counts[fold, cls] += 1
    df['fold'] = folds
    return df
def make_folds(n_folds:int,seed:int=42,rmdup:bool=True) -> pd.DataFrame:
    """Assign a stratified fold (0..n_folds-1) to every training image.

    With ``rmdup`` True, images are deduplicated via a precomputed pixel-md5
    table, and train images whose duplicate also appears in the test set are
    kept aside as "leak" rows with fold == -1.
    """
    if rmdup:
        # Duplicate handling, based on the precomputed md5 table.
        strmd5 = (pd.read_csv("../input/strmd5/strMd5.csv").
                  query("strMd5_nunique == 1")) # drop samples whose duplicates carry conflicting labels (there are 2)
        # Flag whether a row comes from the train or the test set
        # (test rows carry a negative diagnosis in this table).
        strmd5["dataset"] = ["train" if diagnosis >= 0 else "test" for diagnosis in strmd5["diagnosis"]]
        # How often the same image hash occurs in the test set.
        strmd5["strMd5_test_count"] = strmd5.strMd5_count - strmd5.strMd5_train_count
        # A train row whose hash also appears in the test set is a "leak".
        strmd5["leak"] = ["leak" if tup["dataset"] == "train" and tup["strMd5_test_count"] >=1 else "not leak"
                          for i,tup in strmd5.loc[:,["strMd5_test_count","dataset"]].iterrows()]
        # Deduplicated, leak-free training rows.
        strmd5_train = (strmd5.
                        query("dataset == 'train' and leak == 'not leak'").
                        drop_duplicates(subset=["strMd5","diagnosis"]).
                        reset_index(drop=True)
                        )
        strmd5_train["diagnosis"] = strmd5_train["diagnosis"].astype("int64")
        # Leaked training rows: kept, but excluded from folding (fold -1).
        strmd5_train_leak = (strmd5.
                             query("dataset == 'train' and leak == 'leak'").
                             drop_duplicates(subset=["strMd5","diagnosis"]).
                             reset_index(drop=True).
                             loc[:,["id_code","diagnosis"]]
                             )
        strmd5_train_leak["fold"] = -1
        df = strmd5_train.loc[:,["id_code","diagnosis"]]
    else:
        df = pd.read_csv(DATA_ROOT / 'train.csv')
    # Pdb().set_trace()
    cls_counts = Counter(cls for cls in df["diagnosis"])
    fold_cls_counts = defaultdict(int)
    folds = [-1] * len(df)
    for item in tqdm.tqdm(df.sample(frac=1, random_state=seed).itertuples(),
                          total=len(df)):
        # Pdb().set_trace()
        cls = item.diagnosis
        fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
        min_count = min([count for _, count in fold_counts])
        random.seed(item.Index)
        # Pick (reproducibly) among the folds currently holding the fewest
        # samples of this class.
        fold = random.choice([f for f, count in fold_counts
                              if count == min_count])
        folds[item.Index] = fold
        #for cls in item.diagnosis:
        fold_cls_counts[fold, cls] += 1
    # from IPython.core.debugger import Pdb; Pdb().set_trace()
    df['fold'] = folds
    if rmdup:
        df = pd.concat([df,strmd5_train_leak])
    return df
# In[4]:
def external_data() -> pd.DataFrame:
    """Load the external trainLabels.csv, renamed to the (image, level)
    schema, with every row assigned the sentinel fold 99."""
    labels = pd.read_csv(EXTERNAL_ROOT / "trainLabels.csv")
    labels = labels.rename(columns={"id_code": "image", "diagnosis": "level"})
    labels["fold"] = 99
    return labels
# In[13]:
if __name__ == "__main__":
pass
# df = external_data()
# print(df.head())
# In[12]:
def main():
    """CLI entry point: build stratified folds and write them to folds.csv."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-folds', type=int, default=4)
    ## Inside a notebook, sys.argv holds the kernel's own arguments, so
    ## parse an empty argument list instead.
    if is_env_notebook():
        args = parser.parse_args(args=[])
    else:
        args = parser.parse_args()
    df = make_folds(n_folds=args.n_folds)
    df.to_csv('folds.csv', index=None)
    # from IPython.core.debugger import Pdb; Pdb().set_trace()
if __name__ == '__main__':
main()
| [
"hagio_taichi@data4cs.co.jp"
] | hagio_taichi@data4cs.co.jp |
781db3289dc529dcabb54cf418c3615c409d0251 | 5a6f15c49063cf976528fc4436fbbc093b894876 | /Src/Algorithms/TabularPredictionAlgorithms.py | 3b9359c153a229c14639a3917ea41e68c5cef36f | [] | no_license | julu123/julu1cme241 | d98face2e4d691ded804eddf5851e05d15fe14ad | 19dcc4cabeec894046ffbc3fd98de9f2d09e8c11 | refs/heads/master | 2020-04-15T19:58:58.433132 | 2019-11-25T11:27:42 | 2019-11-25T11:27:42 | 164,973,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,451 | py | import numpy as np
import random
from Algorithms.TabularBase import TabularBase
from Processes.Variables import State, Action, Policy, Transitions_Rewards_Action_B
# All my tabular methods are slightly simplified. I didn't really understand what generalizations needed to be
# done in advance. The methods do work though.
class PredictionMethods(TabularBase):
    """Tabular model-free prediction (policy evaluation) algorithms.

    The wrapped MDP is used only to *sample* episodes through
    ``self.generate(...)`` (inherited from TabularBase); none of the
    methods reads the transition probabilities directly.
    """
    def __init__(self,
                 mdp: Transitions_Rewards_Action_B,
                 pol: Policy = None,
                 gamma: float = 0.99):
        """Store the policy and discount factor and enumerate the states.

        The mdp is only needed to *generate* episodes -- the prediction
        methods never see its probabilities.
        """
        TabularBase.__init__(self, mdp, gamma)
        self.pol = pol  # policy being evaluated (forwarded to self.generate)
        self.gamma = gamma  # discount factor
        self.states = list(mdp)  # state space, in the mdp's iteration order
    def monte_carlo_first_visit(self,
                                episode_size: int = 500,
                                nr_episodes: int = 200,
                                print_text: bool = False):
        """Monte-Carlo estimate of the state-value function.

        NOTE(review): only the *initial* state of each episode receives an
        update (v0/g0 are indexed by sim_states[0]), i.e. this averages
        episode returns per start state rather than updating every
        first-visited state -- confirm this is intended.
        """
        v0 = {i: 0 for i in self.states}  # accumulated returns per state
        g0 = v0.copy()  # visit counts per state
        for i in range(nr_episodes):
            sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
            # Discounted return of the episode from its first state.
            g_t = rewards[0]
            for j in range(1, len(sim_states) - 1):
                g_t = g_t + self.gamma**j * rewards[j]
            v0[sim_states[0]] = v0[sim_states[0]] + g_t
            g0[sim_states[0]] += 1
        # Average accumulated returns over the number of visits.
        for i in v0:
            if g0[i] != 0:
                v0[i] = v0[i]/g0[i]
        return v0
    def td_zero(self,
                alpha: float = 0.1,
                episode_size: int = 500,
                nr_episodes: int = 200,
                print_text: bool = False):
        """TD(0) policy evaluation with constant learning rate ``alpha``."""
        random.seed(1)  # fixed seed: runs are reproducible
        v0 = {i: 0 for i in self.states}
        for i in range(nr_episodes):
            sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
            for j in range(len(sim_states)-1):
                current_state = sim_states[j]
                next_state = sim_states[j+1]
                # One-step bootstrap: V(s) += alpha*(r + gamma*V(s') - V(s)).
                v0[current_state] = v0[current_state] + alpha * (rewards[j] + self.gamma*v0[next_state] - v0[current_state])
        return v0
    def td_lambda(self,
                  alpha: float = 0.05,
                  lambd: float = 0.8,
                  episode_size: int = 500,
                  nr_episodes: int = 100000,
                  method: str = "Forward",
                  update: str = "Online",
                  print_text: bool = False):
        """TD(lambda) in four flavours: method in {"Forward", "Backward"}
        combined with update in {"Online", "Offline"}.

        The learning rate decays linearly: lr = alpha * (1 - i/nr_episodes).
        NOTE(review): the vf_per_iterations arrays below are allocated (and
        in one branch filled) but never returned -- diagnostic leftovers.
        """
        v0 = {i: 0 for i in self.states}
        if method == "Forward" and update == "Online":
            vf_per_iterations = np.zeros((int(nr_episodes / 1), len(self.states)))
            for i in range(nr_episodes):
                sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
                for t in range(len(sim_states) - 1):
                    # Lambda-return from time t: weighted mix of n-step returns.
                    g_t_lambda = 0
                    final_g_t = 0
                    for n in range(1, len(sim_states) - 1 - t):
                        g_t = rewards[t]
                        for k in range(1, n + 1):
                            # NOTE(review): uses a constant factor self.gamma
                            # rather than self.gamma**k -- verify the intended
                            # discounting of the n-step return.
                            g_t = g_t + self.gamma * rewards[t + k]
                        final_g_t = g_t
                        g_t_lambda += lambd ** (n - 1) * g_t
                    g_t_lambda = (1 - lambd) * g_t_lambda + lambd**(len(sim_states)-1) * final_g_t
                    lr = alpha - alpha * i / nr_episodes
                    v0[sim_states[t]] = v0[sim_states[t]] + lr * (g_t_lambda - v0[sim_states[t]])
        elif method == "Backward" and update == "Online":
            vf_per_iterations = np.zeros((int(nr_episodes / 1), len(self.states)))
            for i in range(nr_episodes):
                sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
                # Eligibility traces, reset at the start of every episode.
                e_trace = {i: 0 for i in self.states}
                for t in range(len(sim_states) - 1):
                    for s in self.states:
                        e_trace[s] = e_trace[s] * lambd
                    e_trace[sim_states[t]] += 1
                    current_state = sim_states[t]
                    next_state = sim_states[t + 1]
                    lr = alpha - alpha * i / nr_episodes
                    v0[current_state] = v0[current_state] + lr * \
                                        (rewards[t] + self.gamma * v0[next_state] - v0[current_state]) * \
                                        e_trace[current_state]
        elif method == "Forward" and update == "Offline":
            vf_per_iterations = np.zeros((int(nr_episodes/1), len(self.states)))
            for i in range(nr_episodes):
                sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
                # NOTE(review): only the value of the episode's first state
                # is updated in this branch -- confirm that is intended.
                g_t_lambda = 0
                final_g_t = 0
                for t in range(1, len(sim_states) - 1):
                    g_t = rewards[0]
                    for n in range(1, t):
                        g_t = g_t + self.gamma**n * rewards[n]
                    final_g_t = g_t
                    g_t_lambda = g_t_lambda + g_t * lambd**(t-1)
                g_t_lambda = g_t_lambda*(1-lambd) + lambd**(len(sim_states)-1) * final_g_t
                lr = alpha - alpha * i / nr_episodes
                v0[sim_states[0]] = v0[sim_states[0]] + lr * (g_t_lambda - v0[sim_states[0]])
                if (i+1) % 1 == 0:
                    # Record the value function after every episode.
                    for j, k in enumerate(v0):
                        vf_per_iterations[int((i+1)/1-1), j] = v0[k]
        elif method == "Backward" and update == "Offline":
            vf_per_iterations = np.zeros((int(nr_episodes / 1), len(self.states)))
            # NOTE(review): unlike the Online variant, the eligibility trace
            # is initialised once and carried across episodes -- verify.
            e_trace = {i: 0 for i in self.states}
            for i in range(nr_episodes):
                sim_states, _, rewards = self.generate(self.pol, steps=episode_size, print_text=print_text)
                for t in range(len(sim_states) - 1):
                    for s in self.states:
                        e_trace[s] = e_trace[s] * lambd
                    e_trace[sim_states[t]] += 1
                    current_state = sim_states[t]
                    next_state = sim_states[t + 1]
                    lr = alpha - alpha * i / nr_episodes
                    v0[current_state] = v0[current_state] + lr * \
                                        (rewards[t] + self.gamma * v0[next_state] - v0[current_state]) * \
                                        e_trace[current_state]
        return v0
| [
"julu1@stanford.edu"
] | julu1@stanford.edu |
7db882e4688f9ac422447f574f37c2abeeaeb1f8 | d9759a656cbd80573fc30e28b8e153acee5f8ba6 | /atom_types.py | 551e63185c8830b655444ecf975c3d0d2856578d | [] | no_license | eriksondale/liGAN | d8cf04c5cb3eaf5bc3799a98b733c14412ce27ef | 482f58e6cb898fbf71cfd786eb6fc25afe714ffb | refs/heads/master | 2020-05-01T11:19:48.249807 | 2019-03-21T21:33:18 | 2019-03-21T21:33:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,536 | py | from collections import namedtuple, defaultdict
import openbabel as ob
# Compatibility shim: some Open Babel builds expose element data through an
# OBElementTable object; when that class is missing (AttributeError on
# newer releases) the same Get* functions live directly on the module.
try:
    table = ob.OBElementTable()
except AttributeError:
    table = ob
# Short aliases used throughout this module.
get_atomic_num = table.GetAtomicNum
get_name = table.GetName
get_symbol = table.GetSymbol
get_max_bonds = table.GetMaxBonds
get_rgb = table.GetRGB
# One record per smina/AutoDock atom type: display name, element number and
# symbol, plus the two radii available to the gridder (covalent vs. the
# larger "XS" radius) -- presumably in Angstroms; confirm against smina.
atom_type = namedtuple('atom_type', ['name', 'atomic_num', 'symbol', 'covalent_radius', 'xs_radius'])
# NOTE: order matters -- the idx lists in get_default_*_channels below are
# positions in this list.  atomic_num -1 marks the generic-metal wildcard.
smina_types = [
    atom_type("Hydrogen", 1, "H", 0.37, 0.37),
    atom_type("PolarHydrogen", 1, "H", 0.37, 0.37),
    atom_type("AliphaticCarbonXSHydrophobe", 6, "C", 0.77, 1.90),
    atom_type("AliphaticCarbonXSNonHydrophobe", 6, "C", 0.77, 1.90),
    atom_type("AromaticCarbonXSHydrophobe", 6, "C", 0.77, 1.90),
    atom_type("AromaticCarbonXSNonHydrophobe", 6, "C", 0.77, 1.90),
    atom_type("Nitrogen", 7, "N", 0.75, 1.80),
    atom_type("NitrogenXSDonor", 7, "N", 0.75, 1.80),
    atom_type("NitrogenXSDonorAcceptor", 7, "N", 0.75, 1.80),
    atom_type("NitrogenXSAcceptor", 7, "N", 0.75, 1.80),
    atom_type("Oxygen", 8, "O", 0.73, 1.70),
    atom_type("OxygenXSDonor", 8, "O", 0.73, 1.70),
    atom_type("OxygenXSDonorAcceptor", 8, "O", 0.73, 1.70),
    atom_type("OxygenXSAcceptor", 8, "O", 0.73, 1.70),
    atom_type("Sulfur", 16, "S", 1.02, 2.00),
    atom_type("SulfurAcceptor", 16, "S", 1.02, 2.00),
    atom_type("Phosphorus", 15, "P", 1.06, 2.10),
    atom_type("Fluorine", 9, "F", 0.71, 1.50),
    atom_type("Chlorine", 17, "Cl", 0.99, 1.80),
    atom_type("Bromine", 35, "Br", 1.14, 2.00),
    atom_type("Iodine", 53, "I", 1.33, 2.20),
    atom_type("Magnesium", 12, "Mg", 1.30, 1.20),
    atom_type("Manganese", 25, "Mn", 1.39, 1.20),
    atom_type("Zinc", 30, "Zn", 1.31, 1.20),
    atom_type("Calcium", 20, "Ca", 1.74, 1.20),
    atom_type("Iron", 26, "Fe", 1.25, 1.20),
    atom_type("GenericMetal", -1, "M", 1.75, 1.20),
    atom_type("Boron", 5, "B", 0.90, 1.92)
]
# A rendering channel: one atom type paired with the single radius used to
# rasterise it onto the grid.
channel = namedtuple('channel', ['name', 'atomic_num', 'symbol', 'atomic_radius'])
def get_smina_type_channels(idx, use_covalent_radius):
    """Build channel records for the smina types at positions *idx*,
    taking the covalent radius when requested, the XS radius otherwise."""
    def to_channel(entry):
        radius = entry.covalent_radius if use_covalent_radius else entry.xs_radius
        return channel(entry.name, entry.atomic_num, entry.symbol, radius)
    return [to_channel(smina_types[position]) for position in idx]
def get_default_rec_channels(use_covalent_radius=False):
    # Default receptor (protein-side) atom types, as indices into smina_types.
    idx = [2, 3, 4, 5, 24, 25, 21, 6, 9, 7, 8, 13, 12, 16, 14, 23]
    return get_smina_type_channels(idx, use_covalent_radius)
def get_default_lig_channels(use_covalent_radius=False):
    # Default ligand atom types, as indices into smina_types.
    idx = [2, 3, 4, 5, 19, 18, 17, 6, 9, 7, 8, 10, 13, 12, 16, 14, 15, 20, 27]
    return get_smina_type_channels(idx, use_covalent_radius)
def get_default_channels(use_covalent_radius=False):
    """Return the default receptor channels followed by the default ligand
    channels.

    CONSISTENCY FIX: ``use_covalent_radius`` now defaults to False, matching
    get_default_rec_channels / get_default_lig_channels.  Backward
    compatible: callers that pass the argument behave exactly as before.
    """
    rec_channels = get_default_rec_channels(use_covalent_radius)
    lig_channels = get_default_lig_channels(use_covalent_radius)
    return rec_channels + lig_channels
| [
"mtr22@pitt.edu"
] | mtr22@pitt.edu |
12e3936893568ce3f48ea41898acde3506eb4f06 | 52855d750ccd5f2a89e960a2cd03365a3daf4959 | /ABC/ABC52_B.py | 6a5c62026f989697559e55494fbdcbc27af93a36 | [] | no_license | takuwaaan/Atcoder_Study | b15d4f3d15d48abb06895d5938bf8ab53fb73c08 | 6fd772c09c7816d147abdc50669ec2bbc1bc4a57 | refs/heads/master | 2021-03-10T18:56:04.416805 | 2020-03-30T22:36:49 | 2020-03-30T22:36:49 | 246,477,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | N = int(input())
S = input()
# Running value of x: +1 per 'I', -1 per 'D'.  l records every intermediate
# value, starting from the initial 0, so max(l) is the peak reached.
x = 0
l = [0]
for position in range(N):
    x += 1 if S[position] == "I" else -1
    l.append(x)
print(max(l)) | [
"takutotakuwan@gmail.com"
] | takutotakuwan@gmail.com |
eece76dcadf20e7096ee607dea6649b8656ee52f | 97ead5252b1c21cb2a6b83e65ff3e8bd9895f5f5 | /best_model_finder/tuner.py | 7e3445bc0085465f14b7efdae628107224583890 | [] | no_license | shrddha-p-jain/Insurance-Fraud-Detection | d65d0c473e904647d437546d98ce6095bbb47369 | 091ccb31ab64cb73e8912b9d9e8ae47972e4a949 | refs/heads/main | 2023-07-22T19:06:26.391279 | 2021-09-06T10:36:46 | 2021-09-06T10:36:46 | 403,577,241 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,357 | py | from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score,accuracy_score
class Model_Finder:
    """
    This class shall be used to find the model with best accuracy and AUC score.
    Revisions: None
    """

    def __init__(self, file_object, logger_object):
        # file_object / logger_object implement the project's logging
        # protocol: logger_object.log(file_object, message).
        self.file_object = file_object
        self.logger_object = logger_object
        # Baseline (untuned) estimators; replaced by tuned ones below.
        self.sv_classifier = SVC()
        self.xgb = XGBClassifier(objective='binary:logistic', n_jobs=-1)

    def get_best_params_for_svm(self, train_x, train_y):
        """
        Method Name: get_best_params_for_svm
        Description: Grid-search the SVM hyper-parameters and return an SVM
                     refitted on train data with the best combination.
        Output: The model with the best parameters
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object, 'Entered the get_best_params_for_svm method of the Model_Finder class')
        try:
            # Candidate hyper-parameter grid.
            self.param_grid = {"kernel": ['rbf', 'sigmoid'],
                               "C": [0.1, 0.5, 1.0],
                               "random_state": [0, 100, 200, 300]}
            self.grid = GridSearchCV(estimator=self.sv_classifier, param_grid=self.param_grid, cv=5, verbose=3)
            self.grid.fit(train_x, train_y)
            # Extract the winning combination and refit a fresh model.
            self.kernel = self.grid.best_params_['kernel']
            self.C = self.grid.best_params_['C']
            self.random_state = self.grid.best_params_['random_state']
            self.sv_classifier = SVC(kernel=self.kernel, C=self.C, random_state=self.random_state)
            self.sv_classifier.fit(train_x, train_y)
            self.logger_object.log(self.file_object,
                                   'SVM best params: '+str(self.grid.best_params_)+'. Exited the get_best_params_for_svm method of the Model_Finder class')
            return self.sv_classifier
        except Exception as e:
            self.logger_object.log(self.file_object,
                                   'Exception occured in get_best_params_for_svm method of the Model_Finder class. Exception message: ' + str(
                                       e))
            self.logger_object.log(self.file_object,
                                   'SVM training failed. Exited the get_best_params_for_svm method of the Model_Finder class')
            raise Exception()

    def get_best_params_for_xgboost(self, train_x, train_y):
        """
        Method Name: get_best_params_for_xgboost
        Description: Grid-search the XGBoost hyper-parameters and return a
                     model refitted on train data with the best combination.
        Output: The model with the best parameters
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object,
                               'Entered the get_best_params_for_xgboost method of the Model_Finder class')
        try:
            # NOTE(review): "criterion" is not a native XGBClassifier
            # parameter (XGBoost accepts and ignores unknown kwargs), so it
            # mainly inflates the grid -- kept for backward compatibility.
            self.param_grid_xgboost = {
                "n_estimators": [100, 130], "criterion": ['gini', 'entropy'],
                "max_depth": range(8, 10, 1)
            }
            self.grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), self.param_grid_xgboost, verbose=3, cv=5)
            self.grid.fit(train_x, train_y)
            self.criterion = self.grid.best_params_['criterion']
            self.max_depth = self.grid.best_params_['max_depth']
            self.n_estimators = self.grid.best_params_['n_estimators']
            self.xgb = XGBClassifier(criterion=self.criterion, max_depth=self.max_depth, n_estimators=self.n_estimators, n_jobs=-1)
            self.xgb.fit(train_x, train_y)
            self.logger_object.log(self.file_object,
                                   'XGBoost best params: ' + str(
                                       self.grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')
            return self.xgb
        except Exception as e:
            self.logger_object.log(self.file_object,
                                   'Exception occured in get_best_params_for_xgboost method of the Model_Finder class. Exception message: ' + str(
                                       e))
            self.logger_object.log(self.file_object,
                                   'XGBoost Parameter tuning failed. Exited the get_best_params_for_xgboost method of the Model_Finder class')
            raise Exception()

    def get_best_model(self, train_x, train_y, test_x, test_y):
        """
        Method Name: get_best_model
        Description: Find out the Model which has the best AUC score.
        Output: The best model name and the model object
        On Failure: Raise Exception
        """
        self.logger_object.log(self.file_object,
                               'Entered the get_best_model method of the Model_Finder class')
        try:
            # ----- XGBoost -----
            self.xgboost = self.get_best_params_for_xgboost(train_x, train_y)
            self.prediction_xgboost = self.xgboost.predict(test_x)
            # roc_auc_score is undefined when only one label is present in
            # test_y (assumed to be a pandas Series); use accuracy instead.
            if len(test_y.unique()) == 1:
                self.xgboost_score = accuracy_score(test_y, self.prediction_xgboost)
                self.logger_object.log(self.file_object, 'Accuracy for XGBoost:' + str(self.xgboost_score))
            else:
                self.xgboost_score = roc_auc_score(test_y, self.prediction_xgboost)
                self.logger_object.log(self.file_object, 'AUC for XGBoost:' + str(self.xgboost_score))
            # ----- SVM -----
            self.svm = self.get_best_params_for_svm(train_x, train_y)
            self.prediction_svm = self.svm.predict(test_x)
            if len(test_y.unique()) == 1:
                self.svm_score = accuracy_score(test_y, self.prediction_svm)
                # BUG FIX: this branch previously logged the non-existent
                # attribute self.sv_score, which raised AttributeError.
                self.logger_object.log(self.file_object, 'Accuracy for SVM:' + str(self.svm_score))
            else:
                self.svm_score = roc_auc_score(test_y, self.prediction_svm)
                self.logger_object.log(self.file_object, 'AUC for SVM:' + str(self.svm_score))
            # Return whichever model scored higher (ties go to the SVM).
            if self.svm_score < self.xgboost_score:
                return 'XGBoost', self.xgboost
            else:
                # self.svm is the freshly tuned+fitted SVM (the same object
                # self.sv_classifier was rebound to during tuning).
                return 'SVM', self.svm
        except Exception as e:
            self.logger_object.log(self.file_object,
                                   'Exception occured in get_best_model method of the Model_Finder class. Exception message: ' + str(
                                       e))
            self.logger_object.log(self.file_object,
                                   'Model Selection Failed. Exited the get_best_model method of the Model_Finder class')
            raise Exception()
| [
"noreply@github.com"
] | noreply@github.com |
72e99ab5f865b18e80a5fd7dbe9e887b0bcfcdbc | 8e8273a3c9b87e58e46dd6ab575a33eb6fde9f62 | /version_manager/options_set.py | e9aa203b1975a30aac22e2e044a66baa54686947 | [] | no_license | mdrotthoff/version-manager-py | 69ddd1308f1f1c896739f583f372d1af09d3d384 | e5f388ff3856f7f4f1818215422610233b2dcb1d | refs/heads/master | 2020-12-07T07:07:02.762375 | 2020-01-08T22:52:38 | 2020-01-08T22:52:38 | 232,666,355 | 0 | 0 | null | 2020-01-08T21:46:51 | 2020-01-08T21:46:50 | null | UTF-8 | Python | false | false | 782 | py | from typing import Dict, List, Optional
import yaml
def get_parameter_values(parameter_values: Dict[str, str],
                         values_list: Optional[List[str]]) -> Dict[str, str]:
    """
    Override the parameter values that are given in the list.
    It assumes each parameter is in the 'KEY=VALUE' format.

    The mapping is updated in place and also returned.  BUG FIX: splitting
    previously used maxsplit=2, which truncated values containing '='
    ('K=a=b' became 'a'); the string is now split only on the first '='.
    """
    if not values_list:
        return parameter_values
    for value in values_list:
        # partition keeps everything after the first '=' intact.
        key, _, val = value.partition('=')
        parameter_values[key] = val
    return parameter_values
def get_parameters_from_file(file_name: Optional[str]) -> Dict[str, str]:
    """Load the first YAML document of *file_name*; an empty dict when no
    file name is given."""
    if not file_name:
        return dict()
    with open(file_name, 'r', encoding='utf-8') as stream:
        documents = list(yaml.safe_load_all(stream))
    return documents[0]
| [
"bogdan.mustiata@gmail.com"
] | bogdan.mustiata@gmail.com |
d8330bd2056ad980bf0bf06bbfdcea2a48d482f0 | dbdc002660adf3f633c4d5d4eb890ff43ba229a7 | /funcoes_com_retorno.py | 35a1a1ac1a078efe40f71a346472f67c48ebc534 | [] | no_license | ArthurKVasque07/PythonGEEK | df1f184435a863ce872df1e366463b4fec9a6c64 | bd8b86608fd854643d3f81f02b48db88f4e6f832 | refs/heads/master | 2022-10-06T18:49:04.441047 | 2020-06-10T20:54:18 | 2020-06-10T20:54:18 | 271,382,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py | """
Funções com retorno
numeros = [1, 2, 3]
ret_pop = numeros.pop()
print(f'Retorno de pop: {ret_pop}')
ret_pr = print(numeros)
print(f'Retorno de print: {ret_pr}')
OBS: Em Python, quando uma função não retorna nenhum valor, o retorno é None
OBS: Funções Python que retornam valores, devem retornar estes valores com a
palavra reservada return
OBS: Não precisamos necessariamente criar uma variável para receber o retorno
de uma função. Podemos passar a execução da função para outras funções.
# Vamos refatorar esta função para que ela retorno o valor
def quadrado_de_7():
return 7 * 7
# Criamos uma variável para receber o retorno da função
ret = quadrado_de_7()
print(f'Retorno {ret}')
print(f'Retorno: {quadrado_de_7()}')
# Refatorando a primeira função
def diz_oi():
return 'Oi '
alguem = 'Pedro!'
print(diz_oi())
print(alguem)
OBS: Sobre a palavra reservada return
1 - Ela finaliza a função, ou seja, ela sai da execução da função;
2 - Podemos ter, em uma função, diferentes returns;
3 - Podemos, em uma função, retornar qualquer tipo de dado e até mesmo múltiplos valores;
# Exemplos 1 - Ela finaliza a função, ou seja, ela sai da execução da função;
def diz_oi():
print('Estou sendo executado antes do retorno...')
return 'Oi! '
print('Estou sendo executado após o retorno...')
print(diz_oi())
# Exemplo 2 - Podemos ter, em uma função, diferentes returns;
def nova_funcao():
variavel = False
if variavel:
return 4
elif variavel is None:
return 3.2
return 'b'
print(nova_funcao())
# Exemplo 3 - Podemos, em uma função, retornar qualquer tipo de dado e até mesmo múltiplos valores;
def outra_funcao():
return 2, 3, 4, 5
#num1, num2, num3, num4 = outra_funcao()
#print(num1, num2, num3, num4)
print(outra_funcao())
print(type(outra_funcao()))
# Vamos criar uma função para jogar a moeda
from random import random
def joga_moeda():
# Gera um número pseudo-randômico entre 0 e 1
if random() > 0.5:
return 'Cara'
return 'Coroa'
print(joga_moeda())
"""
# Erros comuns na utilização do retorno, que na verdade nem é erro, mas sim codificação desnecessária.
def eh_impar(numero=61):
    """Return True when ``numero`` is odd, False otherwise.

    Generalized: the number to test is now a parameter, defaulting to the
    previously hard-coded 61 so existing calls behave identically.  The
    if/return-True/return-False ladder (the "unnecessary coding" noted
    above) is collapsed into returning the boolean comparison directly.
    """
    return numero % 2 != 0
print(eh_impar())
| [
"arthurkvasque.eng@outlook.com"
] | arthurkvasque.eng@outlook.com |
3a4162c73e2895e4844d4a8ce5c4a057e8fa230e | cb703e45cf56ec816eb9203f171c0636aff0b99c | /Dzien06/loger.py | 0e546809184bbae08d85b4ec2e6a1b2e188b982b | [] | no_license | marianwitkowskialx/Enzode | dc49f09f086e4ca128cd189852331d3c9b0e14fb | 67d8fd71838d53962b4e58f73b92cb3b71478663 | refs/heads/main | 2023-06-04T20:58:17.486273 | 2021-06-24T16:37:53 | 2021-06-24T16:37:53 | 366,424,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py |
# Example of logging in Python
import logging
# One line per record: timestamp, level, source file name, message.
log_format="%(asctime)s:%(levelname)s:%(filename)s:%(message)s"
logging.basicConfig(
    format=log_format,
    handlers= [
        logging.StreamHandler(),          # mirror every record to the console
        logging.FileHandler("app1.log")   # ...and append it to app1.log
    ],
    level=logging.DEBUG,
    #filename="app.log",
    datefmt="%Y-%m-%dT%H:%M:%S%z",
)
# One sample record per severity level (fatal is an alias of critical).
logging.debug("debug message")
logging.info("info message")
logging.warning("warning message")
logging.error("error message")
logging.fatal("fatal message")
try:
y = 1/0
except Exception as exc:
logging.critical(exc, exc_info=True) | [
"marian.witkowski@gmail.com"
] | marian.witkowski@gmail.com |
b9a7e5bcbcc641fbd3a75a860f0167b605278136 | 06fb125430cfc6b7cd9972a1c8843a57a600a869 | /booktime/main/migrations/0001_initial.py | 10b6d792ba1c93fefe627e2b2e4e68c11e5b8cf8 | [] | no_license | thtan89/djangotable2 | 7ad570d9ef19dba5804c2068abdfe7fb62d615ec | cf8cb9e8d129db56b4c788e4de2a458ce001ee98 | refs/heads/master | 2022-11-24T20:02:18.672819 | 2019-08-06T04:50:07 | 2019-08-06T04:50:07 | 200,616,495 | 0 | 0 | null | 2022-11-22T04:11:04 | 2019-08-05T08:38:51 | Python | UTF-8 | Python | false | false | 4,329 | py | # Generated by Django 2.2.4 on 2019-08-05 07:15
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import main.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``main`` app.

    Creates the Product / ProductTag / ProductImage catalogue models and
    a custom email-keyed ``User`` model managed by
    ``main.models.UserManager``. Do not hand-edit generated operations.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # Catalogue item sold by the shop.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('description', models.TextField(blank=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('slug', models.SlugField(max_length=48)),
                ('active', models.BooleanField(default=True)),
                ('in_stock', models.BooleanField(default=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Free-form tag that products can be labelled with.
        migrations.CreateModel(
            name='ProductTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('slug', models.SlugField(max_length=48)),
                ('description', models.TextField(blank=True)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        # Image (plus thumbnail) attached to a product.
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='product-images')),
                ('thumbnail', models.ImageField(null=True, upload_to='product-thumbnails')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Product')),
            ],
        ),
        # Many-to-many link added after both sides exist.
        migrations.AddField(
            model_name='product',
            name='tags',
            field=models.ManyToManyField(blank=True, to='main.ProductTag'),
        ),
        # Custom user model: email (unique) replaces the username field.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', main.models.UserManager()),
            ],
        ),
    ]
| [
"thtan@tssb.com.my"
] | thtan@tssb.com.my |
880ee6e8c98c55f807f32262520214eb46a8e0a9 | 291241959449cba24057816d67bdfc42bcf060ac | /core/serializers.py | 857771437ad66efc73b7a53ff41fbf46e7f2b0b8 | [
"MIT"
] | permissive | Umutbek/courses_app_neobis | 105aba08ab82ec3ea1e96b3ae25095c570d622fe | aca8215f2cfa0174cea832469b5611618ea07384 | refs/heads/master | 2022-11-29T02:18:20.207882 | 2020-08-11T17:48:31 | 2020-08-11T17:48:31 | 286,809,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | from rest_framework import serializers
from core import models
class CategorySerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Category model."""
    class Meta:
        model = models.Category
        fields = "__all__"
class BranchSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Branch model.

    Also used as the nested ``branches`` field of CoursesSerializer.
    """
    class Meta:
        model = models.Branch
        fields = "__all__"
class ContactSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Contact model.

    Also used as the nested ``contacts`` field of CoursesSerializer.
    """
    class Meta:
        model = models.Contact
        fields = "__all__"
class CoursesSerializer(serializers.ModelSerializer):
    """Serializer for Courses, including nested branches and contacts.

    ``create`` is overridden because DRF does not persist writable
    nested serializers automatically: the nested payloads are popped
    off and stored as related Branch/Contact rows.
    """
    branches = BranchSerializer(many=True)
    contacts = ContactSerializer(many=True)

    class Meta:
        model = models.Courses
        fields = ('id', 'name', 'description',
                  'category', 'logo', 'branches', 'contacts')

    def create(self, validated_data):
        """Create a Course plus its nested Branch and Contact rows."""
        branch_payloads = validated_data.pop('branches')
        contact_payloads = validated_data.pop('contacts')
        course = models.Courses.objects.create(**validated_data)
        for payload in branch_payloads:
            models.Branch.objects.create(course=course, **payload)
        for payload in contact_payloads:
            models.Contact.objects.create(course=course, **payload)
        return course
| [
"noreply@github.com"
] | noreply@github.com |
99c7a87a5d8431b21888a5a8c5512f6f205f3704 | fd7598754b87536d3072edee8e969da2f838fa03 | /chapter3_programming17.py | 257697649b65df0980c01265e13efa08d4a817ce | [] | no_license | dorabelme/Python-Programming-An-Introduction-to-Computer-Science | 7de035aef216b2437bfa43b7d49b35018e7a2153 | 3c60c9ecfdd69cc9f47b43f4a8e6a13767960301 | refs/heads/master | 2020-05-02T23:19:44.573072 | 2019-03-28T21:27:20 | 2019-03-28T21:27:20 | 178,261,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # Program using Newton's method to approximate square root
import math
def main():
    """Interactively approximate a square root via Newton's method."""
    # Banner and description.
    print("\nSquare Root Approximator\n")
    print("This program calculates the approximation of the square root of "
          "a number using Newton's method.")
    # Gather the radicand, the iteration count and the starting estimate.
    number = int(input("\nEnter the number whose square root you'd like to calculate: "))
    iterations = int(input("Enter the number of times Newton's method should iterate: "))
    estimate = float(input("Enter your initial guess of what the square root should be: "))
    # Newton's update for sqrt: x <- (x + N / x) / 2, repeated as asked.
    for _ in range(iterations):
        estimate = (estimate + number / estimate) / 2
    # Report the estimate and its deviation from math.sqrt.
    print("\nThe approximate square root of ", number, " is ", estimate, ".", sep="")
    print("\nThe error in this approximation is ", math.sqrt(number) - estimate, ".", sep="")
main()
| [
"contact.dorabelme@gmail.com"
] | contact.dorabelme@gmail.com |
91548eaadeb4ea8966098bd2101213fadc03cd07 | 653443f348293a6f8bc6b371077f508de81a960b | /libro/problemas_resueltos/Capitulo2/problema2_1.py | 39b1175af56bbc3f503fcd6b617b89090968fddf | [] | no_license | RicardoBernal72/CYPRicardoBS | b6527f5e476df310cecae4ef997a7be17cf02189 | 8295523ba1641cbfba013215406d190d0a2af1ba | refs/heads/master | 2020-07-23T18:46:51.968914 | 2019-12-10T19:54:42 | 2019-12-10T19:54:42 | 207,672,072 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | N=float(input("numero de sonidos por min: "))
# Chirp-rate thermometer: N chirps per minute mapped to a temperature.
# Guard-clause form; `not N > 0` keeps the exact branch behaviour of the
# original `if N > 0` test for every input (including NaN).
if not N > 0:
    print("Fin del programa")
else:
    T = N / 4 + 40
    print(f"la temperatura aproximada del ambiente es de {T}°")
| [
"RicardoBernal72"
] | RicardoBernal72 |
e43289d08b2bee5b02db3fd8e63c0ab77b14b898 | 4f793320d5d2d003b8e32d7d0204bc152f703d31 | /hypercane/hfilter/containing_pattern.py | 948a41498d104eff2c5521e4ca164b1533d8e629 | [
"MIT"
] | permissive | himarshaj/hypercane | 77ea458e75033a51fa452c557e82eb8ff5e0f887 | 99ac84834e2aad57cdf4687469a63b6305d20e47 | refs/heads/master | 2023-03-29T22:10:48.123857 | 2021-04-13T23:17:45 | 2021-04-13T23:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import logging
import concurrent.futures
import re
from ..utils import match_pattern
module_logger = logging.getLogger('hypercane.hfilter.patterns')
def filter_pattern(input_urims, cache_storage, regex_pattern, include):
    """Filter mementos by whether their content matches a regex.

    Parameters
    ----------
    input_urims : iterable of str
        URI-Ms to test.
    cache_storage : str
        Cache connection string handed to ``match_pattern``.
    regex_pattern : str
        Pattern, compiled once and searched for in each memento.
    include : bool
        True keeps URI-Ms whose content matches the pattern; False
        keeps those that do not match.

    Returns
    -------
    list of str
        URI-Ms that passed the filter. Order is not guaranteed to
        follow ``input_urims`` because results arrive as the worker
        futures complete. URI-Ms whose lookup raises are logged and
        skipped.
    """
    filtered_urims = []
    compiled_pattern = re.compile(regex_pattern)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        future_to_urim = {executor.submit(match_pattern, urim, cache_storage, compiled_pattern): urim for urim in input_urims}

        for future in concurrent.futures.as_completed(future_to_urim):
            urim = future_to_urim[future]

            try:
                match = future.result()
                # Fix of the `include == True` / `include == False`
                # anti-idiom: keep the URI-M exactly when its match
                # status agrees with the requested polarity.
                if (match is not None) == bool(include):
                    filtered_urims.append(urim)
            except Exception as exc:
                module_logger.exception('URI-M [{}] generated an exception: [{}]'.format(urim, exc))
                module_logger.critical("failed to perform pattern match for [{}], skipping...".format(urim))

    return filtered_urims
| [
"jones.shawn.m@gmail.com"
] | jones.shawn.m@gmail.com |
0470fae819522f030e278572527f496454c5ea54 | cb0a2055ec70f178f9e4a6fc11b3474419c91718 | /src/compas_fea/structure/interaction.py | da0ba450684e750041c94db6dad9abf75c9d22ab | [
"MIT"
] | permissive | ming91915/compas_fea | 63b8388662a8d088580ea721eaf0fb1b8e74e596 | 1be3f6ce980bb9c001915ea1b3b75df08f7a50dd | refs/heads/master | 2020-03-16T09:09:37.318758 | 2018-05-07T11:51:58 | 2018-05-07T11:51:58 | 132,609,915 | 2 | 0 | null | 2018-05-08T13:03:28 | 2018-05-08T13:03:27 | null | UTF-8 | Python | false | false | 1,303 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = ['Andrew Liew <liew@arch.ethz.ch>']
__copyright__ = 'Copyright 2018, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'liew@arch.ethz.ch'
__all__ = [
# 'HeatTransfer',
]
class HeatTransfer(object):
    """Heat transfer across an interaction interface.

    Parameters
    ----------
    name : str
        Heat transfer name.
    amplitude : str
        Name of the heat transfer amplitude function.
    interface : str
        Name of the interaction interface the transfer acts on.
    sink_temp : float
        Sink temperature in K.
    film_coef : float
        Film coefficient.
    ambient_temp : float
        Ambient temperature in K.
    emissivity : float
        Emissivity.
    """

    def __init__(self, name, amplitude, interface, sink_temp, film_coef, ambient_temp, emissivity):
        # Type tag mirroring the class name.
        self.__name__ = 'HeatTransfer'
        # Identification of the transfer and where it applies.
        self.name = name
        self.amplitude = amplitude
        self.interface = interface
        # Convection-related quantities.
        self.sink_temp = sink_temp
        self.film_coef = film_coef
        # Radiation-related quantities.
        self.ambient_temp = ambient_temp
        self.emissivity = emissivity
| [
"liew@arch.ethz.ch"
] | liew@arch.ethz.ch |
69fd83990f8d65b3de84b868c8044b4418654031 | 58b87ea29a95a5ceeaae4c2d7db1b16502ed158f | /Numerical Analysis/NumpyEx.py | e1d46e96f9ca5c400cabb5f12dbaf62414e01781 | [] | no_license | meyerpa/Python | b609e8c036b478b20cd17a4cc47b71c129c968f8 | 3797f9be3341e69d5e9eccfc1b4e7f52fdd9c666 | refs/heads/master | 2021-01-01T03:58:40.183829 | 2018-03-14T14:24:57 | 2018-03-14T14:24:57 | 56,526,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | from numpy import zeros
| [
"meyerpa@mnstate.edu"
] | meyerpa@mnstate.edu |
641d57c8eb047f84c0272f6d000da25da437cc5a | 647959c0d6ab322d1a6fd7478a78d47772f029af | /monitoraggio_MP/old_files/air_quality.py | 423efbeff8d2475cbede3cc796ae0d092b6a8215 | [] | no_license | PaoloMiseo/Air-pollutant-measurement | 67160560ff2a0a64f5b1175b435500e5fbda24ee | f03fdba9dade75dc2658a7162132644a8306d5ec | refs/heads/master | 2021-01-01T01:39:42.233771 | 2020-02-08T12:05:48 | 2020-02-08T12:05:48 | 239,125,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py |
import serial, time
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib import rc
from datetime import datetime
# Serial link to the particulate-matter sensor.
# NOTE(review): the 10-byte frame decoded in get_sensor_data() suggests
# an SDS011-style dust sensor -- confirm the model before changing this.
ser = serial.Serial('/dev/ttyUSB0')
sampling_time = 2190 #ms
# Graph Parameters
# NOTE: x_len is currently unused -- the history-trimming code that used
# it (inside animate) is commented out, so the plot keeps all samples.
x_len = 25
y_range = [0, 100]
# Graph creation
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim(y_range)  # initial y-limits; animate() redraws every frame
# Title and labels
plt.title('Monitoraggio del materiale particolato di Pablito')
plt.xlabel('Time')
plt.ylabel('[\mu g/m^3]')
# Lists to display
# Shared histories appended to by animate(): timestamps, PM2.5, PM10.
xs = []
ys1 = []
ys2 = []
# Create display line
#line1, = ax.plot(xs, ys1, lw=3)
#line2, = ax.plot(xs, ys2, lw=3)
def get_sensor_data():
    """Read one 10-byte frame from the dust sensor and decode PM values.

    Returns
    -------
    tuple of float
        (pm2.5, pm10) in ug/m^3; each value is transmitted as a
        little-endian 16-bit integer of tenths of a ug/m^3.
    """
    # Read the whole frame in one call instead of ten 1-byte reads;
    # ser.read blocks until the requested number of bytes has arrived.
    # NOTE(review): the frame header/checksum are not validated -- a
    # misaligned stream would decode to garbage values. Confirm against
    # the sensor's protocol before relying on these readings.
    frame = ser.read(10)
    pmtwofive = int.from_bytes(frame[2:4], byteorder='little')/10
    pmten = int.from_bytes(frame[4:6], byteorder='little')/10
    return pmtwofive, pmten
def animate(i, xs, ys1, ys2):
    """FuncAnimation callback: poll the sensor and redraw both series.

    Parameters
    ----------
    i : int
        Frame counter supplied by FuncAnimation (unused).
    xs, ys1, ys2 : list
        Shared histories of timestamps, PM2.5 and PM10 readings,
        appended to in place on every frame.
    """
    pmtwofive, pmten = get_sensor_data()
    now = datetime.now()
    xs.append(now.strftime("%H:%M:%S"))
    ys1.append(pmtwofive)  # PM2.5
    ys2.append(pmten)      # PM10
    # Limit the number of elements
    #ys1 = ys1[-x_len:]
    #ys2 = ys2[-x_len:]
    # Bug fix: min(min(ys1, ys2)) compared the two *lists* (lexicographic
    # ordering) before reducing, so the limits could track the wrong
    # series; and set_ylim() was applied before ax.clear(), which threw
    # the limits away. Compute the true extrema over both series and
    # apply them after re-plotting.
    min_val = min(min(ys1), min(ys2))
    max_val = max(max(ys1), max(ys2))
    # axis update: redraw from scratch, then apply the computed limits.
    ax.clear()
    ax.plot(xs, ys1, xs, ys2)
    ax.legend(['PM2.5', 'PM10'])
    ax.set_ylim([0.9*min_val, 1.1*max_val])
    # plot properties
    plt.xticks(rotation=45, ha='right')
    plt.subplots_adjust(bottom=0.30)
    plt.title('Monitoraggio del materiale particolato di Pablito')
    plt.xlabel('Time')
    plt.ylabel('[\mu g/m^3]')
    plt.grid()
    print("PM2.5 = {mp25}, PM10 = {mp10}".format(mp10=pmten, mp25=pmtwofive))
# Redraw every `sampling_time` ms, passing the shared history lists to
# `animate`; keep the FuncAnimation reference so it is not garbage-collected.
anim = FuncAnimation(fig, animate, fargs=(xs,ys1,ys2,),
                    frames=200, interval=sampling_time)
plt.show() | [
"noreply@github.com"
] | noreply@github.com |
d1aa5a956db621b997e15a643af44bad365da72b | bd690f474c0bd83320cde40db81bd6661d71be05 | /mapserver/django/django_project/mapserver/__init__.py | 5159896d98851f4448b7b4f7b1ffe3c7839a6d51 | [
"MIT",
"LGPL-2.1-only"
] | permissive | inasafe/inasafe-fba | 0c48bd231e4cd47a4a5d6ffde2859370b45b4ae3 | a8ab2054624fd3e93521bdd5ef2a8c00ee5b4923 | refs/heads/master | 2021-07-11T09:58:26.049441 | 2020-07-01T14:58:58 | 2020-07-01T14:58:58 | 232,991,302 | 1 | 7 | MIT | 2021-03-19T23:38:28 | 2020-01-10T07:32:38 | JavaScript | UTF-8 | Python | false | false | 75 | py | __author__ = 'Irwan Fathurrahman <irwan@kartoza.com>'
__date__ = '09/06/20' | [
"lana.pcfre@gmail.com"
] | lana.pcfre@gmail.com |
d1e6faa2b86d68166dd5d08790d18f69e6ca26a7 | a9ce2f90130070b809f06a5b1f981cdd28b58533 | /node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi | ec1873f401795ba763f9174a62fbd52ad68c92bb | [
"MIT"
] | permissive | bmhan319/reactCalc | 1cb7ca776cc3175c11deb2c2c8e133d921c737e8 | 41b765583a1d51f8af229756b65db9758ddbf650 | refs/heads/master | 2023-01-11T14:12:33.907662 | 2020-11-14T05:49:16 | 2020-11-14T05:49:16 | 202,610,254 | 0 | 0 | null | 2023-01-04T07:19:11 | 2019-08-15T20:54:46 | JavaScript | UTF-8 | Python | false | false | 5,603 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "5",
"node_byteorder": "little",
"node_code_cache": "yes",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "8.0",
"nodedir": "/Users/bmhan319/Library/Caches/node-gyp/12.13.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/bmhan319/.npm-init.js",
"userconfig": "/Users/bmhan319/.npmrc",
"cidr": "",
"node_version": "12.13.1",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/bmhan319/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v12.13.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/5r/9wbv2zxj2cl9vsp7g29pw3y40000gq/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"bmhan319@hotmail.com"
] | bmhan319@hotmail.com |
80f79e093a6bf655acd89b8a66db70c18f2b89a5 | 1cf3c9ed26259417feecaa65cc582d6543ad3e0d | /money_changer/exchange/views.py | 2b7fe89fc20d483ee5dc5a01d34cd37ad549b55f | [
"MIT"
] | permissive | moshe742/money-exchange | ad56ec457a6982637ebd8e54599e4a125b63fb94 | 37320e96c5413d0b0a0633068a99ef4b23c93a0e | refs/heads/main | 2023-04-06T23:26:16.337482 | 2021-04-10T21:17:11 | 2021-04-10T21:17:11 | 356,627,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | from datetime import datetime as dt
from datetime import timedelta
from django.shortcuts import render
from django.views import View
from lxml import etree
import requests
from io import BytesIO
from exchange.forms import ExchangeForm
# Create your views here.
def parse_xml(xml_data):
    """Parse an XML string and return its root element.

    Bug fix: the original called ``xml_data.strip('')``, which strips
    nothing (the empty character set); ``strip()`` now removes the
    surrounding whitespace that the XML declaration must not be
    preceded by. ``str.encode`` replaces ``bytes(s, encoding=...)``.
    """
    bytes_io = BytesIO(xml_data.strip().encode('utf-8'))
    tree = etree.parse(bytes_io)
    return tree.getroot()
def get_currencies(xml_root):
    """Build a mapping of currency code -> {currency_code, rate, unit}.

    The shekel (NIS) is seeded with rate/unit of 1.0 since the feed
    quotes every other currency against it. Children of *xml_root*
    whose tag is not ``CURRENCY`` are ignored.
    """
    currencies = {
        'NIS': {
            'currency_code': 'NIS',
            'rate': 1.0,
            'unit': 1.0,
        }
    }
    for node in xml_root:
        if node.tag != 'CURRENCY':
            continue
        entry = {}
        for field in node:
            if field.tag in ('RATE', 'UNIT'):
                entry[field.tag.lower()] = float(field.text)
            elif field.tag == 'CURRENCYCODE':
                entry['currency_code'] = field.text
        currencies[entry['currency_code']] = entry
    return currencies
def shekel_to_foreign(rate, amount, num_of_units):
    """Convert *amount* shekels into a currency quoted at *rate* shekels per *num_of_units* units."""
    converted = num_of_units * amount / rate
    return converted
def foreign_to_shekel(rate, amount, num_of_units):
    """Convert *amount* of a foreign currency (quoted at *rate* shekels per *num_of_units* units) into shekels."""
    converted = rate * amount / num_of_units
    return converted
def get_rate_unit(currencies, currency):
    """Return the (rate, unit) pair recorded for *currency*.

    Raises KeyError when the currency is absent from the mapping.
    """
    entry = currencies[currency]
    return entry['rate'], entry['unit']
class ExchangeView(View):
    """Currency converter backed by the Bank of Israel daily-rate XML feed.

    GET renders the form pre-filled with NIS -> USD for today; POST
    converts the submitted amount, using the shekel (NIS) as the pivot
    for cross rates.
    """

    # Maps currency codes to the numeric codes used by the form's
    # choice fields.
    currency_codes = {
        'NIS': '00',
        'USD': '01',
        'GBP': '02',
        'JPY': '31',
        'EUR': '27',
        'AUD': '18',
        'CAD': '06',
        'DKK': '12',
        'NOK': '28',
        'ZAR': '17',
        'SEK': '03',
        'CHF': '05',
        'JOD': '69',
        'LBP': '70',
        'EGP': '79',
    }

    def get(self, request):
        """Render the exchange form with its default values."""
        form = ExchangeForm(initial={
            'from_currency': '00',
            'to_currency': '01',
            'date': dt.today().strftime('%d/%m/%Y'),
            'currency_amount': 1,
        })
        return render(request, 'exchange/index.html', context={
            'form': form,
        })

    def post(self, request):
        """Convert the submitted amount and render the result.

        The feed publishes no rates on weekends/holidays; when it
        answers with an ERROR document, the requested date is walked
        back one day at a time until a published rate is found.
        """
        curr_number_to_code = {v: k for k, v in self.currency_codes.items()}
        form = ExchangeForm(request.POST)
        result = 'There was an error'
        if form.is_valid():
            url = 'http://www.boi.org.il/currency.xml'
            from_currency = curr_number_to_code[form.cleaned_data['from_currency']]
            to_currency = curr_number_to_code[form.cleaned_data['to_currency']]
            currency_amount = float(form.cleaned_data['currency_amount'])

            # Identity conversion needs no network round trip.
            if from_currency == to_currency:
                return render(request, 'exchange/index.html', context={
                    'form': form,
                    'result': currency_amount
                })

            payload = {
                'rdate': form.cleaned_data['date'].strftime('%Y%m%d')
            }
            res = requests.get(url, params=payload)
            # Bug fix: the original decremented a `num_days` counter that
            # reached 0 after one retry, after which the very same date
            # was requested forever (infinite loop across a holiday
            # weekend). Step back exactly one day per retry instead.
            while 'ERROR' in res.text:
                date_to_check = dt.strptime(payload['rdate'], '%Y%m%d')
                payload['rdate'] = (date_to_check - timedelta(days=1)).strftime('%Y%m%d')
                res = requests.get(url, params=payload)

            parsed_xml = parse_xml(res.text)
            currencies = get_currencies(parsed_xml)

            # Fall back to rate/unit of 1 for currencies missing from the
            # feed; get_rate_unit raises KeyError for unknown codes.
            from_currency_rate, from_currency_unit, to_currency_rate, to_currency_unit = 1, 1, 1, 1
            try:
                from_currency_rate, from_currency_unit = get_rate_unit(currencies, from_currency)
            except KeyError:
                pass
            try:
                to_currency_rate, to_currency_unit = get_rate_unit(currencies, to_currency)
            except KeyError:
                pass

            if from_currency == 'NIS':
                result = shekel_to_foreign(to_currency_rate, currency_amount, to_currency_unit)
            elif to_currency == 'NIS':
                result = foreign_to_shekel(from_currency_rate, currency_amount, from_currency_unit)
            else:
                # Cross rate: convert through the shekel as a pivot.
                converted = foreign_to_shekel(from_currency_rate, currency_amount, from_currency_unit)
                result = shekel_to_foreign(to_currency_rate, converted, to_currency_unit)

        return render(request, 'exchange/index.html', context={
            'form': form,
            'result': result
        })
| [
"moshegrey@gmail.com"
] | moshegrey@gmail.com |
10adb072e9eabcec4d2ad0f61ee7d6d29b38c97c | fdcb2cdee4d5b398eed4eefc830213234e3e83a5 | /01_MIT_Learning/week_2/lectures_and_examples/3_guess_my_number.py | 12e1fa8891345cf687bad33b67bd047e96880487 | [] | no_license | daftstar/learn_python | be1bbfd8d7ea6b9be8407a30ca47baa7075c0d4b | 4e8727154a24c7a1d05361a559a997c8d076480d | refs/heads/master | 2021-01-20T08:53:29.817701 | 2018-01-15T22:21:02 | 2018-01-15T22:21:02 | 90,194,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | # #######################################################
# Create a program that guesses a secret number!
# The program works as follows:
# you (user) thinks of an integer between 0 (inclusive) and 100 (not inclusive).
# The computer makes guesses, and you give it input -
# is its guess too high or too low?
# Using bisection search, the computer will guess the user's
# secret number
# #######################################################
# Bisection search for the user's secret number in [0, 100).
# Bug fix: the success branch used `correct == True` -- a comparison
# whose result was discarded -- instead of the assignment
# `correct = True`; only the stray `break` masked the bug. The flag is
# now set properly and the loop condition is the single exit point.
low = 0
high = 100
correct = False

print ("Please think of a number between 0 and 100!")

while not correct:
    # Probe the midpoint of the remaining range.
    guess = (high + low) // 2
    print("Is your secret number %s?" % guess)
    response = input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. ")
    if response == 'c':
        correct = True
    elif response == 'l':
        # we guessed too low. Set the floor to the current guess (midpoint)
        low = guess
    elif response == 'h':
        # we guessed too high. Set the ceiling to the current guess (midpoint)
        high = guess
    else:
        print("Sorry, I did not understand your input.")

print('Game over. Your secret number was: %s' % guess)
# #########
# ORIGINAL WAY, HAD WAY TOO MUCH REPETITION
# ##########
# while correct == False:
# print ("Is your secret number %s?" % guess)
# response = input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly.")
# if response == "c":
# print ("Game over. Your secret number was: ", mid)
# correct = True
# break
# elif response == "l":
# low = mid
# mid = round((low + high) / 2)
# guess = mid
# elif response == "h":
# high = mid
# mid = round((low + high) / 2)
# guess = mid
# else:
# response = input("Sorry, I did not understand your input.") | [
"nikdaftary@gmail.com"
] | nikdaftary@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.