index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
43,196 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/migrations/0005_auto_20200121_1256.py | # Generated by Django 2.2.5 on 2020-01-21 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: store phone_number as a 10-digit integer Decimal column."""
    dependencies = [
        ('account', '0004_auto_20200121_1113'),
    ]
    operations = [
        migrations.AlterField(
            model_name='extendedusermodel',
            name='phone_number',
            field=models.DecimalField(decimal_places=0, max_digits=10),
        ),
    ]
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,197 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/migrations/0003_auto_20200121_1111.py | # Generated by Django 2.2.5 on 2020-01-21 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch ExtendedUserModel.location to an integer column.

    Superseded by 0005/later state, where location becomes a DecimalField.
    """
    dependencies = [
        ('account', '0002_auto_20200121_1057'),
    ]
    operations = [
        migrations.AlterField(
            model_name='extendedusermodel',
            name='location',
            # max_length is ignored by IntegerField (Django system check
            # fields.W122), so it is dropped; the resulting column is unchanged.
            field=models.IntegerField(),
        ),
    ]
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,198 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/forms.py | from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
from .models import ExtendedUserModel
from account.models import ExtendedUserModel
class UserForm(UserCreationForm):
    """Sign-up form: Django User plus password/confirmation fields."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class UserExtendedForm(forms.ModelForm):
    """Profile fields collected alongside UserForm at registration."""
    class Meta:
        model = ExtendedUserModel
        fields = ['phone_number', 'age', 'location', 'gender']
class LoginForm(forms.ModelForm):
    """Username/password form shown on the login page.

    NOTE(review): a ModelForm over User also runs model validation (e.g.
    username uniqueness) on submit; Django's AuthenticationForm is usually
    a better fit — confirm before swapping.
    """
    class Meta:
        model= User
        fields=['username','password']
        # Fix: render the password input masked instead of as plain text.
        widgets = {'password': forms.PasswordInput()}
class EditForm(forms.ModelForm):
    """Profile-editing form over ExtendedUserModel."""
    class Meta:
        model= ExtendedUserModel
        fields=[
            'age',
            'gender',
            'location',
            'phone_number',
        ]
    # NOTE(review): this override adds no validation — the base clean()
    # already returns cleaned_data — so it could be removed.
    def clean(self):
        return self.cleaned_data | {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,199 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/admin.py | from django.contrib import admin
from .models import ExtendedUserModel
# Register your models here.
# Expose the user profile in the Django admin with default options.
admin.site.register(ExtendedUserModel)
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,200 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/health/migrations/0002_crowdsource.py | # Generated by Django 2.2.5 on 2020-01-21 12:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the Crowdsource table with a FK to the user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('health', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Crowdsource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Age', models.IntegerField()),
                ('Gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
                ('Disease', models.CharField(max_length=20)),
                ('Stored', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,201 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/models.py | from django.contrib.auth.models import User
from django.db import models
from phone_field import PhoneField
class ExtendedUserModel(models.Model):
    """Per-user profile data attached one-to-one to Django's User."""
    # Gender choices as (stored value, human label) pairs.
    choice=[('MALE','Male'),('FEMALE','Female')]
    age = models.IntegerField()
    gender = models.CharField(max_length=20,choices=choice)
    # NOTE(review): storing location/phone_number as DecimalField drops
    # leading zeros and any formatting; CharField (or the phone_field
    # package imported above) is likely a better fit — needs a migration.
    location = models.DecimalField(max_digits=6, decimal_places=0)
    phone_number = models.DecimalField(max_digits=10, decimal_places=0)
    user_object = models.OneToOneField(User, related_name='extendeduser', on_delete=models.CASCADE)
    def __str__(self):
        # e.g. "alice's Profile"
        return f'{self.user_object.username}\'s Profile'
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,202 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/health/models.py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Disease(models.Model):
    """A disease name plus an integer identifier.

    NOTE(review): ID1 duplicates Django's implicit primary key — confirm
    it maps to an external dataset id.
    """
    ID1 = models.IntegerField()
    name = models.CharField(max_length=1000)
    def __str__(self):
        return f'{self.name}'
class Crowdsource(models.Model):
    """A user-submitted disease report with audit timestamps."""
    choice = [('MALE', 'Male'), ('FEMALE', 'Female')]
    # NOTE(review): `disease` is defined but never passed as choices= to the
    # Disease field below — confirm whether free text is intended.
    disease = [('TYPHOID', 'typhoid'), ('DELIRIA', 'deliria'), ('DENGUE', 'dengue'), ('MALARIA', 'malaria')]
    Age = models.IntegerField()
    location = models.CharField(max_length=6)
    Gender = models.CharField(max_length=10, choices=choice)
    Created_By = models.ForeignKey(User, on_delete=models.CASCADE, null=False, blank=False)
    Disease = models.CharField(max_length=20)
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    # gramseva
    def __str__(self):
        # Reports are listed by creation timestamp.
        return f'{self.created_date}'
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,203 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/account/migrations/0006_auto_20200121_2141.py | # Generated by Django 2.2.5 on 2020-01-21 16:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: uppercase gender choices and explicit related_name
    on the User one-to-one link."""
    dependencies = [
        ('account', '0005_auto_20200121_1256'),
    ]
    operations = [
        migrations.AlterField(
            model_name='extendedusermodel',
            name='gender',
            field=models.CharField(choices=[('MALE', 'Male'), ('FEMALE', 'Female')], max_length=20),
        ),
        migrations.AlterField(
            model_name='extendedusermodel',
            name='user_object',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='extendeduser', to=settings.AUTH_USER_MODEL),
        ),
    ]
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,204 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/health/admin.py | from django.contrib import admin
from .models import Disease,Crowdsource
# Register your models here.
# class Details(admin.ModelAdmin):
#     fieldsets=[
#         ("Personal Details",{"fields":["Age","Gender","Created_By","created_date","modified_date"]}),
#         ("Disease",{"fields":["Disease"]})
#     ]
# Both health models are exposed with the default ModelAdmin options.
admin.site.register(Disease)
admin.site.register(Crowdsource) | {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,205 | fsociety6/SmartAurangabadHack | refs/heads/master | /CCWebsite/CCWebsite/routing.py | from channels.routing import ProtocolTypeRouter
from channels.security.websocket import AllowedHostsOriginValidator
# Channels entry point; only default HTTP handling is active for now.
application = ProtocolTypeRouter({
    # # Empty for now (http->django views is added by default)
    # 'websocket': AllowedHostsOriginValidator(
    #
    # )
})
| {"/CCWebsite/account/views.py": ["/CCWebsite/account/forms.py"], "/CCWebsite/account/forms.py": ["/CCWebsite/account/models.py"], "/CCWebsite/account/admin.py": ["/CCWebsite/account/models.py"], "/CCWebsite/health/admin.py": ["/CCWebsite/health/models.py"]} |
43,211 | Nitsua365/MazeGenerator | refs/heads/main | /mazeGenerator.py | from numpy import ndarray
from random import randint
import cv2
class MazeNode:
    """A maze cell: a pixel-space (x, y) coordinate plus its adjacency-list index."""
    def __init__(self, coordinate, index):
        self.coordinate = coordinate  # [x, y] anchor position in the image buffer
        self.index = index            # position within MazeGenerator.adjList
    def __eq__(self, other):
        # Nodes are equal when they occupy the same coordinate; the
        # adjacency-list index is deliberately ignored.
        return self.coordinate[0] == other.coordinate[0] and \
               self.coordinate[1] == other.coordinate[1]
    def __str__(self):
        return "coordinate: (x: " + str(self.coordinate[0]) + ", y: " + str(self.coordinate[1]) + ") index: " + str(
            self.index)
    def __getitem__(self, item):
        """Return coordinate component 0 (x) or 1 (y)."""
        if 2 > item >= 0:
            return self.coordinate[item]
        # Fix: raise instead of `assert False` — asserts vanish under
        # `python -O`, which made out-of-range access silently return None.
        raise IndexError(f"MazeNode index out of range: {item}")
    def __hash__(self):
        # Fix: hash only the coordinate so that equal nodes (see __eq__)
        # hash equally; hashing str(self) mixed in the index and broke the
        # __eq__/__hash__ contract.
        return hash((self.coordinate[0], self.coordinate[1]))
class MazeGenerator:
    """Builds the pixel buffer and node grid for a maze image.

    mazeWidth/mazeHeight count nodes; wallWidth and nodeDim are in pixels.
    Each axis holds N nodes of nodeDim px separated/surrounded by N+1 walls.
    """
    def __init__(self, mazeWidth, mazeHeight, wallWidth, nodeDim):
        self.mazeNodeWidth = mazeWidth
        self.mazeNodeHeight = mazeHeight
        self.wallWidth = wallWidth
        self.nodeDimension = nodeDim
        # Rows come from mazeNodeHeight, columns from mazeNodeWidth, 3 colour channels.
        self.buffer = ndarray(shape=((self.mazeNodeHeight * self.nodeDimension) + ((self.mazeNodeHeight + 1) * wallWidth), (self.mazeNodeWidth * self.nodeDimension) + ((self.mazeNodeWidth + 1) * wallWidth), 3), dtype=int)
        self.adjList = []
        self.startLoc = []
        self.endLoc = []
        self.searchStack = []
        # ndarray() leaves memory uninitialised; paint everything black.
        self.buffer.fill(0)
        # One node per grid cell, anchored at its top-left pixel; nodes are
        # appended row-run by row-run, so adjList is ordered by first coordinate.
        nodeWidthCount = self.wallWidth
        while nodeWidthCount < self.buffer.shape[0]:
            nodeHeightCount = self.wallWidth
            while nodeHeightCount < self.buffer.shape[1]:
                self.adjList.append(MazeNode([nodeWidthCount, nodeHeightCount], len(self.adjList)))
                nodeHeightCount += self.nodeDimension + self.wallWidth
            nodeWidthCount += self.nodeDimension + self.wallWidth
        # BUG FIX: this list used to be sized before adjList was populated,
        # so it was always empty; it must mirror the finished node list.
        self.visited = [False for _ in range(len(self.adjList))]
        # Pick a random start node from the first row of nodes.
        start = end = 0
        for i in range(0, len(self.adjList)):
            if self.adjList[i][0] == self.wallWidth:
                start = i
                break
        for i in range(start, len(self.adjList)):
            if self.adjList[i][0] != self.wallWidth:
                end = i
                break
        # `end` is the first index past the row; when the scan never broke
        # (single-row maze), treat the whole list as the row.
        if end <= start:
            end = len(self.adjList)
        # BUG FIX: randint is inclusive on both ends, so use end - 1 to
        # stay inside the first row.
        self.startLoc = self.adjList[randint(start, end - 1)]
        print(str(start) + " " + str(end))
        # Pick a random end node from the last row of nodes.
        lastRowX = self.buffer.shape[0] - (self.nodeDimension + self.wallWidth)
        start = 0
        for i in range(0, len(self.adjList)):
            if self.adjList[i][0] == lastRowX:
                start = i
                break
        # BUG FIX: the old code reused the pixel coordinate lastRowX as the
        # end *index*, which could crash randint or skip the row's final
        # node; the bottom row always runs to the end of adjList.
        self.endLoc = self.adjList[randint(start, len(self.adjList) - 1)]
        print("start: " + str(self.startLoc))
        print("end: " + str(self.endLoc))
    def generateMaze(self):
        """Carve the maze by depth-first search from the start node."""
        self.__DFS(self.startLoc)
    def __DFS(self, node):
        # TODO: depth-first carving is not implemented yet.
        print("DFS")
    def __addEdge(self, coord1, coord2):
        # TODO: knock out the wall between two adjacent nodes.
        print("addedge")
    def writeMaze(self, fileName):
        """Write the maze buffer to *fileName* as a PNG (extension appended
        when missing)."""
        hasPng = fileName.endswith(".png") or fileName.endswith(".PNG")
        if not hasPng:
            fileName += ".png"
        cv2.imwrite(fileName, self.buffer)
| {"/main.py": ["/mazeGenerator.py"]} |
43,212 | Nitsua365/MazeGenerator | refs/heads/main | /main.py | from mazeGenerator import MazeGenerator
def main():
    """Interactively collect maze parameters and write the maze image."""
    width_in = int(input("enter node width: "))
    height_in = int(input("enter node height: "))
    wall_in = int(input("enter wall width: "))
    node_size_in = int(input("enter maze node size: "))
    out_name = input("enter output file name: ")
    # Build the generator and immediately render it to the requested file.
    MazeGenerator(width_in, height_in, wall_in, node_size_in).writeMaze(out_name)
if __name__ == '__main__':
    main()
| {"/main.py": ["/mazeGenerator.py"]} |
43,213 | XinBow99/dacrd-by-python | refs/heads/master | /Dcard.py | ##########imports##########
import requests
import json
import os
# 停止SSL報錯
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
###########################
# 解析JSON
###########################
class Dcard:
    """Minimal Dcard forum client: fetch post metadata, aggregate
    like/comment statistics and download post images."""
    def __init__(self):
        self.dSession = requests.session()
        self.mainUrl = "https://www.dcard.tw/_api/forums/"
        self.forums = ''
        self.popular = ''
        self.limit = 0
        # Aggregated counters: per gender ('m'/'f'), by image presence
        # ('media'), and overall ('total').
        self.infomation = {
            'f': {
                'likeCount': 0,
                'commentCount': 0
            },
            'm':
            {
                'likeCount': 0,
                'commentCount': 0
            },
            'media': {
                'zero':
                {
                    'likeCount': 0,
                    'commentCount': 0
                },
                'notzero':
                {
                    'likeCount': 0,
                    'commentCount': 0
                }
            },
            'total':
            {
                'forums': '',
                'likeCount': 0,
                'commentCount': 0
            }
        }
        self.forumsData = list()
    def showMessage(self):
        """Log a fetch summary and fold every post's like/comment counts
        into self.infomation."""
        data = self.forumsData
        popular = self.popular
        limit = self.limit
        forums = self.forums
        print("[Info]板塊:{}|熱門:{}|欲擷取筆數:{}|總共抓取{}筆資料".format(
            forums, popular, limit, len(data)))
        self.infomation['total']['forums'] = forums
        for singleData in self.forumsData:
            # overall totals
            self.infomation['total']['likeCount'] += int(
                singleData['likeCount'])
            self.infomation['total']['commentCount'] += int(
                singleData['commentCount'])
            # male authors
            if singleData['gender'] == "M":
                self.infomation['m']['likeCount'] += int(
                    singleData['likeCount'])
                self.infomation['m']['commentCount'] += int(
                    singleData['commentCount'])
            # female authors
            elif singleData['gender'] == "F":
                self.infomation['f']['likeCount'] += int(
                    singleData['likeCount'])
                self.infomation['f']['commentCount'] += int(
                    singleData['commentCount'])
            # posts with / without attached images
            if len(singleData['mediaMeta']) > 0:
                self.infomation['media']['notzero']['likeCount'] += int(
                    singleData['likeCount'])
                self.infomation['media']['notzero']['commentCount'] += int(
                    singleData['commentCount'])
            else:
                self.infomation['media']['zero']['likeCount'] += int(
                    singleData['likeCount'])
                self.infomation['media']['zero']['commentCount'] += int(
                    singleData['commentCount'])
    def showCatch(self, i, data):
        """Print a one-post summary (gender, title, excerpt, tags, counts)."""
        print(
            ("【Catch_{}】"
             "\n\t性別:{}"
             "\n\t標題:{}"
             "\n\t摘要:{}"
             "\n\t標籤:{}"
             "\n\t圖片數量:{}"
             "\n\t留言數量:{}"
             "\n\t愛心數量:{}").format(
                i + 1,
                data["gender"],
                data["title"],
                data["excerpt"][0:14] + '...',
                data["topics"],
                len(data["mediaMeta"]),
                data["commentCount"],
                data["likeCount"]
            )
        )
    def getForumsInfor(self, forums='nkfust', popular=True, limit=30):
        """Fetch up to `limit` posts for board `forums` and refresh stats."""
        self.forums = forums
        self.limit = limit
        if popular:
            self.popular = 'true'
        else:
            self.popular = 'false'
        res = self.dSession.get(self.mainUrl + forums + '/posts?',
                                params={
                                    'popular': self.popular, # hot posts first
                                    'limit': str(self.limit) # number of posts; API max is 100
                                },
                                verify=False)
        self.forumsData = res.json()
        self.showMessage()
    def getThisInfo(self):
        """Pretty-print the statistics accumulated by showMessage()."""
        print(("###########[Info|{}]###########\n"
               "|---------------------|\n"
               " 男性發文統計\n"
               " ├———留言:{} 則\n"
               " └———愛心:{} 個\n"
               "|---------------------|\n"
               " 女性發文統計\n"
               " ├———留言:{} 則\n"
               " └———愛心:{} 個\n"
               "|---------------------|\n"
               " 發文附圖統計\n"
               " ├―——有圖\n"
               " │   ├———留言:{} 則\n"
               " │   └———愛心:{} 個\n"
               " └――—沒圖\n"
               "     ├———留言:{} 則\n"
               "     └———愛心:{} 個\n"
               "|---------------------|\n"
               "################################\n").format(
            self.infomation['total']['forums'],
            self.infomation['m']['commentCount'],
            self.infomation['m']['likeCount'],
            self.infomation['f']['commentCount'],
            self.infomation['f']['likeCount'],
            self.infomation['media']['notzero']['commentCount'],
            self.infomation['media']['notzero']['likeCount'],
            self.infomation['media']['zero']['commentCount'],
            self.infomation['media']['zero']['likeCount']
        ))
    def downloadImage(self, gender="none"):
        """Download every image of every fetched post into ./dacad/<forum>/,
        optionally filtered by author gender ("M", "F", or "none" = all)."""
        folder_path = './dacad/' + self.forums + '/'
        if (os.path.exists(folder_path) == False): # ensure the forum folder exists
            os.makedirs(folder_path) # Create folder
        for i, data in enumerate(self.forumsData):
            if data['gender'] == gender or gender == "none":
                self.showCatch(i, data)
                media = data['mediaMeta']
                if len(media) > 0:
                    # NOTE(review): the first media entry is dropped here —
                    # presumably a duplicate/thumbnail; confirm against the API.
                    del media[0]
                for index, image_url in enumerate(media):
                    image = self.dSession.get(image_url['url'])
                    img_path = folder_path + data['title'] + '/'
                    if (os.path.exists(img_path) == False): # ensure the per-post folder exists
                        os.makedirs(img_path) # Create folderF
                    # write the downloaded image bytes to disk
                    with open(img_path + image_url['url'].split('/')[-1], 'wb') as file:
                        file.write(image.content)
                        file.flush()
                        file.close() # close file (redundant inside `with`)
                    print("目前:第 {} 張照片,剩餘 {} 張需要下載".format(
                        index + 1, len(media) - index - 1))
                    # http://dangerlover9403.pixnet.net/blog/post/207823890-%5Bpython%5D-day14---python-%E5%BE%9E%E7%B6%B2%E8%B7%AF%E6%8A%93%E5%9C%96%E7%89%87
                print(("---------------\n"
                       "{}-圖片下載完成\n"
                       "準備下載下一張...").format(data["title"]))
        print("[Info]全部圖片下載完成...")
# init
'''
if __name__ == "__main__":
user = Dcard()
user.getForumsInfor()
user.downloadImage()
'''
| {"/main.py": ["/Dcard.py"]} |
43,214 | XinBow99/dacrd-by-python | refs/heads/master | /main.py | import Dcard
if __name__ == "__main__":
user = Dcard.Dcard()
user.getForumsInfor('nkfust', popular=True, limit=30)
#user.downloadImage()
user.getThisInfo()
| {"/main.py": ["/Dcard.py"]} |
43,217 | runvnc/zork_on_gemini | refs/heads/master | /data.py | import os, subprocess, sys
from urllib.parse import unquote
from shlex import quote
from pathlib import Path
import time
parent_pipes = {}
child_pipes = {}
# Directory this module lives in; per-user exchange files live under ./data.
mypath = os.path.dirname(os.path.realpath(__file__))


def fname(which, usr):
    """Path of the `which` exchange file belonging to user `usr`."""
    return '{}/data/{}_{}'.format(mypath, which, usr)


def outf(usr):
    """OUTPUT file path for `usr`."""
    return fname('OUTPUT', usr)


def inpf(usr):
    """INPUT file path for `usr`."""
    return fname('INPUT', usr)


def activef(usr):
    """ACTIVE marker file path for `usr`."""
    return fname('ACTIVE', usr)


def user_active(usr):
    """A user counts as active while their ACTIVE marker file exists."""
    return os.path.exists(activef(usr))
def send_command(usr, cmd):
    """Deliver `cmd` to the user's session via its INPUT exchange file."""
    with open(inpf(usr), 'w') as pipe_file:
        pipe_file.write(cmd)
def wait(usr, which, maxtries):
    """Poll until the file `which` exists, sleeping 0.25 s between checks.

    Always sleeps once up front; gives up after `maxtries` extra polls.
    `usr` is unused but kept for call-site symmetry with the other helpers.
    """
    time.sleep(0.25)
    for _ in range(maxtries):
        if os.path.exists(which):
            break
        time.sleep(0.25)
        print(__name__, which, "Waiting for file to exist:", which)
def wait_for_output(usr):
    """Block until the user's OUTPUT file appears, then return its text."""
    wait(usr, outf(usr), 100)
    with open(outf(usr)) as out_file:
        return out_file.read()


def wait_for_input(usr):
    """Block briefly for the user's INPUT file; report whether it appeared."""
    wait(usr, inpf(usr), 10)
    return os.path.exists(inpf(usr))
def wipe_all():
    """Delete every regular file in the data/ exchange directory."""
    for entry in Path(f'{mypath}/data').glob('*'):
        if not entry.is_file():
            continue
        entry.unlink()
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,218 | runvnc/zork_on_gemini | refs/heads/master | /spawner.py | #!/usr/bin/python
import os, subprocess, sys
from urllib.parse import unquote
from pathlib import Path
import time
from multiprocessing import Process, set_start_method
import logging, redis, datetime
from control_zork import *
logging.basicConfig(filename='spawner.log', level=logging.DEBUG)
pubsub = {}
mypath = os.path.dirname(os.path.realpath(__file__))
redisconn = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
pubsub = redisconn.pubsub()
pubsub.subscribe('app_spawn')
def checkmessages():
    """Main loop: spawn a Zork session for every user name published on
    the app_spawn Redis channel."""
    while True:
        for msg in pubsub.listen():
            # listen() also yields the initial 'subscribe' confirmation; skip it.
            if msg != None and msg['type'] != 'subscribe':
                spawn_session(msg['data'], init)
        # NOTE(review): pubsub.listen() blocks and normally never returns,
        # so this sleep is effectively unreachable.
        time.sleep(0.1)
def spawn_session(usr, func):
    """Start `func(usr)` in a daemon process and mark the user active in Redis."""
    user = usr
    logging.info("Trying to spawn")
    p = Process(target=func, args=(user,))
    # Daemonize so the worker dies together with the spawner.
    p.daemon = True
    p.start()
    logging.info("Spawned?")
    redisconn.set(f'active_{usr}', 1)
    logging.info("Set active")
checkmessages()
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,219 | runvnc/zork_on_gemini | refs/heads/master | /data_redis.py | import os, subprocess, sys
from urllib.parse import unquote, quote
from pathlib import Path
import time
from multiprocessing import Process, set_start_method
import logging, redis, datetime
pubsub = {}
mypath = os.path.dirname(os.path.realpath(__file__))
redisconn = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
def spawn_session(usr, func):
    """Ask the spawner process (via the app_spawn Redis channel) to start a
    session for `usr`.

    `func` is unused here; it is kept for API symmetry with the pipe-based
    implementation in data_pipes.py.
    """
    redisconn.publish('app_spawn', usr)
def checkpipe(user, direction, sub=False):
    """Return (creating and caching on first use) the PubSub object for
    channel app_<direction>_<user>; subscribe to it when `sub` is true."""
    key = f'app_{direction}_{user}'
    if key in pubsub:
        return pubsub[key]
    else:
        pubsub[key] = redisconn.pubsub()
        if sub:
            logging.info(f"Subscribing to app_{direction}_{user}")
            pubsub[key].subscribe(f'app_{direction}_{user}')
        return pubsub[key]
def writepipe(usr, direction, data):
    """Publish `data` (URL-quoted) on the user's app_<direction> channel."""
    channel = f'app_{direction}_{usr}'
    logging.info(f"Trying to publish {channel} :" + data)
    redisconn.publish(channel, quote(data))
def readpipe(usr, direction):
    """Non-blocking read of one message from the user's channel.

    Returns the URL-unquoted payload, or None when nothing is pending.
    """
    pipe = checkpipe(usr,direction,True)
    msg = pipe.get_message()
    #msg = next(pipe.listen())
    #logging.info(__name__+"msg from sub is "+str(msg))
    # The first message after subscribing is the 'subscribe' acknowledgement;
    # skip it and read again.
    if not (msg is None) and msg['type'] == 'subscribe':
        msg = pipe.get_message()
    if msg is None: return None
    logging.info("pipe received message: "+str(msg))
    return unquote(msg['data'])
def waitread(usr, direction):
    """Poll readpipe up to 150 times (~0.3 s total); return the first
    payload seen, or None on timeout."""
    for _ in range(150):
        payload = readpipe(usr, direction)
        time.sleep(0.002)
        if payload is not None:
            return payload
    return None
def user_active(usr):
    """True while the session's last heartbeat (ping_<usr> in Redis,
    written by the child process) is less than 8 seconds old."""
    lastping = redisconn.get(f'ping_{usr}')
    return lastping is not None and (time.time() - float(lastping)) < 8
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,220 | runvnc/zork_on_gemini | refs/heads/master | /data_pipes.py | import os, subprocess, sys
from urllib.parse import unquote
from shlex import quote
from pathlib import Path
import time
from multiprocessing import Process, Pipe, set_start_method
import logging
#parent_pipes = {}
#child_pipes = {}
mypath = os.path.dirname(os.path.realpath(__file__))
def spawn_session(usr, func):
    """Run `func(usr)` in a fresh daemonised process.

    NOTE(review): set_start_method() raises RuntimeError when called more
    than once per interpreter, so this can only spawn one session per run.
    """
    user = usr
    set_start_method('spawn')
    #parent_conn, child_conn = Pipe()
    #parent_pipes[user] = parent_conn
    #child_pipes[user] = child_conn
    p = Process(target=func, args=(user,))
    p.daemon = True
    p.start()
def checkpipe(usr, direction):
    """Ensure the user's pipe file exists; True if it was already there."""
    pipe_path = f'{mypath}/data/app_{direction}_{usr}'
    if os.path.exists(pipe_path):
        return True
    Path(pipe_path).touch()
    return False
def writepipe(usr, direction, data):
    """Write `data` into the user's pipe file (creating it first if needed)."""
    checkpipe(usr, direction)
    logging.info(f"attempt to open write to pipe {usr} {direction}")
    pipe_path = f"{mypath}/data/app_{direction}_{usr}"
    with open(pipe_path, 'w') as pipe_file:
        pipe_file.write(data)
    logging.info(f"wrote data to {direction}: " + data)
def readpipe(usr, direction):
    """Read and consume the user's pipe file; returns its full contents."""
    checkpipe(usr, direction)
    logging.info(f"attempt to open read from pipe {usr} {direction}")
    pipe_path = f'{mypath}/data/app_{direction}_{usr}'
    with open(pipe_path, 'r') as pipe_file:
        logging.info(f"attempt to read from pipe {usr} {direction}")
        contents = pipe_file.read()
        logging.info(f"read complete {usr} {direction}")
    # Consume the message so the next read sees fresh data only.
    os.remove(pipe_path)
    return contents
def waitread(usr, direction):
    """Poll readpipe until non-empty text arrives or 44 tries (~15 s) elapse."""
    text = ''
    for _ in range(44):
        text = readpipe(usr, direction)
        time.sleep(0.35)
        if text != '':
            break
    return text
def user_active(usr):
    """Active when the 'active' pipe file already existed (note checkpipe
    also creates the file as a side effect when it is missing)."""
    return checkpipe(usr,'active')
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,221 | runvnc/zork_on_gemini | refs/heads/master | /control_zork.py | #!/usr/bin/python
import os, pexpect, sys, time, traceback
from data_redis import *
import logging
import pexpect.replwrap
import atexit
# Kill an idle session after 30 minutes without player input.
APPTIMEOUT = 60 * 30
# Heartbeat interval (seconds) for refreshing the ping_<user> Redis key.
PINGINTERVAL = 1
def exiting():
    """atexit hook: record that this child process is terminating."""
    logging.info("Child exiting for some reason." + __name__)
def init(user):
    """Child-process entry point: run a per-user Zork REPL and bridge it to
    the front end through the Redis 'up'/'down' channels."""
    # NOTE(review): the *child* of this fork returns to the caller while
    # the parent continues below — double-check this isn't inverted.
    if os.fork() == 0: # <--
        return
    logging.basicConfig(filename='spawner.log', level=logging.DEBUG)
    lastinput = time.time()
    lastping = time.time()
    atexit.register(exiting)
    logging.info("Child Spawning Zork..")
    #child = pexpect.spawn(mypath+'/zork',encoding='utf-8')
    torun = mypath+f'/zork {user}'
    prompt = "\n>"
    child = pexpect.replwrap.REPLWrapper(torun,prompt,None)
    #child.logfile = open(mypath+'/outzork.log','w')
    logging.info("Child Waiting for prompt..")
    text = child.run_command('look')
    logging.info("Child sending Zork initial output..")
    writepipe(user,'up',text)
    logging.info("Child start of loop")
    #logging.info("child status:" + str(child.status))
    while True:
        try:
            # Exit after APPTIMEOUT seconds without player input.
            if time.time()-lastinput>APPTIMEOUT:
                logging.info("Timeout. Suicide.")
                sys.exit(0)
            # Heartbeat so user_active() keeps reporting this session alive.
            if time.time()-lastping>PINGINTERVAL:
                logging.info("Child Trying to receive..")
                redisconn.set(f'ping_{user}',time.time())
                lastping = time.time()
            cmd = waitread(user,'down')
            #print("Child sending to zork: "+str(cmd))
            if not (cmd is None):
                if cmd != '' and cmd != ' ':
                    lastinput = time.time()
                    try:
                        # Short timeout: Zork may print without re-prompting;
                        # fall back to whatever was buffered so far.
                        text = child.run_command(cmd,timeout=0.15)
                    except Exception as ee:
                        text = child.child.before
                    if not child.child.isalive():
                        # Flush the dying process's final output, then quit.
                        text = child.child.before
                        writepipe(user,'up',text)
                        logging.info("App ended. Exiting.")
                        sys.exit(0)
                    writepipe(user,'up',text)
                    #print("Child done with sendline to zork: "+cmd)
                    #print("received result: ",text)
            #else:
            #    logging.info("Child read none!")
        except Exception as e:
            logging.info("Exception in control_zork!")
            logging.info(traceback.format_exception(*sys.exc_info()))
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,222 | runvnc/zork_on_gemini | refs/heads/master | /start_server.py | #!/bin/bash
# Clean old logs and any stale per-user "active" flags left in Redis.
rm *.log
redis-cli --scan --pattern active* | xargs redis-cli del
# Keep the Gemini server running; restart it whenever it exits.
while true; do
jetforce --hostname zork.club --host 167.71.119.170 --tls-certfile /etc/letsencrypt/live/zork.club/fullchain.pem --tls-keyfile /etc/letsencrypt/live/zork.club/privkey.pem
done
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,223 | runvnc/zork_on_gemini | refs/heads/master | /gemini.py | import os
from urllib.parse import unquote
from shlex import quote
from pathlib import Path
import shortuuid
# CGI inputs supplied by the Gemini server via environment variables.
query = ''
if 'QUERY_STRING' in os.environ:
    query = unquote(os.environ["QUERY_STRING"])
user = ''
if 'REMOTE_USER' in os.environ:
    user = quote(unquote(os.environ["REMOTE_USER"]))
# Cap the visible name, then append a 5-char suffix to keep users distinct.
user = user[:30]
if 'TLS_CLIENT_HASH' in os.environ:
    # Stable suffix derived from the client certificate hash.
    user += '_'+unquote(os.environ["TLS_CLIENT_HASH"])[-5:]
else:
    # No client certificate: fall back to a random per-visit suffix.
    user += '_'+shortuuid.uuid()[-5:]
def respond(code, meta):
    """Emit a Gemini response header line: '<code> <meta>' terminated by CRLF."""
    print('{} {}\r\n'.format(code, meta))
# Gemini protocol status codes used by this CGI script.
INPUT = 10
NEED_CERT = 60
SUCCESS = 20
| {"/spawner.py": ["/control_zork.py"], "/control_zork.py": ["/data_redis.py"]} |
43,281 | vikrant462/Word_Puzzle_Game | refs/heads/master | /homepage.py | import sqlite3 as sql
import winsound
import random
from tkinter import *
import PIL.Image
import tkinter
from PIL import ImageTk, Image
import PIL.Image
import os
#sound
winsound.PlaySound("game_menu",winsound.SND_ASYNC)
#ENDsound
a=['movies','fruits','cities','plants','animals']
f=open('file.txt','w')
f.write(random.choice(a))
f.close()
root = Tk()
background_image=ImageTk.PhotoImage(PIL.Image.open("new.jpg"))
background_label =Label(root, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
root.resizable(False, False)
root.title("WELCOME")
root.geometry('483x500')
#button
x1=Button(root)
photo=PhotoImage(file="start.gif")
x1.config(image=photo,width="160",height="62",command=root.destroy,activebackground="black",bg="black", bd=0)
x1.place(relx=0.545,rely=0.60,x=10, y=100, anchor=NE)
#END button
#dropsown
tkvar = StringVar(root)
photo2=PhotoImage(file="image/random.gif")
choices = { 'movies','fruits','cities','plants','animals'}
tkvar.set('random') # set the default option
root.wm_attributes('-fullscreen', True)
root.wm_attributes('-transparent', root['bg'])
popupMenu = OptionMenu(root, tkvar, *choices)
popupMenu.config(image=photo2,width="170",height="60",activebackground="#800000",bg="black", bd=.5, highlightbackground= "white")
photo2=PhotoImage(file=str("image/"+tkvar.get()+".gif"))
popupMenu.config(image=photo2)
popupMenu.place(relx=0.43,rely=0.56)
def fili(strk):
    """Persist the chosen word category so beta1.py can read it at startup."""
    with open('file.txt', 'w') as category_file:
        category_file.write(strk)
def change_dropdown(*args):
    """tkvar trace callback: swap the menu image to match the selected
    category and persist the choice to file.txt."""
    # Keep module-level references so the PhotoImage is not garbage-collected.
    global photo2, popupMenu
    photo2=PhotoImage(file=str("image/"+tkvar.get()+".gif"))
    popupMenu.config(image=photo2,width="170",height="60",activebackground="#800000",bg="black", bd=.5, highlightbackground= "white")
    fili(tkvar.get())
tkvar.trace('w', change_dropdown)
#dropdown1
tkvar1 = StringVar(root)
photo3=PhotoImage(file="image/easy.gif")
choices1 = { 'easy','hard'}
tkvar1.set('easy') # set the default option
f=open('file1.txt','w')
f.write(tkvar1.get())
f.close()
popupMenu1 = OptionMenu(root, tkvar1, *choices1)
popupMenu1.config(image=photo3,width="155",height="50",activebackground="#800000",bg="black", bd=.5, highlightbackground= "white")
photo3=PhotoImage(file=str("image/"+tkvar1.get()+".gif"))
popupMenu1.config(image=photo3)
popupMenu1.place(relx=0.436,rely=0.64)
def fili1(strk):
    """Persist the chosen difficulty so beta1.py can read it at startup."""
    with open('file1.txt', 'w') as difficulty_file:
        difficulty_file.write(strk)
def change_dropdown1(*args):
    """tkvar1 trace callback: swap the difficulty image and persist it."""
    # Keep module-level references so the PhotoImage is not garbage-collected.
    global photo3, popupMenu1
    photo3=PhotoImage(file=str("image/"+tkvar1.get()+".gif"))
    popupMenu1.config(image=photo3,width="155",height="50",activebackground="#800000",bg="black", bd=.5, highlightbackground= "white")
    fili1(tkvar1.get())
tkvar1.trace('w', change_dropdown1)
root.mainloop()
#END dropdown
##END home page
| {"/MAIN_FILE_If_Using_Idle.py": ["/beta1.py", "/homepage.py"]} |
43,282 | vikrant462/Word_Puzzle_Game | refs/heads/master | /MAIN_FILE_If_Using_Idle.py | import imp
import tkinter
import beta1
import homepage
if __name__=='__main__':
imp.reload(homepage)
while True:
imp.reload(beta1)
beta1.main()
if beta1.reset_val==1:
continue
if beta1.exit_val==1:
break
| {"/MAIN_FILE_If_Using_Idle.py": ["/beta1.py", "/homepage.py"]} |
43,283 | vikrant462/Word_Puzzle_Game | refs/heads/master | /beta1.py | ### Packages
import winsound
import tkinter
import PIL.Image
from PIL import ImageTk, Image
import threading
import sqlite3 as sql
import random
import time
import numpy as np
from tkinter import *
import tkinter.messagebox as tm
### END Packages
### Global variables
##word creation
i,g,count,beg_row,beg_col,m=0,0,0,0,0,0
original_letters=[]
score_word=[]
score_made,actual_score=0,0
prev_beg_color,prev_end_color="gray79","gray79"
words=[]
###file
f=open('file.txt','r')
strk=f.read()
print(strk)
f.close()
f=open('file1.txt','r')
strk1=f.read()
#print(strk)
f.close()
def database(strk):
    """Load every word from table `strk` of word_list.db into the
    module-level `words` list.

    Raises ValueError for table names that are not plain identifiers.
    """
    global words
    # The table name comes from file.txt and cannot be bound as a SQL
    # parameter, so reject anything that is not a plain identifier to keep
    # the string-built statement injection-free.
    if not strk.isidentifier():
        raise ValueError(f"invalid table name: {strk!r}")
    conn=sql.connect('word_list.db')
    try:
        c=conn.cursor()
        c.execute("SELECT * FROM "+strk)
        rows = c.fetchall()
        for p in rows:
            for q in p:
                words.append(q)
    finally:
        # Close the connection even when the query fails.
        conn.close()
###END file
##reset
reset_val,exit_val=0,0
##display
txt=''
r,grn,b='fff','999','000'
fnl_color='#fff999000'
##clock
now,tym,cnt=0,0,0
### END Global variables
### Main
def main():
    """Build the game window, place the 10x10 letter grid, hide the words
    and start the countdown thread."""
    global tym
    main.root=Tk()
    main.root.wm_attributes('-fullscreen', True)
    main.root.title("Word_Puzzle")
    #main.root.geometry('200x100')
    background_image=ImageTk.PhotoImage(PIL.Image.open("white.jpg"))
    background_label =Label(main.root, image=background_image)
    background_label.place(x=0, y=0, relwidth=1, relheight=1)
    # Widgets are stored on the function object so other module functions
    # (reset, update_clock, button.fun) can reach them as main.<attr>.
    main.butons=[]
    main.frame=Frame(main.root,bg="magenta3",relief="raised",borderwidth=15)
    main.frame.place(x=10,y=50)
    winsound.PlaySound("game_menu",winsound.SND_ASYNC)
    for i in range(1,11):
        for j in range(1,11):
            b=button(i,j)
            main.butons.append(b)
    database(strk)
    arange_words()
    display()
    # Remember the second-of-minute at start so update_clock can compute
    # elapsed time modulo the 30-second round.
    tym=int(time.strftime("%S"))%30
    t1=threading.Thread(target=update_clock)
    t1.start()
    main.root.mainloop()
### END Main
###Options
##Reset
def reset():
    """Tear down the game window and flag the outer loop to restart."""
    global reset_val
    main.root.destroy()
    reset_val=1
##END reset
##Quit
def end():
    """Tear down the game window, flag program exit and play the goodbye sound."""
    global exit_val
    main.root.destroy()
    exit_val=1
    # Play asynchronously so the call does not block shutdown.
    winsound.PlaySound("exit",winsound.SND_ASYNC)
##END Quit
###END options
##Update clock
def update_clock():
    """Tk timer callback (re-armed every 10 ms): refresh the countdown
    label and end the round on timeout or when every word is found."""
    global now,tym,cnt,txt,tmpvar
    #now = time.strftime("%H:%M:%S")
    # Seconds elapsed since main() recorded `tym`, modulo the 30 s round.
    now = time.strftime("%S")
    now=int(now)%30
    now=(now-tym)
    if now<0:
        now=30+now
    if now==29 and cnt==0:
        # Round over: latch cnt so the time-over branch fires from now on.
        # Fix: removed the unused local `score` the old code assigned here.
        cnt=1
    if cnt==1:
        txt='Time Over:\n'+str(score_made)+' Points'
        winsound.PlaySound("Buzzer",winsound.SND_ASYNC)
        tmpvar=1 #to stop further action of buttons
        display.labelx.configure(text=txt)
    elif score_made==actual_score:
        # Every word found before the deadline.
        winsound.PlaySound("Short_triumphal.wav",winsound.SND_ASYNC)
        txt='Congratulations\nTime:'+str(now)+' Sec'
        tmpvar=1 #to stop further action of buttons
        display.labelx.configure(text=txt)
    else:
        now=30-now
        txt='Time Remaining:\n'+str(now)+' Sec'
        display.labelx.configure(text=txt)
    # Re-arm the timer.
    display.labelframe.after(10, update_clock)
##END update clock
###END Time
### Graphics
##Arrange words
def arange_words():
    """Hide 7 random category words in the 10x10 grid (horizontally,
    vertically or down-right diagonally), allowing overlap only where
    letters match; updates original_letters, button labels, actual_score.

    NOTE(review): `x+=1` also runs when a duplicate word is drawn (the
    `if w not in temp_word` branch is skipped), so fewer than 7 words can
    end up placed.
    """
    mb=main.butons
    global original_letters,words,actual_score
    # '.' marks an empty cell.
    for x in range(len(mb)):
        original_letters.append('.')
    #assigining words
    temp_word=[]
    x=0
    while x<7:
        overlap=0
        w=random.choice(words)
        if w not in temp_word:
            word_len=len(w)
            # Highest start index that still fits the word in one line.
            req_len=10-word_len+1
            dir=random.choice('123')
            #horizontal
            if dir=='1':
                col=random.choice(range(0,req_len))
                row=random.choice(range(0,10))
                column=col
                # First pass: every cell must be free or hold the same letter.
                for i in range(word_len):
                    if not(original_letters[row*10+column]=='.' or original_letters[row*10+column]==w[i]):
                        overlap=1
                        break
                    column+=1
                if overlap==0:
                    # Second pass: write the letters into grid and buttons.
                    for i in range(word_len):
                        mb[row*10+col].a.set(w[i])
                        original_letters[row*10+col]=w[i]
                        col+=1
                else:
                    continue
            #Vertical
            if dir=='2':
                row=random.choice(range(0,req_len))
                col=random.choice(range(0,10))
                rows=row
                for i in range(word_len):
                    if not(original_letters[rows*10+col]=='.' or original_letters[rows*10+col]==w[i]):
                        overlap=1
                        break
                    rows+=1
                if overlap==0:
                    for i in range(word_len):
                        mb[row*10+col].a.set(w[i])
                        original_letters[row*10+col]=w[i]
                        row+=1
                else:
                    continue
            #diagonal
            if dir=='3':
                col=random.choice(range(0,req_len))
                row=random.choice(range(0,req_len))
                column=col
                rows=row
                for i in range(word_len):
                    if not(original_letters[rows*10+column]=='.' or original_letters[rows*10+column]==w[i]):
                        overlap=1
                        break
                    column+=1
                    rows+=1
                if overlap==0:
                    for i in range(word_len):
                        mb[row*10+col].a.set(w[i])
                        original_letters[row*10+col]=w[i]
                        col+=1
                        row+=1
                else:
                    continue
            temp_word.append(w)
            # Each placed word contributes its length to the winning score.
            actual_score+=word_len
        x+=1
##END arrange words
##button
# 0 while the round is live; set to 1 by update_clock to freeze the grid.
tmpvar=0
class button:
    """One letter cell of the puzzle grid: a Tk Button backed by a StringVar."""
    def __init__(self,row,col):
        global strk1
        d=' '
        if strk1=='hard':
            # Hard mode fills non-word cells with random letters instead of blanks.
            d='abcdefghijklmnopqrstuvwxyz'
        self.a=StringVar()
        self.b=Button(main.frame,textvariable=self.a,command=self.fun,bg='gray79',fg='black',font=("arial bold ",15),width=2,height=1)
        self.a.set(random.choice(d))
        #abcdefghijklmnopqrstuvwxyz
        self.b.grid(row=row,column=col,ipadx=30,ipady=15,padx=1,pady=1)
    def fun(self):
        # Click handler: forward letter/button/colour to the selection logic
        # unless the round is over (tmpvar == 1).
        global tmpvar
        l=self.a.get()
        but=self.b
        bg_color=but['bg']
        #print('fun',a)
        if tmpvar==0:
            select(l,but,bg_color)
##END button
##display
def display():
    """(Re)build the score-board panel: totals, the seven word slots with
    their graded colours, Reset/Quit buttons and the clock label."""
    global fnl_color
    display.labelframe=Frame(main.root,bg="black",relief="raised",borderwidth=15,height=750,width=520)
    display.labelframe.place(x=1000,y=50)
    Title_label=Label(display.labelframe,text="SCORE BOARD",padx=71,relief="raised",borderwidth=1,bg="coral2",fg="black",font=("Times bold ",35))
    Title_label.place(x=0,y=0)
    label1=Label(display.labelframe,text="Total_Score",padx=45,height=1,width=21,relief="raised",borderwidth=1,bg="chocolate1",fg="black",font=("arial bold ",25))
    label1.place(x=0,y=60)
    c=IntVar()
    label2=Label(display.labelframe,textvariable=c,padx=21,relief="raised",borderwidth=1,bg="tan1",fg="black",font=("arial bold ",25))
    c.set(actual_score)
    label2.place(x=396,y=60)
    label3=Label(display.labelframe,text="Score_Gained",padx=45,height=1,width=21,relief="raised",borderwidth=1,bg="chocolate1",fg="black",font=("arial bold ",25))
    label3.place(x=0,y=411)
    d=IntVar()
    label4=Label(display.labelframe,textvariable=d,padx=20,relief="raised",borderwidth=1,height=1,width=2,bg=fnl_color,fg="black",font=("arial bold ",25))
    d.set(score_made)
    label4.place(x=396,y=411)
    # v: word-slot index, c: running y offset; r/g/b build a fading row colour.
    v,c=0,103
    r,g,b='fff','999','000'
    val=[IntVar(),IntVar(),IntVar(),IntVar(),IntVar(),IntVar(),IntVar(),IntVar(),IntVar()]
    index=[StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar()]
    e1=[StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar()]
    # One row per findable word: rank, the word found so far, and its points.
    for x in range(7):
        label=Label(display.labelframe,text='',padx=6,height=1,width=25,relief="raised",bg="khaki2",fg="black",font=("arial bold ",25))
        label.place(x=0,y=c)
        color='#'+r+g+b
        g=str((int(g)-100))
        label=Label(display.labelframe,textvariable=val[v],padx=19,height=1,width=2,relief="raised",bg=color,fg="white",font=("arial bold ",25))
        label.place(x=396,y=c)
        label=Label(display.labelframe,textvariable=index[v],height=1,width=2,relief="raised",bg=color,fg="white",font=("arial bold ",25))
        label.place(x=0,y=c)
        label=Label(display.labelframe,textvariable=e1[v],padx=6,height=1,width=17,relief="raised",bg=color,fg="black",font=("arial bold ",25))
        label.place(x=50,y=c)
        if v<len(score_word):
            a=score_word[v]
        else:
            a=''
        e1[v].set(a)
        if len(a)==0:
            t=''
        else:
            t=len(a)  # a word scores its own length
        val[v].set(t)
        index[v].set(v+1)
        c+=44
        v+=1
    label=Label(display.labelframe,text='',padx=4,pady=14,height=2,width=30,relief="raised",bg="khaki2",fg="black",font=("arial bold ",20))
    label.place(x=0,y=454)
    reset_buton=Button(display.labelframe,text="Reset",padx=42,height=1,width=7,relief="raised",borderwidth=3,bg="red2",fg="black",font=("arial bold ",25),command=reset)
    reset_buton.place(x=10,y=466)
    exit_buton=Button(display.labelframe,text="Quit",padx=42,height=1,width=7,relief="raised",borderwidth=3,bg="red2",fg="black",font=("arial bold ",25),command=end)
    exit_buton.place(x=250,y=466)
    label=Label(display.labelframe,text='',padx=4,pady=4,height=5,width=30,relief="raised",bg="red",fg="black",font=("arial bold ",20))
    label.place(x=0,y=550)
    # Clock / status label driven by update_clock().
    display.labelx=Label(display.labelframe,text=txt,padx=30,height=4,width=21,relief="raised",borderwidth=1,bg="chocolate1",fg="black",font=("arial bold ",25))
    display.labelx.place(x=14,y=557)
##END display
### END Graphics
### Selection Algorithm
##select
def select(l,but,bg_color):
    """Handle one grid-button click of a two-click word selection.

    The first click (even ``i``) records the start cell and highlights it;
    the second click records the end cell, asks Check_Word for the spanned
    letters, and either scores the word (sound, colour, score update) or
    reverts both cells to their previous colours.  Finishes by rebuilding
    the score board.
    """
    global actual_score,score_made
    mb=main.butons
    global prev_beg_color,prev_end_color,score_word
    # Flatten buttons and letters into 10x10 arrays for coordinate lookups.
    butonlist=[]
    for x in range(len(mb)):
        l=mb[x].b
        butonlist.append(l)
    letterlist=[]
    for x in range(len(mb)):
        l=mb[x].a.get()
        letterlist.append(l)
    letter=np.asarray(letterlist)
    letter=letter.reshape(10,10)
    global g,beg_row,beg_col
    buton=np.asarray(butonlist)
    buton=buton.reshape(10,10)
    #start
    global i,m
    # Locate the clicked button by string identity in the 10x10 array.
    for row in range(10):
        for col in range(10):
            if(str(but)==str(buton[row][col])):
                if i%2==0:
                    # First click of the pair: remember the start cell.
                    beg_row=row
                    beg_col=col
                    possiblebtn(buton,row,col)
                    i+=1
                    prev_beg_color=bg_color
                else:
                    # Second click: close the selection and validate it.
                    end_row=row
                    end_col=col
                    i+=1
                    prev_end_color=bg_color
                    buton[row][col].configure(bg="limegreen",fg="black")
                    x=Check_Word(buton,beg_row,beg_col,end_row,end_col)
                    #Check_Database
                    global words,count
                    if x in words and x not in score_word:
                        winsound.PlaySound("word.wav",winsound.SND_ASYNC)
                        m=x
                        count+=1
                        #print(x)
                        score_word.append(x)
                        score_made+=len(x)
                        # Darken the green channel of the score colour per find.
                        global r,grn,b,fnl_color
                        if int(grn)<100:
                            grn='999'
                        fnl_color='#'+r+grn+b
                        grn=str((int(grn)-100))
                    else:
                        # Not a hidden word (or already found): undo highlights.
                        winsound.PlaySound("wrong_beep.wav",winsound.SND_ASYNC)
                        buton[row][col].configure(bg=prev_end_color,fg="black")
                        buton[beg_row][beg_col].configure(bg=prev_beg_color,fg="black")
                    display()
##END select
##possible button
def possiblebtn(buton,row,col):
    """Mark the first-clicked cell red.

    NOTE(review): the reset loop compares cells against '.....' although
    ``original_letters`` stores single '.' characters, so it never matches
    and no cell is ever reset here — looks like it was meant to be '.';
    confirm before changing.
    """
    for x in range(10):
        for y in range(10):
            if original_letters[x*10+y]=='.....':
                buton[x][y].configure(bg="gray79",fg="black")
    if i%2==0:
        #fgcolor="white"
        buton[row][col].configure(bg="red")
##END possible button
##check word
def Check_Word(buton,beg_row,beg_col,end_row,end_col):
    """Read the letters between the two selected cells and return the word.

    Supports the three placement directions (same row, same column, or
    down-right diagonal).  If the spanned word is a hidden, not-yet-found
    word, its cells are recoloured with the next colour in the cycle.
    Returns the assembled string, or None when the span was empty/invalid.
    """
    global count,words,score_word
    word=""
    mb=main.butons
    # One highlight colour per found word, cycling every 7 finds.
    color=['#fff999000','#fff888000','#fff777000','#fff666000','#fff555000','#fff444000','#fff333000']
    b=count%7
    bg_color=color[b]
    if beg_row==end_row:
        # Horizontal selection.
        for x in range(beg_col,end_col+1):
            o=mb[beg_row*10+x].a.get()
            word+=o
        if (word in words) and (word not in score_word):
            for x in range(beg_col,end_col+1):
                buton[beg_row][x].configure(bg=bg_color,fg="black")
    elif beg_col==end_col:
        # Vertical selection.
        for x in range(beg_row,end_row+1):
            o=mb[x*10+beg_col].a.get()
            word+=o
        if (word in words) and (word not in score_word):
            for x in range(beg_row,end_row+1):
                buton[x][beg_col].configure(bg=bg_color,fg="black")
    elif ((end_row-beg_row)==(end_col-beg_col)):
        # Down-right diagonal selection.
        y=beg_row
        for x in range(beg_col,end_col+1):
            o=mb[y*10+x].a.get()
            word+=o
            y+=1
        y=beg_row
        if (word in words) and (word not in score_word):
            for x in range(beg_col,end_col+1):
                buton[y][x].configure(bg=bg_color,fg="black")
                y+=1
    if word!="":
        return word
##END check word
###END Selection Algorithm | {"/MAIN_FILE_If_Using_Idle.py": ["/beta1.py", "/homepage.py"]} |
43,284 | HarryThuku/News-Highlight | refs/heads/master | /app/requests.py | import urllib.request, json
from .models import Source, Article
import ssl
# News-API endpoint templates and key; populated by configure_requests()
# from the Flask config at application start-up.
api_key = None
sources_url = None
top_headlines_url = None
news_by_sources = None
search_url = None
# NOTE(review): unverified SSL context skips certificate validation for all
# API calls — confirm this is intentional.
context = ssl._create_unverified_context()
def configure_requests(app):
    """Copy the news-API endpoint templates and key out of the Flask app
    config into this module's globals."""
    global sources_url, top_headlines_url, api_key, news_by_sources, search_url
    cfg = app.config
    api_key = cfg['API_KEY']
    sources_url = cfg['SOURCES_URL']
    top_headlines_url = cfg['TOP_HEADLINES_URL']
    news_by_sources = cfg['NEWS_BY_SOURCES_URL']
    search_url = cfg['SEARCH_URL']
def get_sources():
    """Fetch the news-source catalogue from the API and return it as a list
    of Source objects (empty list when the response carries none)."""
    with urllib.request.urlopen(sources_url.format(api_key), context=context) as response:
        payload = json.loads(response.read())
    if payload['sources']:
        return process_sources(payload['sources'])
    return []
def search_news(keyword):
    """Query the 'everything' endpoint for *keyword* and return matching
    Article objects (empty list when nothing is found)."""
    request_url = search_url.format(keyword, api_key)
    with urllib.request.urlopen(request_url, context=context) as response:
        payload = json.loads(response.read())
    if payload['articles']:
        return process_articles(payload['articles'])
    return []
def process_sources(source_list):
    """Convert raw source dicts into Source objects, keeping only the
    English-language entries."""
    english_sources = []
    for item in source_list:
        if item.get('language') == 'en':
            english_sources.append(Source(
                item.get('id'),
                item.get('name'),
                item.get('description'),
                item.get('url'),
                item.get('category'),
                item.get('language'),
                item.get('country'),
            ))
    return english_sources
def get_article(category):
    """Fetch the top US headlines for *category* and return them as Article
    objects (empty list when none are returned)."""
    request_url = top_headlines_url.format(category, api_key)
    with urllib.request.urlopen(request_url, context=context) as response:
        payload = json.loads(response.read())
    if payload['articles']:
        return process_articles(payload['articles'])
    return []
def get_article_source(sources):
    """Fetch the latest articles published by the given source id(s) and
    return them as Article objects (empty list when none exist)."""
    request_url = news_by_sources.format(sources, api_key)
    with urllib.request.urlopen(request_url, context=context) as response:
        payload = json.loads(response.read())
    if payload['articles']:
        return process_articles(payload['articles'])
    return []
def process_articles(article_list):
    """Convert raw article dicts into Article objects.

    Articles without an image URL are skipped so the templates always have
    a thumbnail to render.

    Bug fix: the loop used to append the raw dict (``article``) instead of
    the constructed ``article_source`` object, so templates received plain
    dicts rather than Article instances.
    """
    articles = []
    for article in article_list:
        source = article.get('source')
        author = article.get('author')
        title = article.get('title')
        description = article.get('description')
        url = article.get('url')
        urlToImage = article.get('urlToImage')
        publishedAt = article.get('publishedAt')
        content = article.get('content')
        if urlToImage:
            article_source = Article(source, author, title, description, url,
                                     urlToImage, publishedAt, content)
            articles.append(article_source)  # was: articles.append(article)
    return articles
43,285 | HarryThuku/News-Highlight | refs/heads/master | /app/main/errors.py | from flask import render_template
from . import main
@main.errorhandler(404)
def four_Ow_four(error):
    """Render the custom error page for 404 (page not found) responses."""
    return render_template('error.html'),404
| {"/app/main/views.py": ["/app/requests.py"]} |
43,286 | HarryThuku/News-Highlight | refs/heads/master | /app/main/views.py | from flask import render_template, request, redirect, url_for
from . import main
from ..requests import get_sources, get_article, get_article_source, search_news
@main.route('/')
def index():
    """Home page: all news sources plus the current 'general' headlines."""
    title = 'Home | News Highlights'
    sources = get_sources()
    general_news = get_article('general')
    return render_template('index.html', title = title, sources = sources, general_news = general_news)
@main.route('/sources/<id>')
def sources_route(id):
    """Page for a single news source: its details and latest articles."""
    title = id
    source_data = get_article_source(id)
    sources = get_sources()
    # Pick the catalogue entry matching this id (last match, ids are unique).
    news_source = next((s for s in reversed(sources) if s.id == id), None)
    return render_template('sources.html',title=title, news_source = news_source, source_data = source_data)
@main.route('/search/<key_word>')
def sources_search(key_word):
    """Search-results page for *key_word*.

    Bug fix: the template was handed the ``search_news`` function object
    (``articles = search_news``) instead of the fetched result list.
    """
    searched_news = search_news(key_word)
    title = f'search results for {key_word}'
    return render_template('search.html',title=title, articles = searched_news)
| {"/app/main/views.py": ["/app/requests.py"]} |
43,287 | HarryThuku/News-Highlight | refs/heads/master | /config.py | import os
class Config:
    """Shared configuration: News-API endpoint templates and the API key.

    Each URL is a ``str.format`` template whose final placeholder takes
    the API key.
    """
    SOURCES_URL = 'https://newsapi.org/v2/sources?language=en&apiKey={}'
    NEWS_BY_SOURCES_URL = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey={}'
    TOP_HEADLINES_URL = 'https://newsapi.org/v2/top-headlines?country=us&category={}&apiKey={}' # it displays news items filtered by category. should be used in the home-page.
    # Bug fix: '&' was missing between the q={} query and the apiKey
    # parameter, producing 'q=<kw>apiKey=<key>' and breaking every search.
    SEARCH_URL = 'https://newsapi.org/v2/everything?q={}&apiKey={}'
    API_KEY = os.environ.get('API_KEY')


class DevConfig(Config):
    """Development settings (debug mode on)."""
    DEBUG = True


class ProdConfig(Config):
    """Production settings."""
    pass


# Maps the environment name to the configuration class to use.
config_options = {
    'development': DevConfig,
    'production': ProdConfig
}
43,288 | hui2018/CopyFileFromExcelPath | refs/heads/master | /runHemoTool.py | import os
def runHemo():
    """Print the name of every file in the hard-coded results folder.

    NOTE(review): the hemo tool invocations are still commented out, so
    this currently only lists the directory contents.
    """
    filePath = 'C:\\Users\\Jack\\Desktop\\des'
    for entry in os.listdir(filePath):
        #os.system('hemo exe location -csv ' + "path of the .csv files arr[a]")
        print(entry)
    #os.system('C:\\Users\\Jack\\Desktop\\test\\test.xlsx')
43,289 | hui2018/CopyFileFromExcelPath | refs/heads/master | /readFile.py | import pandas as pd
import shutil as st
import runHemoTool as hemo
def main():
    """Copy every file listed in column 'A' of the spreadsheet into the
    destination folder, then hand off to the hemo tool runner."""
    df = pd.read_excel('C:\\Users\\Jack\\Desktop\\test\\test.xlsx', sheet_name='Sheet2')
    for source_path in df['A'].tolist():
        # Keep just the file name (last path component) for the destination.
        file_name = source_path.split('\\')[-1]
        st.copyfile(source_path, 'C:\\Users\\Jack\\Desktop\\des\\' + file_name)
    hemo.runHemo()


if __name__ == "__main__":
    main()
| {"/readFile.py": ["/runHemoTool.py"]} |
43,292 | jordandpflum/TextAnalysis | refs/heads/master | /webScraper.py | from selenium import webdriver, common
# Module-wide headless Chrome options shared by every scraping session.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
import pandas as pd
import sys
def edmundWebScraper(url, pages):
    """Scrape comments from an Edmunds forum thread.

    :param url: URL of page one of the forum thread
    :param pages: number of pages of the thread to scrape
    :return: pandas DataFrame with one row per comment
             (columns: date, author, text)

    Bug fixes: the page URL used to be rebuilt from itself
    (``url = f'{url}/p{i}'``), compounding suffixes into .../p1/p2/p3...;
    and the per-page dict-of-lists was appended without converting it to a
    DataFrame, which ``DataFrame.append`` does not accept row-wise.
    """
    # Initialize driver and open the first page.
    driver = webdriver.Chrome('chromedriver.exe', options=chrome_options)
    driver.set_page_load_timeout(60)
    driver.get(url)

    base_url = url  # keep the thread root constant across iterations
    comments = pd.DataFrame()
    for i in range(1, pages + 1):
        # Progress indicator on a single console line.
        sys.stdout.write('\r')
        sys.stdout.write('Percent Complete: ' + str(round((i / (pages + 1)) * 100, 2)) + '%' + ', Page: ' + str(i))
        sys.stdout.flush()
        page_url = f'{base_url}/p{i}'
        try:
            page_comments = scrapePageComments(driver, page_url)
            comments = comments.append(pd.DataFrame(page_comments), ignore_index=True)
        except common.exceptions.TimeoutException:
            continue
        driver.find_element_by_class_name('Next').click()
    return comments
def scrapePageComments(driver, url):
    """
    Scrape Data from a given page of a forum on Edmund's
    :param driver: webdriver
    :param url: URL of the forum page to load
    :return: dict with parallel lists under 'date', 'author' and 'text'
    """
    # Create Empty containers for Values
    c_dates = []
    c_texts = []
    c_authors = []
    # Update Driver
    driver.get(url)
    # Retrieve Comments
    ul_comments = driver.find_elements_by_xpath('//*[@id="Content"]/div[4]/div[1]/ul')[0]
    comments = ul_comments.find_elements_by_tag_name('li')
    for comment in comments:
        try:
            # Element ids look like 'Comment_<id>'; strip the 8-char prefix.
            comment_id = comment.get_attribute('id')[8:]
            # If Block Quote
            if comment.find_elements_by_xpath(f'//*[@id="Comment_{comment_id}"]/div/div[3]/div/div[1]/blockquote'):
                # Remove the quoted text from the DOM so only the reply remains.
                element = driver.find_element_by_tag_name('blockquote')
                driver.execute_script("""
                    var element = arguments[0];
                    element.parentNode.removeChild(element);
                    """, element)
                text = comment.find_element_by_xpath(f'//*[@id="Comment_{comment_id}"]/div/div[3]/div/div[1]').text
            # If not block quote
            else:
                text = comment.find_elements_by_xpath(f'//*[@id="Comment_{comment_id}"]/div/div[3]/div/div[1]')[0].text
            date = comment.find_element_by_xpath(f'//*[@id="Comment_{comment_id}"]/div/div[2]/div[2]/span/a/time').get_attribute('datetime')
            author = comment.find_element_by_xpath(f'//*[@id="Comment_{comment_id}"]/div/div[2]/div[1]/span[1]/a[2]').text
            c_dates.append(date)
            c_authors.append(author)
            c_texts.append(text)
        except IndexError:
            # Comment missing an expected sub-element: skip it.
            continue
        except common.exceptions.StaleElementReferenceException:
            # DOM changed under us (e.g. after blockquote removal): skip.
            continue
    return {'date': c_dates, 'author': c_authors, 'text': c_texts}
| {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,293 | jordandpflum/TextAnalysis | refs/heads/master | /MDS.py |
def MDS():
    """Placeholder for the multi-dimensional-scaling step (not implemented)."""
    return None
| {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,294 | jordandpflum/TextAnalysis | refs/heads/master | /mianDriver_Jordan.py | from webScraper import edmundWebScraper
from replaceModelWBrand import replaceModelWBrand
from MDS import MDS
from calculateLift import calculateLift
from selenium import webdriver
# Headless Chrome configuration (mirrors the options set in webScraper).
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')

import pandas as pd

# Scrape the "I spotted a new <make> <model> today" thread and save the
# comments to disk for downstream analysis.
forum_url = 'https://forums.edmunds.com/discussion/3941/general/x/i-spotted-a-new-insert-make-model-today'
forum_pages = 336
comments = edmundWebScraper(forum_url, forum_pages)
comments.to_csv("results.csv")
43,295 | jordandpflum/TextAnalysis | refs/heads/master | /mainDriver_Bryant.py | import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
from sklearn.preprocessing import MinMaxScaler
# Bug fix: `data` and `X` were referenced but never defined — load the iris
# dataset (load_iris was imported but never called) before scaling.
data = load_iris()
X = data.data

scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)

# Embed the scaled features into 2-D with metric MDS.
mds = MDS(2,random_state=0)
X_2d = mds.fit_transform(X_scaled)

colors = ['red','green','blue']
plt.rcParams['figure.figsize'] = [7, 7]
plt.rc('font', size=14)

# Plot each iris class in its own colour in the 2-D embedding.
for i in np.unique(data.target):
    subset = X_2d[data.target == i]
    x = [row[0] for row in subset]
    y = [row[1] for row in subset]
    plt.scatter(x,y,c=colors[i],label=data.target_names[i])
plt.legend()
plt.show()
| {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,296 | jordandpflum/TextAnalysis | refs/heads/master | /mainDriver_Katelyn.py | from webScraper import edmundWebScraper
from replaceModelWBrand import replaceModelWBrand
from MDS import MDS
from calculateLift import calculateLift | {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,297 | jordandpflum/TextAnalysis | refs/heads/master | /calculateLift.py |
def calculateLift():
    """Placeholder for the brand-association lift computation (not implemented)."""
    return None
| {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,298 | jordandpflum/TextAnalysis | refs/heads/master | /replaceModelWBrand.py |
def replaceModelWBrand(tokens, wordBrandCSV):
    """
    Replace Model Occurance with Brand
    :param tokens: list of all important words in comments
    :param wordBrandCSV: path (or file-like object) of the model->brand CSV;
        assumed to have a 'Model' column (lowercase model names) plus one
        brand column — TODO confirm the exact schema against the data file
    :return: the token list with model names replaced by their brand
    """
    import pandas as pd  # local import: this module has no top-level pandas import

    # Bug fixes: read_csv was given the literal string 'wordBrandCSV' instead
    # of the argument; set_index('Model') was not assigned back; to_dict()
    # yields a nested {column: {index: value}} mapping so the later lookup
    # could never succeed; and membership on a Series checks the index, not
    # the values.
    table = pd.read_csv(wordBrandCSV)
    brand_column = [c for c in table.columns if c != 'Model'][0]
    brandWordAssociation = dict(zip(table['Model'], table[brand_column]))
    brandWords = set(brandWordAssociation)

    for i in range(len(tokens)):
        word = tokens[i].lower()
        if word in brandWords:
            tokens[i] = brandWordAssociation[word]
    return tokens
| {"/mianDriver_Jordan.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"], "/mainDriver_Katelyn.py": ["/webScraper.py", "/replaceModelWBrand.py", "/MDS.py", "/calculateLift.py"]} |
43,303 | ericlearning/inverse-kinematics | refs/heads/main | /utils/graphics.py | import torch
import arcade
import torch.nn as nn
import torch.optim as optim
from arcade.key import *
from .visualization import draw_arm
from .losses import mseloss, constraint
# Keyboard rows mapped to per-joint angle increments: top row (QWERTY...)
# increases a joint's angle, home row (ASDF...) decreases it.
keys_inc = [Q, W, E, R, T, Y, U, I, O]
keys_dec = [A, S, D, F, G, H, J, K, L]
class Kinematics(arcade.Window):
    """Forward-kinematics demo window: an articulated arm whose joint
    angles are driven manually from the keyboard."""
    def __init__(self, w, h, title, all_thetas, all_arms):
        super().__init__(w, h, title)
        arcade.set_background_color(arcade.color.WHITE)
        self.w = w
        self.h = h
        self.all_thetas = all_thetas  # joint angles (one per segment)
        self.all_arms = all_arms      # segment lengths
        # Per-joint angular velocity applied each frame while a key is held.
        self.increments = torch.zeros((len(all_thetas)))
        self.arm = None
    def update_arm(self):
        # Rebuild the drawable shapes and cache the end-effector position.
        self.arm, self.cur_coord = draw_arm(
            self.all_thetas, self.all_arms, self.w, self.h)
    def setup(self):
        self.update_arm()
    def on_draw(self):
        arcade.start_render()
        self.arm.draw()
    def on_update(self, _):
        with torch.no_grad():
            self.all_thetas += self.increments
        # Only redraw when at least one joint is moving.
        if self.increments.any():
            self.update_arm()
    def set_increment(self, symbol, values):
        # values = [increase-rate, decrease-rate]; ignore keys beyond the
        # number of joints.
        if symbol in keys_inc:
            idx = keys_inc.index(symbol)
            if idx < len(self.increments):
                self.increments[idx] = values[0]
        if symbol in keys_dec:
            idx = keys_dec.index(symbol)
            if idx < len(self.increments):
                self.increments[idx] = values[1]
    def on_key_press(self, symbol, _):
        self.set_increment(symbol, [0.1, -0.1])
    def on_key_release(self, symbol, _):
        self.set_increment(symbol, [0, 0])
class InvKinematics(Kinematics):
    """Inverse-kinematics window: the arm's end effector chases the mouse
    cursor by gradient descent on the distance loss."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Make the joint angles learnable and optimize them with Adam.
        self.all_thetas = nn.Parameter(self.all_thetas)
        self.optim = optim.Adam([self.all_thetas], 0.02)
        self.target_coord = None
    def on_mouse_motion(self, x, y, _, __):
        # Track the cursor in arm-centred coordinates (origin = window centre).
        self.target_coord = (x-self.w/2, y-self.h/2)
    def on_update(self, _):
        with torch.no_grad():
            self.all_thetas += self.increments
            self.all_thetas %= (3.1415 * 2)  # keep angles in [0, 2*pi)
        self.update_arm()
        # Optimize only when the user is not driving joints manually.
        if not self.increments.any() and self.target_coord is not None:
            self.optim.zero_grad()
            loss = mseloss(self.cur_coord, self.target_coord, self.w, self.h)
            loss.backward()
            self.optim.step()
class InvKinematicsConstraint(Kinematics):
    """Inverse-kinematics window with a straightness constraint.

    The end effector chases the mouse by gradient descent on the distance
    loss plus ``straight`` times a joint-straightness penalty; the A/S keys
    raise/lower that penalty weight at runtime.

    Fix: the original final line was fused with corrupted dataset-trailer
    text (``print(self.straight) | {...}``), which made the file
    unparsable; the logic itself is unchanged.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Make the joint angles learnable and optimize them with Adam.
        self.all_thetas = nn.Parameter(self.all_thetas)
        self.optim = optim.Adam([self.all_thetas], 0.02)
        self.target_coord = None
        self.straight = 0.1  # weight of the straightness penalty
    def on_mouse_motion(self, x, y, _, __):
        # Track the cursor in arm-centred coordinates (origin = window centre).
        self.target_coord = (x-self.w/2, y-self.h/2)
    def on_update(self, _):
        with torch.no_grad():
            self.all_thetas += self.increments
            self.all_thetas %= (3.1415 * 2)  # keep angles in [0, 2*pi)
        self.update_arm()
        # Optimize only when the user is not driving joints manually.
        if not self.increments.any() and self.target_coord is not None:
            self.optim.zero_grad()
            loss_mse = mseloss(self.cur_coord, self.target_coord, self.w, self.h)
            loss_con = constraint(self.all_thetas[1:])
            loss = loss_mse + loss_con * self.straight
            loss.backward()
            self.optim.step()
    def on_key_press(self, symbol, _):
        # A/S raise/lower the straightness weight; echo the new value.
        if symbol == A:
            self.straight += 0.1
            print(self.straight)
        elif symbol == S:
            self.straight -= 0.1
            print(self.straight)
43,304 | ericlearning/inverse-kinematics | refs/heads/main | /vis_inv_constraint.py | import math
import torch
import arcade
import torch.nn as nn
from utils.graphics import InvKinematicsConstraint
WIDTH = 800
HEIGHT = 800
COUNT = 200  # number of arm segments

# Random initialisation — immediately overwritten below; kept (like the
# commented variants) for experimenting with different arm shapes.
all_thetas = torch.rand((COUNT,)) * (3.1415 * 2)
all_arms = torch.rand((COUNT,)) * (15) + 5

# Active configuration: all joints at angle 0, uniform segment length 5.
all_thetas = torch.full((COUNT,), 0, dtype=torch.float32)
all_arms = torch.full((COUNT,), 5, dtype=torch.float32)
# all_arms = torch.linspace(20, 1, COUNT, dtype=torch.float32)
# all_arms = 10 ** torch.linspace(
# math.log(40, 10), math.log(10, 10),
# COUNT, dtype=torch.float32)
# all_arms = 10 ** torch.linspace(
# math.log(10, 10), math.log(40, 10),
# COUNT, dtype=torch.float32)
print(all_arms)
def main():
    """Open the constrained inverse-kinematics demo window and run arcade."""
    demo = InvKinematicsConstraint(
        w=WIDTH,
        h=HEIGHT,
        title='Kinematics',
        all_thetas=all_thetas,
        all_arms=all_arms,
    )
    demo.setup()
    arcade.run()


if __name__ == '__main__':
    main()
43,305 | ericlearning/inverse-kinematics | refs/heads/main | /vis_kinematics.py | import torch
import arcade
from utils.graphics import Kinematics
WIDTH = 800
HEIGHT = 800
COUNT = 9  # one joint per mapped keyboard key (9 available)

# Random start pose: arbitrary angles, segment lengths in [50, 100).
all_thetas = torch.rand((COUNT,)) * (3.1415 * 2)
all_arms = torch.rand((COUNT,)) * (50) + 50
def main():
    """Open the forward-kinematics demo window and run the arcade loop."""
    demo = Kinematics(
        w=WIDTH,
        h=HEIGHT,
        title='Kinematics',
        all_thetas=all_thetas,
        all_arms=all_arms,
    )
    demo.setup()
    arcade.run()


if __name__ == '__main__':
    main()
43,306 | ericlearning/inverse-kinematics | refs/heads/main | /utils/visualization.py | import torch
import arcade
from skimage.color import hsv2rgb
from .kinematics import forward_kinematics, forward_kinematics_all
@torch.no_grad()
def draw_arm_naive(thetas, arms, w, h):
    """Build an arcade shape list for the arm, re-solving forward
    kinematics for each joint prefix (O(n^2) — reference implementation;
    ``draw_arm`` below does it in one pass)."""
    prev_x, prev_y = 0, 0
    shapes = arcade.ShapeElementList()
    for i in range(1, len(thetas)+1):
        # Joint marker at the previous segment's end (window-centred coords).
        joint = arcade.create_ellipse_filled(
            prev_x+w//2, prev_y+h//2, 5, 5, arcade.color.BLACK)
        # End position of the chain truncated after joint i.
        x, y = forward_kinematics(thetas[:i], arms[:i])
        x = int(x)
        y = int(y)
        line = arcade.create_line(
            x+w//2, y+h//2, prev_x+w//2, prev_y+h//2,
            arcade.color.RED, 3)
        prev_x, prev_y = x, y
        shapes.append(joint)
        shapes.append(line)
    return shapes
def draw_arm(thetas, arms, w, h, draw_joints=False):
    """Build an arcade shape list for the whole arm in one kinematics pass.

    Returns ``(shapes, end_effector_coord)``.  Each segment's red channel
    encodes how far its joint angle is from pi.
    """
    shapes = arcade.ShapeElementList()
    xs, ys = forward_kinematics_all(thetas, arms)
    px, py = 0, 0
    for idx, theta in enumerate(thetas):
        if draw_joints:
            shapes.append(arcade.create_ellipse_filled(
                px+w//2, py+h//2, 5, 5, arcade.color.BLACK))
        cx = int(xs[idx])
        cy = int(ys[idx])
        red = int(abs(float(theta) - 3.1415) / 3.1415 * 255)
        shapes.append(arcade.create_line(
            cx+w//2, cy+h//2, px+w//2, py+h//2, (red, 0, 0), 3))
        px, py = cx, cy
    return shapes, (xs[-1], ys[-1])
43,307 | ericlearning/inverse-kinematics | refs/heads/main | /utils/losses.py | import torch
def mseloss(coord1, coord2, w=None, h=None):
    """Squared distance between two 2-D points, optionally with each axis
    normalised by the window size (w, h)."""
    dx = coord1[0] - coord2[0]
    dy = coord1[1] - coord2[1]
    if w is None or h is None:
        return dx ** 2 + dy ** 2
    return (dx / w) ** 2 + (dy / h) ** 2
def constraint(thetas):
    """Penalty that is minimised when joint angles sit at pi, i.e. when
    the arm is straight (returns minus the mean absolute deviation)."""
    deviation = torch.abs(thetas - 3.1415)
    return -deviation.mean()
43,308 | ericlearning/inverse-kinematics | refs/heads/main | /utils/kinematics.py | import torch
def forward_kinematics(thetas, arms):
    """Return the (x, y) end-effector position of the arm.

    Each theta is relative to the previous link, so absolute angles are
    the running sums — computed here via multiplication with an
    upper-triangular ones matrix.
    """
    n = len(arms)
    acc = thetas.reshape(1, -1) @ torch.triu(torch.ones((n, n)))
    acc = acc.flatten()
    return torch.cos(acc) @ arms, torch.sin(acc) @ arms
def forward_kinematics_all(thetas, arms):
    """Return tensors of every joint's (x, y) position along the arm.

    Absolute angles are running sums of the relative thetas; joint
    positions are running sums of the per-segment displacements — both
    obtained by multiplying with an upper-triangular ones matrix.
    """
    n = len(arms)
    upper = torch.triu(torch.ones((n, n)))
    acc = (thetas.reshape(1, -1) @ upper).flatten()
    seg_x = (torch.cos(acc) * arms).reshape(1, -1)
    seg_y = (torch.sin(acc) * arms).reshape(1, -1)
    return (seg_x @ upper)[0], (seg_y @ upper)[0]
43,309 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/clean_networks.py | import argparse
from autocompose.cleaner import clean_networks
from .command import Command
# Argument parser for the `autocompose clean-networks` subcommand.
__parser = argparse.ArgumentParser(prog="autocompose clean-networks", description='Remove all Docker networks.')

# Bind the parser to the clean_networks action.
clean_networks_command = Command(__parser, clean_networks)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,310 | rapid7/autocompose | refs/heads/master | /autocompose/updater.py | from .authenticator import get_authorization_data
from .util import print_docker_output
def update_images(aws_session, docker_client, **kwargs):
    """
    Updates any Docker images from ECR.
    :param aws_session: session used to obtain ECR authorization data
    :param docker_client: Docker client used to list and pull images
    :param kwargs: unused here — presumably accepted so all commands share
        one call signature; TODO confirm
    :return: None
    """
    print('Updating ECR Docker images...')
    authorization_data = get_authorization_data(aws_session)
    # The registry host (proxy endpoint minus the scheme) prefixes every ECR tag.
    repo_tag = authorization_data['proxyEndpoint'].replace('https://', '')
    # Pull a fresh copy of every locally-present image that came from this registry.
    for image in docker_client.images(all=True):
        if image['RepoTags'] is not None:
            for tag in image['RepoTags']:
                if tag.startswith(repo_tag):
                    print_docker_output(docker_client.pull(tag, stream=True))
    print('Done.')
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,311 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/login.py | import argparse
from autocompose.authenticator import login_to_ecs
from .command import Command
# Parser for the `autocompose login` sub-command; it takes no arguments.
__parser = argparse.ArgumentParser(prog="autocompose login", description='Login to ECR.')
# Command wiring: dispatches the parsed arguments to the ECR login routine.
login_command = Command(__parser, login_to_ecs)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,312 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/path.py | import argparse
from autocompose.util import print_paths
from .command import Command
# Parser for the `autocompose path` sub-command; it takes no arguments.
__parser = argparse.ArgumentParser(prog="autocompose path", description='Print the autocompose paths.')
# Command wiring: dispatches the parsed arguments to the path printer.
path_command = Command(__parser, print_paths)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,313 | rapid7/autocompose | refs/heads/master | /tests/test_util.py | import unittest
from autocompose.util import Util
class TestReplaceTemplateVariables(unittest.TestCase):
    """Tests for Util.replace_template_variables over scalars, lists and dicts."""

    def test_replace(self):
        mapping = {3: 4}
        # A bare scalar that appears in the mapping is replaced directly.
        self.assertEqual(4, Util.replace_template_variables(3, mapping))
        # Lists are replaced element-wise.
        self.assertEqual([1, 2, 4], Util.replace_template_variables([1, 2, 3], mapping))
        # Dict values are replaced; keys are left untouched.
        self.assertEqual({1: 2, 3: 5}, Util.replace_template_variables({1: 2, 3: '4'}, {'4': 5}))
        # A non-dict mapping argument is rejected.
        self.assertRaises(TypeError, Util.replace_template_variables, [3, 'not a dictionary'])
class TestDeepMerge(unittest.TestCase):
    """Tests for Util.deep_merge across scalars, lists and nested dicts."""

    def test_1(self):
        # Disjoint dicts are combined.
        self.assertEqual({'a': '1', 'b': '2'}, Util.deep_merge({'a': '1'}, {'b': '2'}))
        # On a key conflict the second argument wins.
        self.assertEqual({'a': '2'}, Util.deep_merge({'a': '1'}, {'a': '2'}))
        # Lists are concatenated; per this fixture, elements already present
        # in the first list are not duplicated.
        self.assertEqual([1, 2, 3, 4, 5], Util.deep_merge([1, 2, 3], [3, 4, 5]))
        # For plain scalars the second argument wins.
        self.assertEqual(4, Util.deep_merge(5, 4))
        # Nested structures merge recursively.
        left = {'a': 1, 'b': [1, 2, 3, 4, 5], 'c': {'a': 1, 'b': [1, 2, 3, 4, 5]}}
        right = {'d': 2, 'b': [6, 7], 'c': {'c': 1, 'd': [1, 2]}}
        expected = {'a': 1, 'd': 2, 'b': [1, 2, 3, 4, 5, 6, 7],
                    'c': {'a': 1, 'b': [1, 2, 3, 4, 5], 'c': 1, 'd': [1, 2]}}
        self.assertEqual(expected, Util.deep_merge(left, right))
# Allow running this test module directly (python tests/test_util.py).
if __name__ == '__main__':
    unittest.main()
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,314 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/compose.py | import argparse
from autocompose.composer import print_compose_file
from .command import Command
# Parser for the `autocompose compose` sub-command.
__parser = argparse.ArgumentParser(prog="autocompose compose", description='Create a docker-compose.yml file.')
# Positional, variadic: names of the scenarios and/or services to compose.
__parser.add_argument(dest='scenarios', nargs='*', help='Scenarios and/or services.')
# Command wiring: dispatches the parsed arguments to the compose-file printer.
compose_command = Command(__parser, print_compose_file)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,315 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/clean_containers.py | import argparse
from autocompose.cleaner import clean_containers
from .command import Command
# Parser for the `autocompose clean-containers` sub-command; it takes no arguments.
__parser = argparse.ArgumentParser(prog="autocompose clean-containers", description='Remove all Docker containers.')
# Command wiring: dispatches the parsed arguments to the container cleaner.
clean_containers_command = Command(__parser, clean_containers)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,316 | rapid7/autocompose | refs/heads/master | /autocompose/pusher.py | from .authenticator import get_authorization_data
from .util import *
def tag_to_ecr(aws_session, docker_client, tag):
    """
    Tag the local Docker image for the current service into its ECR repository.

    :param aws_session: The AWS session.
    :param docker_client: The Docker client.
    :param tag: The tag to apply. None or '' defaults to 'latest'.
    :return: None
    """
    # Falsiness check instead of `tag is ''`: identity comparison against a
    # string literal is unreliable and a SyntaxWarning on modern Python.
    if not tag:
        tag = 'latest'
    service_name = get_service_name()
    repo = __get_docker_repository_name(aws_session, service_name)
    full_tag = service_name + ':' + tag
    image = __get_docker_image(docker_client, full_tag)
    # Tag to the ECR repo
    try:
        docker_client.tag(repository=repo, image=image, tag=tag)
    except BaseException as e:
        print(e)
        # `image` is a dict; concatenating it directly to a str would raise
        # TypeError inside this handler and mask the real error.
        raise Exception('An error occurred when tagging the image "' + str(image) +
                        '" with the tag "' + full_tag + '".') from e
def push_to_ecs(aws_session, docker_client, image_name=None, tag=None):
    """
    Pushes the docker image represented by the current directory up to AWS's ECR.

    :param aws_session: The AWS session.
    :param docker_client: The Docker client.
    :param image_name: The name of the image. Defaults to the current service name.
    :param tag: The tag to apply to the Docker image. Default is latest.
    :return: None
    """
    if image_name is None:
        image_name = get_service_name()
    print('image_name: ' + image_name)
    if tag is None:
        tag = 'latest'
    repo = __get_docker_repository_name(aws_session, image_name)
    full_tag = image_name + ':' + tag
    image = __get_docker_image(docker_client, full_tag)
    # Tag the image into the ECR repository, then push it.
    print('Pushing the image "' + full_tag + '" up to "' + repo + '"...')
    try:
        docker_client.tag(repository=repo, image=image, tag=tag)
    except BaseException as e:
        print(e)
        # `image` is a dict; concatenating it directly to a str would raise
        # TypeError inside this handler and mask the real error.
        raise Exception('An error occurred when tagging the image "' + str(image) +
                        '" with the tag "' + full_tag + '".') from e
    try:
        print_docker_output(docker_client.push(repository=repo, stream=True, tag=tag))
    except BaseException as e:
        print(e)
        raise Exception('An error occurred when pushing "' + full_tag + '" to ECR.') from e
    print('The image "' + full_tag + '" has now been pushed up to "' + repo + '".')
def __get_docker_image(docker_client, repo_tag):
    """Return the first local image whose RepoTags list contains *repo_tag*.

    Raises a generic Exception when no local image carries the tag.
    """
    match = next(
        (candidate for candidate in docker_client.images()
         if candidate['RepoTags'] is not None and repo_tag in candidate['RepoTags']),
        None)
    if match is None:
        raise Exception('Could not find image')
    return match
def __get_docker_repository_name(aws_session, service_name):
    """Build the ECR repository name: the registry host (scheme stripped) plus the service name."""
    endpoint = get_authorization_data(aws_session)['proxyEndpoint']
    host = endpoint.replace('https://', '')
    return host + '/' + service_name
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,317 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/update_images.py | import argparse
from autocompose.updater import update_images
from .command import Command
# Parser for the `autocompose update-images` sub-command; it takes no arguments.
__parser = argparse.ArgumentParser(prog="autocompose update-images", description='Update images.')
# Command wiring: dispatches the parsed arguments to the image updater.
update_images_command = Command(__parser, update_images)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,318 | rapid7/autocompose | refs/heads/master | /autocompose/cleaner.py | import docker
from docker import errors
def clean_containers(docker_client, **kwargs):
    """
    Kill and remove every Docker container (running or stopped) on the local machine.

    :param docker_client: The Docker client.
    :param kwargs: Ignored; accepted for a uniform command interface.
    :return: None.
    """
    print('Killing and removing all Docker containers...')
    for container in docker_client.containers(all=True):
        try:
            docker_client.remove_container(container, force=True)
        except docker.errors.NotFound:
            # The container disappeared on its own; nothing left to do.
            pass
    print('Done.')
def clean_images(docker_client, **kwargs):
    """
    Removes all docker images from the local machine.

    Removal runs in repeated passes until a full pass removes nothing:
    an image can become removable only after the images that depend on
    it have been removed.

    :param docker_client: The Docker client.
    :param kwargs: Ignored; accepted for a uniform command interface.
    :return: None.
    """
    print('Removing all Docker images...')
    empty = False
    while not empty:
        empty = True
        for image in docker_client.images(all=True):
            try:
                docker_client.remove_image(image, force=True)
                # A removal succeeded, so a previously blocked image may now
                # be removable: schedule another pass. (The original code
                # never reset the flag, so the loop ran exactly once.)
                empty = False
            except docker.errors.NotFound:
                # Already gone (e.g. removed along with a related image).
                pass
            except docker.errors.APIError:
                print('Could not remove image "' + image['Id'] + '"')
    print('Done.')
def clean_networks(docker_client, **kwargs):
    """
    Remove every user-created Docker network on the local machine, leaving
    Docker's built-in networks ('bridge', 'host', 'none') in place.

    :param docker_client: The Docker client.
    :param kwargs: Ignored; accepted for a uniform command interface.
    :return:
    """
    print('Removing all non-default Docker networks...')
    builtin_networks = ['bridge', 'host', 'none']
    for network in docker_client.networks():
        if network['Name'] in builtin_networks:
            continue
        try:
            docker_client.remove_network(net_id=network['Id'])
        except docker.errors.APIError:
            print('Could not remove network "' + network['Name'] + '"')
    print('Done.')
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,319 | rapid7/autocompose | refs/heads/master | /autocompose/authenticator.py | import base64
import json
from botocore.exceptions import ClientError as BotoClientError
from .util import *
# Config directory for Docker
# NOTE(review): os.environ['HOME'] raises KeyError when HOME is unset
# (e.g. on Windows) — os.path.expanduser('~') would be safer; confirm
# supported platforms before changing.
__docker_config_directory = os.path.join(os.environ['HOME'], '.docker')
# Docker config file
# Path of the Docker CLI credential store that __save_docker_login updates.
__docker_config_file = os.path.join(__docker_config_directory, 'config.json')
def login_to_ecs(aws_session, docker_client, **kwargs):
    """
    Logs in to AWS's Docker Registry (ECR).

    Fetches an authorization token from AWS, performs a Docker login with the
    credentials it contains, and persists the token to the Docker config file
    so the docker CLI can use the login as well.

    :param aws_session: The AWS session.
    :param docker_client: The Docker client.
    :param kwargs: Ignored; accepted for a uniform command interface.
    :return: None
    """
    print('Getting authorization data from AWS...')
    try:
        authorization_data = get_authorization_data(aws_session)
    except Exception as e:
        # Chain the original error so the root cause is not lost.
        raise Exception('Unable to login to ECR. Make sure AWS credentials are set and valid.') from e
    # Get the authorization token. It contains the username and password for the ECR registry.
    if 'authorizationToken' not in authorization_data:
        raise Exception('Authorization data is missing an "authorizationToken" (docker registry password)')
    authorization_token = authorization_data['authorizationToken']
    # Get the proxy endpoint. It's the URL for the ECR registry.
    if 'proxyEndpoint' not in authorization_data:
        raise Exception('Authorization data is missing a "proxyEndpoint" (docker registry url)')
    registry = authorization_data['proxyEndpoint']
    # The token decodes to "<username>:<password>"; split on the first ':'
    # only, so a password containing ':' is preserved intact.
    decoded = base64.b64decode(authorization_token).decode('utf-8')
    username, password = decoded.split(':', 1)
    # Log in to the registry
    print('Logging into ECR Registry "' + registry + '"...')
    try:
        result = docker_client.login(username=username, password=password, registry=registry, reauth=True)
    except BaseException as e:
        print(e)
        raise Exception('Error logging into ECR') from e
    if 'Status' not in result or result['Status'] != 'Login Succeeded':
        raise Exception('Error logging into ECR')
    # The boto3 login function does not save the authorization token.
    # So here we save it manually to ${HOME}/.docker/config.json, keyed both
    # with and without the "https://" scheme.
    print('Saving Docker login to "' + __docker_config_file + '"...')
    __save_docker_login(registry, authorization_token)
    if registry.startswith("https://"):
        __save_docker_login(registry[len("https://"):], authorization_token)
    # Fixed duplicated word in the user-facing message ("can can").
    print('Login Succeeded. You can push to and pull from "' + registry + '".')
def get_authorization_data(aws_session):
    """
    Retrieve authorization data for ECR from AWS.
    See http://boto3.readthedocs.io/en/latest/reference/services/ecr.html#ECR.Client.get_authorization_token
    :param aws_session: The AWS session.
    :return: The first element in the authorizationData array.
    """
    ecr_client = aws_session.client('ecr')
    login_hint = 'Unable to get a login via the AWS client. Have you ran \'autocompose login\' ?'
    try:
        response = ecr_client.get_authorization_token()
    except BotoClientError:
        raise Exception(login_hint)
    if 'authorizationData' not in response:
        raise Exception(login_hint)
    authorization_data = response['authorizationData']
    if not authorization_data:
        raise Exception('Authorization data was empty. ')
    return authorization_data[0]
def __save_docker_login(registry, authorization_token):
    """
    Persist authorization for a Docker registry to the Docker config file.
    :param registry: The name of the Docker registry
    :param authorization_token: The authorization token which contains the username and password.
    :return: None
    """
    # Load the existing config, if any, so other registries' logins survive.
    config = {}
    if os.path.exists(__docker_config_file):
        with open(__docker_config_file, 'r') as handle:
            config = json.load(handle)
    if 'auths' not in config:
        config['auths'] = {}
    # The config directory may not exist yet on a fresh machine.
    if not os.path.exists(__docker_config_directory):
        os.mkdir(__docker_config_directory)
    config['auths'][registry] = {'auth': authorization_token}
    with open(__docker_config_file, 'w+') as handle:
        json.dump(config, handle)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,320 | rapid7/autocompose | refs/heads/master | /autocompose/constants.py | # Constants used throughout autocompose.
# Keys used in autocompose configuration.
AUTOCOMPOSE_VERSION_KEY = 'autocompose-version'
AUTOCOMPOSE_BUILD_VERSION_KEY = 'autocompose-build-version'
AUTOCOMPOSE_UP_VERSION_KEY = 'autocompose-up-version'
AUTOCOMPOSE_IMAGE_KEY = 'autocompose-image'
AUTOCOMPOSE_TEMPLATES_KEY = 'autocompose-templates'
# Well-known file names read and written by autocompose.
AUTOCOMPOSE_SERVICE_FILE = 'service.yml'
AUTOCOMPOSE_SCENARIO_FILE = 'scenario.yml'
DOCKERFILE = 'Dockerfile'
DOCKER_COMPOSE_FILE = 'docker-compose.yml'
DOCKER_COMPOSE_SERVICES_FILE = 'docker-compose-service.yml'
DOCKERFILE_SH = 'Dockerfile.sh'
# Key under which template-variable substitutions are provided.
TEMPLATE_VARIABLES_KEY = 'template-variables'
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,321 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/clean_images.py | import argparse
from autocompose.cleaner import clean_images
from .command import Command
# CLI definition for "autocompose clean-images": takes no extra options;
# the bound handler removes all Docker images.
__parser = argparse.ArgumentParser(prog="autocompose clean-images", description='Remove all Docker images.')
clean_images_command = Command(__parser, clean_images)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,322 | rapid7/autocompose | refs/heads/master | /autocompose/builder.py | import shutil
import subprocess
from .constants import *
from .pusher import tag_to_ecr
from .util import *
def build(aws_session, docker_client, image_name=None, tag=None):
    """
    Builds a docker image from the current directory.

    Steps: locate this service's autocompose config on the AUTOCOMPOSE_PATH,
    copy the referenced image recipe's files into the current directory,
    run Dockerfile.sh when present, run "docker build .", remove the copied
    files, and finally tag the built image with its ECR repository.

    :param aws_session: The AWS session.
    :param docker_client: The Docker client
    :param image_name: The name of the image. Defaults to the service name.
    :param tag: The tag to apply to the Docker image. Default is latest.
    :return: None.
    :raises Exception: If the service config or image recipe cannot be found,
        or if Dockerfile.sh / "docker build" fails.
    """
    service_name = get_service_name()
    print('Looking for the location of the service "' + service_name + '" in the AUTOCOMPOSE_PATH...')
    autocompose_config_file = get_first_from_paths(os.path.join('services', service_name), AUTOCOMPOSE_SERVICE_FILE)
    # safe_load + context manager: yaml.load() without an explicit Loader is
    # rejected by modern PyYAML, and the open() handle was never closed.
    with open(autocompose_config_file, 'r') as config_file:
        autocompose_config = yaml.safe_load(config_file)

    # Get the name of the image
    if AUTOCOMPOSE_IMAGE_KEY not in autocompose_config:
        raise Exception('No Autocompose image specified')
    image = autocompose_config[AUTOCOMPOSE_IMAGE_KEY]
    print('The service "' + service_name + '" wants to use the image "' + image + '".')

    # Find the directory where the image recipe resides
    print('Looking for the location of the image "' + image + '" in the AUTOCOMPOSE_PATH...')
    image_path = __get_image_path(image)
    if image_path is None:
        raise Exception('Could not find the image ' + image)
    print('Using the path "' + image_path + '"')

    # Copy files from the recipe to the current directory
    print('Copying files from "' + image_path + '" to your current directory...')
    copied_files = __copy_files(image_path)

    # If the Dockerfile.sh file exists, execute it
    if os.path.exists(DOCKERFILE_SH):
        print(DOCKERFILE_SH + ' exists. Executing...')
        try:
            subprocess.call(['bash', DOCKERFILE_SH])
        except BaseException as e:
            print(e)
            __fail(copied_files)
            raise Exception('An error occurred while executing Dockerfile.sh')
        print('Dockerfile.sh executed successfully.')

    # Execute 'docker build .' with the requested repo:tag
    if image_name is None:
        image_name = service_name
    if tag is None:
        repo_tag = image_name
    else:
        repo_tag = image_name + ':' + tag
    print('Calling "docker build ." (and tagging image with "' + repo_tag + '")')
    try:
        __build_docker_image(docker_client, path='.', tag=repo_tag)
    except BaseException as e:
        print(e)
        __fail(copied_files)
        raise Exception('An error occurred when running "docker build .". Make sure the Dockerfile is correct.')
    print('Image built successfully.')

    # Cleanup copied files
    print('Cleaning up copied files...')
    __cleanup(copied_files)

    print('Tagging image with ECR repository...')
    tag_to_ecr(aws_session, docker_client, tag)
    print('Image tagged.')
def __get_image_path(image_name):
    """
    Locate a docker image recipe in the autocompose path directories.

    The earliest AUTOCOMPOSE_PATH entry always wins.

    :param image_name: The name of the image.
    :return: The path to the docker image recipe, or None when unknown.
    """
    matches = get_from_paths('images', image_name)
    if matches:
        return matches[0]
    return None
def __copy_files(image_path):
    """
    Copy every file from a docker image recipe path into the current path.

    Files that already exist locally are skipped (and reported).

    :param image_path: The docker image recipe path.
    :return: The list of file names that were actually copied.
    """
    copied_files = []
    for entry in os.listdir(path=image_path):
        if os.path.exists(entry):
            print(' - Did not copy "' + entry + '" because a file of the same name already exists')
            continue
        print(' - "' + entry + '"')
        copied_files.append(entry)
        shutil.copy(os.path.join(image_path, entry), entry)
    return copied_files
def __cleanup(copied_files):
    """
    Delete the files that were previously copied into the working directory.

    :param copied_files: A list of file names to remove.
    :return: Nothing.
    """
    for copied in copied_files:
        print(' - "' + copied + '"')
        os.remove(copied)
def __build_docker_image(docker_client, path, tag):
    """
    Calls 'docker build' with the given path and tag.
    :param docker_client: The docker client.
    :param path: The path to build.
    :param tag: The tag to give the built docker image.
    :return: Nothing.
    """
    # stream=True makes the client yield build output incrementally;
    # print_docker_output relays each chunk to stdout as it arrives.
    print_docker_output(docker_client.build(path=path, tag=tag, stream=True))
def __fail(copied_files):
    """
    Prints a fail message and cleans up any files copied into the
    current directory, so a failed build leaves no residue behind.

    :param copied_files: File names that were copied and must be removed.
    :return: Nothing.
    """
    print('An error has occurred.')
    print('Cleaning up copied files...')
    __cleanup(copied_files)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,323 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/build.py | import argparse
from autocompose.builder import build
from .command import Command
# CLI definition for "autocompose build": the optional image name and tag
# map directly onto builder.build's keyword arguments.
__parser = argparse.ArgumentParser(prog="autocompose build",
                                   description='Build a Docker image for the current directory.')
__parser.add_argument('--image-name', default=None, help='Image name. Default is the current directory.')
__parser.add_argument('--tag', default='latest', help='Tag to add to the image.')
build_command = Command(__parser, build)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,324 | rapid7/autocompose | refs/heads/master | /autocompose/util.py | import os
import re
import sys
import yaml
from compose import progress_stream
# Module-level override for the service name; None means "fall back to the
# current directory's name" (see get_service_name / set_service_name).
__autocompose_service_name = None
class ExplicitYamlDumper(yaml.SafeDumper):
    """
    A yaml dumper that will never emit aliases.
    """
    def ignore_aliases(self, data):
        # Always report "no alias" so repeated objects are dumped inline
        # rather than as YAML anchors/references (&id / *id).
        return True
def replace_template_variables(obj, terms):
    """
    Recursively replace template-variable occurrences throughout *obj*.

    Dict keys and values, list elements, and strings are searched for each
    key of *terms*; the first matching term in a string is substituted.
    Dicts are modified in place, lists are rebuilt. Non-string scalars
    (ints, bools, None, ...) are returned unchanged — the previous version
    crashed on them with "argument of type 'int' is not iterable".

    :param obj: Any object (typically a parsed YAML structure).
    :param terms: A dictionary of placeholder text -> replacement text.
    :return: the given obj, with any terms replaced.
    :raises TypeError: If *terms* is not a dictionary.
    """
    if not isinstance(terms, dict):
        raise TypeError('Terms must be of type dictionary')
    if isinstance(obj, dict):
        # Snapshot the items first: replacing a key mutates the dict, and
        # mutating while iterating obj.items() directly is unsafe.
        for key, value in list(obj.items()):
            new_key = replace_template_variables(key, terms)
            new_value = replace_template_variables(value, terms)
            if new_key != key:
                obj.pop(key)
            obj[new_key] = new_value
        return obj
    elif isinstance(obj, list):
        return [replace_template_variables(element, terms) for element in obj]
    elif isinstance(obj, str):
        for key, value in terms.items():
            if key in obj:
                return obj.replace(key, value)
        return obj
    else:
        # Scalar leaf: nothing to substitute.
        return obj
def deep_merge(a, b):
    """
    Merges b into a, recursively (docker-compose-style merge).

    - dict + dict: keys merged recursively; keys in b win.
    - list + list: elements of b appended to a with duplicates removed,
      preserving first-seen order. (The previous set-based merge produced
      a nondeterministic order and crashed on unhashable elements such as
      dicts inside a list.)
    - b is None: a is kept.
    - anything else: b wins.

    :param a: Any object.
    :param b: Any object.
    :return: b merged into a.
    """
    if isinstance(a, dict) and isinstance(b, dict):
        for key in b:
            if key in a:
                a[key] = deep_merge(a[key], b[key])
            else:
                a[key] = b[key]
        return a
    elif isinstance(a, list) and isinstance(b, list):
        # De-duplicate while keeping a stable, deterministic order; uses
        # equality ("in") rather than hashing so unhashable elements work.
        merged = []
        for element in a + b:
            if element not in merged:
                merged.append(element)
        return merged
    elif b is None:
        return a
    else:
        # Copy b's value into a.
        return b
def get_from_paths(sub_path, file_pattern):
    """
    Search through the AUTOCOMPOSE_PATHs for files in the sub-path which
    match the given file_pattern.

    :param sub_path: The sub-path to look for files in each autocompose path directory.
    :param file_pattern: A regex pattern fully matched against file names.
    :return: A list of matching file paths, in AUTOCOMPOSE_PATH order.
    """
    paths = os.environ['AUTOCOMPOSE_PATH'].split(":")
    results = []
    for path in paths:
        try:
            files = os.listdir(path=os.path.join(path, sub_path))
            for file in files:
                if re.fullmatch(file_pattern, file):
                    # Append the matched file name, not the raw pattern —
                    # the pattern is not necessarily a valid file name, so
                    # the previous code returned nonexistent paths for any
                    # non-literal pattern.
                    results.append(os.path.join(path, sub_path, file))
        except FileNotFoundError:
            pass
    return results
def get_first_from_paths(sub_path, file_pattern):
    """
    Return the first AUTOCOMPOSE_PATH match for *file_pattern* under *sub_path*.

    :raises Exception: If no autocompose path contains a matching file.
    """
    matches = get_from_paths(sub_path, file_pattern)
    if not matches:
        raise Exception(
            'No file ' + os.path.join(sub_path, file_pattern) + ' was found in any of the autocompose paths.')
    return matches[0]
def get_all_from_paths(sub_path):
    """
    Collect every file under *sub_path* across all AUTOCOMPOSE_PATH entries.

    :param sub_path: The sub-path to look for files in each autocompose path directory.
    :return: A list of full file paths; entries from earlier paths come first.
    """
    results = []
    for base in os.environ['AUTOCOMPOSE_PATH'].split(":"):
        target = os.path.join(base, sub_path)
        try:
            entries = os.listdir(path=target)
        except FileNotFoundError:
            continue
        results.extend(os.path.join(target, entry) for entry in entries)
    return results
def print_paths(**kwargs):
    """
    Print each AUTOCOMPOSE_PATH directory to stdout, one per line.

    :return: Nothing.
    """
    for entry in os.environ['AUTOCOMPOSE_PATH'].split(":"):
        print(entry)
def get_current_directory():
    """Return the base name of the current working directory."""
    cwd = os.getcwd()
    return os.path.split(cwd)[1]
def get_service_name():
    """
    Return the service name to operate on: the explicitly-set override if
    one exists, otherwise the current directory's name.
    """
    if __autocompose_service_name is not None:
        return __autocompose_service_name
    return get_current_directory()
def set_service_name(autocompose_service_name):
    """
    Override the service name returned by get_service_name().

    :param autocompose_service_name: The service name to use, or None to
        fall back to the current directory's name.
    """
    global __autocompose_service_name
    __autocompose_service_name = autocompose_service_name
def get_config(directory, sub_directory, file_pattern):
    """
    Loads a YAML config from the AUTOCOMPOSE_PATH.

    :param directory: The top-level directory name to search.
    :param sub_directory: The specific sub-directory.
    :param file_pattern: A file pattern to match files in the directory/sub-directory.
    :return: The first found config as a dictionary; {} when nothing is
             found or the file is empty.
    """
    configs = get_from_paths(os.path.join(directory, sub_directory), file_pattern)
    if len(configs) > 0:
        # Context manager closes the handle deterministically; safe_load is
        # required because yaml.load() without a Loader is rejected by
        # modern PyYAML (and would construct arbitrary objects on old ones).
        with open(configs[0]) as config_file:
            config = yaml.safe_load(config_file)
    else:
        config = {}
    if config is None:
        config = {}
    return config
def get_user_config():
    """
    Loads the user's ~/.autocompose/config.yml.

    :return: The parsed config as a dictionary; {} if HOME is unset, the
             file is missing/unreadable, malformed, or empty.
    """
    user_config_directory = os.path.join(os.environ['HOME'], '.autocompose')
    user_config_file = os.path.join(user_config_directory, 'config.yml')
    try:
        with open(user_config_file, 'r') as file:
            # safe_load: yaml.load() without a Loader is rejected by modern
            # PyYAML; the with-block also closes the handle deterministically.
            user_config = yaml.safe_load(file)
    except (KeyError, OSError, yaml.YAMLError):
        # Missing HOME, unreadable file, or malformed YAML: fall back to an
        # empty config instead of failing the whole command (the old bare
        # "except:" also hid genuine programming errors).
        user_config = {}
    if user_config is None:
        user_config = {}
    return user_config
def print_docker_output(stream):
    """
    Relay a docker client's streaming output to stdout.

    :param stream: An iterable of docker output chunks, as produced by the
        docker client's streaming APIs (e.g. build/push with stream=True).
    """
    progress_stream.stream_output(stream, sys.stdout)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,325 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/command.py | class Command(object):
    def __init__(self, argument_parser, function):
        # Parser that validates this command's raw argument list.
        self.argument_parser = argument_parser
        # Handler invoked with aws_session, docker_client and parsed options.
        self.function = function
def parse_and_execute(self, args, aws_session, docker_client):
arguments = self.argument_parser.parse_args(args)
self.function(aws_session=aws_session, docker_client=docker_client, **vars(arguments))
pass
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,326 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command/push.py | import argparse
from autocompose.pusher import push_to_ecs
from .command import Command
# CLI definition for "autocompose push": pushes a built image to ECR,
# defaulting the image name to the current directory and the tag to latest.
__parser = argparse.ArgumentParser(prog="autocompose push", description='Push a Docker image to ECR.')
__parser.add_argument('--image-name', default=None, help='Image name. Default is the current directory.')
__parser.add_argument('--tag', default='latest', help='Tag to add to the image.')
push_command = Command(__parser, push_to_ecs)
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,327 | rapid7/autocompose | refs/heads/master | /autocompose/composer.py | from .authenticator import get_authorization_data
from .constants import *
from .util import *
def print_compose_file(aws_session, scenarios, **kwargs):
    """
    Prints a generated docker-compose file out to stdout.
    :param aws_session: The AWS session.
    :param scenarios: The scenarios and/or services.
    :return: None
    """
    docker_compose_file = build_compose_file(aws_session, scenarios=scenarios)
    # Setting default_flow_style = False prints out multi-line arrays.
    output = yaml.dump(docker_compose_file, default_flow_style=False, Dumper=ExplicitYamlDumper)
    # Fix the array formatting.
    # NOTE(review): this substitution indents YAML list markers, but it also
    # rewrites any scalar value that happens to contain "- " — verify that
    # no config values can contain that substring.
    output = output.replace('- ', ' - ')
    print(output)
def build_compose_file(aws_session, scenarios):
    """
    Builds a docker-compose configuration dictionary, given a list of scenarios.

    Each scenario is merged in order, then template variables gathered from
    the scenarios and the user config are substituted throughout the result.

    :param aws_session: The aws_session.
    :param scenarios: a list of autocompose scenarios.
    :return: A docker-compose configuration as a dictionary.
    """
    user_config = get_user_config()
    docker_compose_config = {}
    template_variables = {}

    # Merge every scenario (and its services) into the configuration.
    for scenario_name in scenarios:
        __merge_scenario(aws_session, docker_compose_config, scenario_name, template_variables)

    # User-level template variables are collected last, then applied.
    __add_user_config_template_variables(user_config, template_variables)
    __apply_template_variables(docker_compose_config, template_variables)

    # Default the compose file format version when none was provided.
    docker_compose_config.setdefault('version', '3')
    return docker_compose_config
def __merge_scenario(aws_session, docker_compose_config, scenario_name, template_variables):
    """
    Merge one scenario — its services, template variables and compose
    overrides — into the docker-compose config.

    :param aws_session: The aws_session.
    :param docker_compose_config: The docker-compose config being currently built.
    :param scenario_name: The name of the scenario to merge.
    :param template_variables: Accumulator for scenario template variables.
    :return:
    """
    scenario_config = __get_scenario_config(scenario_name)
    deep_merge(template_variables, __get_scenario_template_variables(scenario_config))

    # Merge the configs of every service declared by the scenario.
    for name in __get_service_names(scenario_config):
        __merge_service(aws_session, name, docker_compose_config)

    # The scenario's own docker-compose.yml wins over the services' configs.
    deep_merge(docker_compose_config, __get_scenario_compose_config(scenario_name))
def __get_scenario_config(scenario_name):
    """
    Get a scenario's configuration from the AUTOCOMPOSE_PATH.

    Falls back to treating the name as a single service when no scenario
    directory is found.

    :param scenario_name: The name of the scenario.
    :return: The config of the scenario; {} if the scenario file is empty.
    :raises Exception: If neither a scenario nor a service by that name exists.
    """
    all_scenarios = get_from_paths('scenarios', scenario_name)
    if len(all_scenarios) == 0:
        # look for a single service
        all_services = get_from_paths(os.path.join('services', scenario_name), AUTOCOMPOSE_SERVICE_FILE)
        if len(all_services) < 1:
            raise Exception('Could not find the scenario ' + scenario_name)
        scenario_config = {'services': [scenario_name]}
    else:
        # safe_load + context manager: yaml.load() without a Loader is
        # rejected by modern PyYAML, and the open() handle was never closed.
        with open(os.path.join(all_scenarios[0], AUTOCOMPOSE_SCENARIO_FILE)) as scenario_file:
            scenario_config = yaml.safe_load(scenario_file)
    if scenario_config is None:
        scenario_config = {}
    return scenario_config
def __get_service_names(scenario_config):
    """
    Extract the list of services from a scenario config.

    :param scenario_config: The scenario config.
    :return: The 'services' list; [] when the key is absent.
    :raises Exception: If 'services' is present but not a list.
    """
    service_names = scenario_config.get('services', [])
    if not isinstance(service_names, list):
        raise Exception('"services" is not a list. It must be a list of services')
    return service_names
def __merge_service(aws_session, service_name, docker_compose_config):
    """
    Merge one service's compose config (plus any templates it requests)
    into the docker-compose config.

    :param aws_session: The aws_session.
    :param service_name: The service entry, optionally "name:version".
    :param docker_compose_config: The docker-compose config being currently built.
    :return:
    """
    service_name, version = __parse_version_from_service_name(service_name)
    service_compose_config = __get_docker_compose_config(service_name)
    service_config = __get_service_config(service_name)

    __add_service(service_compose_config, service_name)
    __add_docker_image(aws_session, service_compose_config, service_name, version)
    deep_merge(docker_compose_config, service_compose_config)

    # Apply any templates listed in the service's autocompose config.
    for template in service_config.get(AUTOCOMPOSE_TEMPLATES_KEY, []):
        __apply_template(docker_compose_config, service_name, template)
def __parse_version_from_service_name(service_name):
    """
    Parse the service name and version from a scenario "services" entry.

    Entries may carry a version as "name:version"; without one, 'latest'
    is assumed. Only the first ':' is treated as the separator, so a
    version string containing ':' no longer breaks callers' two-value
    unpacking (plain split(':') could return more than two parts).

    :param service_name: The name of the service
    :return: The service name, The service version
    """
    if ':' in service_name:
        name, version = service_name.split(':', 1)
        return name, version
    return service_name, 'latest'
def __add_service(compose_config, service_name):
    """
    Ensure a (possibly empty) entry for *service_name* exists under the
    'services' section, creating the section itself when needed.

    :param compose_config: The docker-compose config
    :param service_name: The name of the service.
    :return:
    """
    services = compose_config.setdefault('services', {})
    services.setdefault(service_name, {})
def __get_docker_image(aws_session, service_name, tag):
    """
    Build the full ECR image reference for a service.

    :param aws_session: The AWS session.
    :param service_name: The name of the service.
    :param tag: The tag for the service.
    :return: "<registry-host>/<service>:<tag>".
    """
    endpoint = get_authorization_data(aws_session)['proxyEndpoint']
    registry = endpoint.replace('https://', '')
    return '{0}/{1}:{2}'.format(registry, service_name, tag)
def __add_docker_image(aws_session, compose_config, service_name, tag):
    """
    Adds the Docker image to a service in a docker-compose config.

    The image is only added if the service does not already declare one.

    :param aws_session: The AWS session.
    :param compose_config: The docker-compose config being modified.
    :param service_name: The name of the service.
    :param tag: The tag to give the service.
    :return:
    """
    # Ensure the service entry exists *before* inspecting it; the previous
    # order raised KeyError when the service was missing from the config.
    __add_service(compose_config, service_name)
    if 'image' not in compose_config['services'][service_name]:
        url = __get_docker_image(aws_session, service_name, tag)
        compose_config['services'][service_name]['image'] = url
def __get_scenario_template_variables(scenario_config):
    """
    Collect a scenario's template variables, registering both "${NAME}"
    and "$NAME" placeholder spellings for each variable.

    :param scenario_config: The scenario.
    :return: A dictionary of placeholder -> replacement value.
    """
    template_variables = {}
    variables = scenario_config.get(TEMPLATE_VARIABLES_KEY)
    if isinstance(variables, dict):
        for name, value in variables.items():
            template_variables["${" + name + "}"] = value
            template_variables["$" + name] = value
    return template_variables
def __get_docker_compose_config(service_name):
    """
    Gets the docker-compose config for an autocompose service from the AUTOCOMPOSE_PATH.
    :param service_name: The name of the service
    :return: The docker-compose config for the service. {} when no file is found.
    """
    # Delegates to get_config, which returns {} for missing/empty files.
    return get_config('services', service_name, DOCKER_COMPOSE_FILE)
def __get_scenario_compose_config(scenario_name):
    """
    Gets the docker-compose config for an autocompose scenario from the AUTOCOMPOSE_PATH.
    :param scenario_name: The name of the scenario
    :return: The docker-compose config for the scenario. {} when no file is found.
    """
    # Delegates to get_config, which returns {} for missing/empty files.
    return get_config('scenarios', scenario_name, DOCKER_COMPOSE_FILE)
def __get_service_config(service_name):
    """
    Gets the autocompose config for an autocompose service from the AUTOCOMPOSE_PATH.
    :param service_name: The name of the service.
    :return: The config for the service. {} when no file is found.
    """
    # Delegates to get_config, which returns {} for missing/empty files.
    return get_config('services', service_name, AUTOCOMPOSE_SERVICE_FILE)
def __apply_template(docker_compose_config, service_name, template):
    """
    Apply a template's global and per-service settings to one service in
    the docker-compose config.

    :param docker_compose_config: The docker-compose config being currently built.
    :param service_name: The name of the service.
    :param template: The template to add to the service in the docker-compose config.
    :return:
    """
    template_global_config = __get_global_template_config(template)
    template_service_config = __get_service_template_config(template)
    __add_service(docker_compose_config, service_name)

    # Global settings first, then the per-service section scoped under
    # this service's entry.
    deep_merge(docker_compose_config, template_global_config)
    per_service = {'services': {service_name: template_service_config}}
    deep_merge(docker_compose_config, per_service)
def __get_global_template_config(template_name):
    """
    Get the global template config from an autocompose template from the AUTOCOMPOSE_PATH.
    :param template_name: The name of the template.
    :return: The global docker-compose config for the template.
    """
    # Templates live under 'templates/<name>' on the autocompose path.
    return get_config('templates', template_name, DOCKER_COMPOSE_FILE)
def __get_service_template_config(template_name):
    """
    Get the per-service template config from an autocompose template from the AUTOCOMPOSE_PATH.
    :param template_name: The name of the template.
    :return: The per-service docker-compose config for the template.
    """
    # Same template directory as the global config, but a different file.
    return get_config('templates', template_name, DOCKER_COMPOSE_SERVICES_FILE)
def __apply_template_variables(docker_compose_config, template_variables):
    """
    Apply the given template variables to the given docker-compose config.
    :param docker_compose_config: The docker-compose config being currently built.
    :param template_variables: Key-value pairs to replace keys and values in the docker-compose config.
    :return: The result of replace_template_variables (presumably the substituted
             config — confirm against replace_template_variables in util).
    """
    return replace_template_variables(docker_compose_config, template_variables)
def __add_user_config_template_variables(user_config, template_variables):
    """
    Merge template variables declared in the user config into template_variables.
    :param user_config: The user's autocompose configuration dictionary.
    :param template_variables: The substitution map to update in place.
    :return:
    """
    if 'template-variables' not in user_config:
        return
    for name, value in user_config['template-variables'].items():
        # Register both the braced and bare shell-style forms of the variable.
        template_variables["${" + name + "}"] = value
        template_variables["$" + name] = value
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,328 | rapid7/autocompose | refs/heads/master | /autocompose/command_line/command_line.py | #!/usr/bin/env python3
import argparse
import os
import sys
import boto3
import docker
from .command.build import build_command
from .command.clean_containers import clean_containers_command
from .command.clean_images import clean_images_command
from .command.clean_networks import clean_networks_command
from .command.compose import compose_command
from .command.login import login_command
from .command.path import path_command
from .command.push import push_command
from .command.update_images import update_images_command
from ..util import set_service_name
# Maps each CLI command name to the command object that implements it.
commands = {
    'build': build_command,
    'clean-containers': clean_containers_command,
    'clean-images': clean_images_command,
    'clean-networks': clean_networks_command,
    'compose': compose_command,
    'login': login_command,
    'path': path_command,
    'push': push_command,
    'update-images': update_images_command
}
# Top-level argument parser. Per-command arguments are collected verbatim into
# ARGUMENTS (argparse.REMAINDER) and parsed later by the selected command object.
parser = argparse.ArgumentParser(description='Dynamically create docker-compose files.')
parser.add_argument(choices=list(commands.keys()), dest='COMMAND', help='The command to run.')
parser.add_argument('--service-name',
                    help='Explicitly specify the service name instead of assuming it is the name of the current '
                         'directory.')
parser.add_argument('--aws-access-key-id', help='The AWS access key.')
parser.add_argument('--aws-secret-access-key', help='The AWS secret key.')
parser.add_argument('--aws-session-token', help='The AWS session token.')
parser.add_argument('--aws-profile', help='The AWS profile.')
parser.add_argument('--region', default='us-east-1', help='The AWS region.')
parser.add_argument(dest='ARGUMENTS', nargs=argparse.REMAINDER)
def __setup_config_directory():
    """Ensure ~/.autocompose and its config.yml exist, creating them on first run."""
    config_dir = os.path.join(os.environ['HOME'], '.autocompose')
    config_file = os.path.join(config_dir, 'config.yml')
    # Create the directory if absent; refuse to continue if the path exists
    # but is not actually a directory.
    if not os.path.exists(config_dir):
        os.mkdir(config_dir)
    elif not os.path.isdir(config_dir):
        raise Exception('User config directory "' + config_dir + '" is not a directory.')
    # Create an empty config file if absent; refuse if the path is not a file.
    if not os.path.exists(config_file):
        open(config_file, 'w').close()
    elif not os.path.isfile(config_file):
        raise Exception('User config file "' + config_file + '" is not a file.')
def __require_python_version():
    """Abort with a message unless running on Python 3.4 or newer."""
    minimum = (3, 4)
    if sys.version_info < minimum:
        print("Your Python interpreter is too old. Autocompose requires Python 3.4 or higher.")
        exit(1)
def main():
    """
    CLI entry point: validate the environment, parse arguments, and dispatch
    the selected command with a shared AWS session and Docker client.
    """
    __require_python_version()
    args = parser.parse_args()
    command = args.COMMAND

    # Check that the user config directory and file exists.
    __setup_config_directory()

    if command not in commands.keys():
        print('Not a command: ' + command)
        exit(-1)

    if args.service_name is not None:
        set_service_name(args.service_name)
        print('service-name set to ' + args.service_name)

    # Build one shared AWS session and Docker client for the command to use.
    aws_session = boto3.Session(aws_access_key_id=args.aws_access_key_id,
                                aws_secret_access_key=args.aws_secret_access_key,
                                aws_session_token=args.aws_session_token,
                                region_name=args.region,
                                profile_name=args.aws_profile)

    docker_client = docker.APIClient()
    try:
        commands[command].parse_and_execute(args=args.ARGUMENTS, aws_session=aws_session, docker_client=docker_client)
    except Exception:
        # Report the failure and exit non-zero so shell callers can detect it.
        # (Previously the exception was bound to an unused name and the
        # process still exited with status 0 after an error.)
        print("Unexpected error:", sys.exc_info()[1])
        exit(1)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {"/autocompose/command_line/command/clean_networks.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/updater.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/login.py": ["/autocompose/authenticator.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/path.py": ["/autocompose/util.py", "/autocompose/command_line/command/command.py"], "/tests/test_util.py": ["/autocompose/util.py"], "/autocompose/command_line/command/compose.py": ["/autocompose/composer.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/clean_containers.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/pusher.py": ["/autocompose/authenticator.py", "/autocompose/util.py"], "/autocompose/command_line/command/update_images.py": ["/autocompose/updater.py", "/autocompose/command_line/command/command.py"], "/autocompose/authenticator.py": ["/autocompose/util.py"], "/autocompose/command_line/command/clean_images.py": ["/autocompose/cleaner.py", "/autocompose/command_line/command/command.py"], "/autocompose/builder.py": ["/autocompose/constants.py", "/autocompose/pusher.py", "/autocompose/util.py"], "/autocompose/command_line/command/build.py": ["/autocompose/builder.py", "/autocompose/command_line/command/command.py"], "/autocompose/command_line/command/push.py": ["/autocompose/pusher.py", "/autocompose/command_line/command/command.py"], "/autocompose/composer.py": ["/autocompose/authenticator.py", "/autocompose/constants.py", "/autocompose/util.py"], "/autocompose/command_line/command_line.py": ["/autocompose/command_line/command/build.py", "/autocompose/command_line/command/clean_containers.py", "/autocompose/command_line/command/clean_images.py", "/autocompose/command_line/command/clean_networks.py", "/autocompose/command_line/command/compose.py", 
"/autocompose/command_line/command/login.py", "/autocompose/command_line/command/path.py", "/autocompose/command_line/command/push.py", "/autocompose/command_line/command/update_images.py", "/autocompose/util.py"]} |
43,331 | huangchaoxing/Use-a-CNN-to-drive-a-differential-mobile-robot | refs/heads/master | /train.py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 16:47:30 2019
@author: HP
"""
from matplotlib import pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import numpy as np
from data_split import train_valid_split
from steerNet import SteerNet
import torch.optim as optim
from tensorboardX import SummaryWriter
# Training script: fit SteerNet on the steering dataset and plot loss/accuracy.
ITER_NUM=60       # number of epochs
LR=1e-3           # Adam learning rate
BATCH_SIZE=40
TRAIN_COUNT = 619  # NOTE(review): hard-coded dataset sizes — confirm they match the data on disk
VALID_COUNT = 32

train_loader,validation_loader=train_valid_split(batch_size=BATCH_SIZE,valid_portion=0.05)
model = SteerNet()
device = torch.device("cuda")
print(device)
model = model.to(device)
criterion = nn.MSELoss()
# optimizer = optim.Adam(params=model.parameters(),weight_decay=5e-3,lr=LR)
writer = SummaryWriter()
train_loss_dic={}
train_acc_dic = {}
valid_acc_dic = {}
valid_loss_dic={}
print("Ready !")
step=0
for epoch in range(ITER_NUM):
    epoch_loss=0
    epoch_valid_loss=0
    print("This is epoch:",epoch)
    # NOTE(review): the optimizer is re-created every epoch, which resets
    # Adam's moment estimates — confirm this is intentional.
    optimizer = optim.Adam(params=model.parameters(),weight_decay=1e-2,lr=LR)
    # LR = LR*0.95
    right_train = 0
    right_valid = 0
    # --- training pass ---
    for data in train_loader:
        image=data["image"].to(device)
        label=data["steering"].to(device)
        optimizer.zero_grad()
        output=model(image)
        output = torch.squeeze(output,1)
        # NOTE(review): MSE loss combined with argmax accuracy assumes the
        # steering labels are one-hot vectors — confirm against steerDS.
        loss=criterion(output,label)
        loss.backward()
        optimizer.step()
        epoch_loss+=loss.item()
        prediction = torch.argmax(output.data,1)
        # print("pred",prediction)
        # print("la",torch.argmax(label,1))
        right_train = right_train + (prediction == torch.argmax(label,1)).sum().item()
        # print("eq",(prediction == torch.argmax(label,1)))
        step=step+1
    train_acc_dic[epoch] = right_train/TRAIN_COUNT
    train_loss_dic[epoch]=epoch_loss/(TRAIN_COUNT/BATCH_SIZE)
    print("------------------------")
    print("The epoch acc is,",train_acc_dic[epoch],"The epoch loss is,",train_loss_dic[epoch])
    step = 0
    # --- validation pass (no optimizer step; gradients are still computed) ---
    for data in validation_loader:
        image=data["image"].to(device)
        label=data["steering"].to(device)
        output=model(image)
        output = torch.squeeze(output,1)
        loss=criterion(output,label)
        epoch_valid_loss+=loss.item()
        prediction = torch.argmax(output.data,1)
        right_valid = right_valid + (prediction == torch.argmax(label,1)).sum().item()
        # _, prediction = torch.max(output.data, 1)
        # right_valid = right_valid + (prediction == label).sum().item()
    valid_acc_dic[epoch] = right_valid/VALID_COUNT
    valid_loss_dic[epoch]= epoch_valid_loss/(VALID_COUNT/BATCH_SIZE)
    print("The valid acc is,",valid_acc_dic[epoch],"The valid loss is,",valid_loss_dic[epoch])
    print("******************************")
print("training finished !")
# Move the model back to CPU before saving so the checkpoint loads anywhere.
model = model.to(torch.device("cpu"))
torch.save(model.state_dict(), "ourNet.pt")
# Plots: train loss, valid loss, both losses, both accuracies.
plt.figure(1)
eps=train_loss_dic.keys()
plt.plot(eps,train_loss_dic.values())
plt.show()
plt.figure(2)
plt.plot(eps,valid_loss_dic.values())
plt.show()
plt.figure(3)
plt.plot(eps,valid_loss_dic.values(),eps,train_loss_dic.values())
plt.show()
plt.figure(4)
plt.plot(eps,train_acc_dic.values(),eps,valid_acc_dic.values())
plt.show()
| {"/train.py": ["/data_split.py"]} |
43,332 | huangchaoxing/Use-a-CNN-to-drive-a-differential-mobile-robot | refs/heads/master | /data_split.py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 16:59:13 2019
@author: HP
"""
import torch
import numpy as np
import torchvision
#from utils import plot_images
import steerDS
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
import matplotlib.pyplot as plt
def imshow(img):
    """Undo the (0.5, 0.5) normalization and display a CHW image tensor with pyplot."""
    denormalized = img / 2 + 0.5
    # Convert CHW to HWC, which is what pyplot expects.
    pixels = np.transpose(denormalized.numpy(), (1, 2, 0))
    plt.imshow(pixels)
    plt.show()
def train_valid_split(batch_size, valid_portion, data_root="../dev_data/training_data"):
    """
    Build training and validation DataLoaders over the steering dataset.

    :param batch_size: Batch size used by both loaders.
    :param valid_portion: Fraction (0-1) of the dataset reserved for validation.
    :param data_root: Directory containing the .jpg training images
                      (previously hard-coded; defaulted for backward compatibility).
    :return: Tuple (training_loader, validation_loader).
    """
    # Resize to 72x72 and normalize each channel to roughly [-1, 1].
    normalization = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    transform = transforms.Compose([transforms.Resize((72, 72)), transforms.ToTensor(), normalization])
    data_set = steerDS.SteerDataSet(data_root, ".jpg", transform)
    num_data = len(data_set)
    indices = list(range(num_data))
    split = int(np.floor(valid_portion * num_data))
    # Prepare the split index for training and validation.
    # NOTE(review): indices are not shuffled before splitting, so validation is
    # always the first `split` samples — confirm this is intended.
    train_index, valid_index = indices[split:], indices[:split]
    print("Train")
    print(len(train_index))
    print(len(valid_index))
    training_sampler = SubsetRandomSampler(train_index)
    validation_sampler = SubsetRandomSampler(valid_index)
    # The samplers handle randomization, so shuffle must stay False here.
    training_loader = torch.utils.data.DataLoader(data_set, batch_size=batch_size, shuffle=False, sampler=training_sampler, num_workers=0)
    validation_loader = torch.utils.data.DataLoader(data_set, batch_size=batch_size, shuffle=False, sampler=validation_sampler, num_workers=0)
    return training_loader, validation_loader
# if(__name__ == "__main__"):
# train_loader,validation_loader=train_valid_split(batch_size=1,valid_portion=0.1)
# for data in train_loader:
# image = data["image"]
# label = data["steering"]
# imshow(image[0])
# print(label)
# # imshow(torchvision.utils.make_grid(images))
# print(labels)
| {"/train.py": ["/data_split.py"]} |
43,333 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/tutorial/tut_cartpole_with_save.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib.pyplot as plt
from tut_policy_gradient_agent import agent
# Python 2/3 compatibility: on Python 3 there is no xrange builtin, so fall
# back to range.
try:
    xrange = xrange
except:
    xrange = range

env = gym.make('CartPole-v0')
gamma = 0.99  # discount factor for future rewards
def discount_rewards(r, gamma=0.99):
    """
    Compute discounted returns for a 1D float array of per-step rewards.

    :param r: 1D numpy array of rewards, one per timestep.
    :param gamma: Discount factor (defaults to the module's 0.99; previously
                  read from the module-level global).
    :return: Array of the same shape where each entry is that reward plus the
             discounted sum of all rewards after it.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    # Walk backwards so each step accumulates the discounted future return.
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
# Train a policy-gradient agent on CartPole, with optional checkpoint
# restore/save via tf.train.Saver and an optional replay of recorded episodes.
tf.reset_default_graph() #Clear the Tensorflow graph.

myAgent = agent(lr=1e-2,s_size=4,a_size=2,h_size=10) #Load the agent.

total_episodes = 8000 #Set total number of episodes to train agent on.
max_ep = 201
update_frequency = 5
doTrain = True

init = tf.global_variables_initializer()

avgList = []
replayList = []
saver = tf.train.Saver()

# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init)
    # Optionally restore a previous checkpoint and/or freeze training.
    print("Restore session?")
    restore = input("Y/N (No): ").lower()
    if len(restore) > 0 and restore[0] == 'y':
        saver.restore(sess, "tmp/model.ckpt")
        print("Model restored.")
        print("Continue training?")
        train = input("Y/N (Yes): ").lower()
        if len(train) > 0 and train[0] == 'n':
            doTrain = False
            print("Model will not be updated.")
    i = 0
    total_reward = []
    total_length = []
    # Gradient buffer accumulates per-episode gradients between batched updates.
    gradBuffer = sess.run(tf.trainable_variables())
    for ix,grad in enumerate(gradBuffer):
        gradBuffer[ix] = grad * 0
    while i < total_episodes:
        s = env.reset()
        running_reward = 0
        ep_history = []
        for j in range(max_ep):
            #Probabilistically pick an action given our network outputs.
            a_dist = sess.run(myAgent.output,feed_dict={myAgent.state_in:[s]} )
            a = np.random.choice(a_dist[0],p=a_dist[0])
            a = np.argmax(a_dist == a)

            s1,r,d,_ = env.step(a) #Get our reward for taking an action given a bandit.
            ep_history.append([s,a,r,s1])
            s = s1
            running_reward += r
            if d == True:
                #Update the network.
                if doTrain:
                    ep_history = np.array(ep_history)
                    ep_history[:,2] = discount_rewards(ep_history[:,2])
                    feed_dict={myAgent.reward_holder:ep_history[:,2],
                        myAgent.action_holder:ep_history[:,1],myAgent.state_in:np.vstack(ep_history[:,0])}
                    grads = sess.run(myAgent.gradients, feed_dict=feed_dict)
                    for idx,grad in enumerate(grads):
                        gradBuffer[idx] += grad

                    # Apply the accumulated gradients every update_frequency episodes.
                    if i % update_frequency == 0 and i != 0:
                        feed_dict= dictionary = dict(zip(myAgent.gradient_holders, gradBuffer))
                        _ = sess.run(myAgent.update_batch, feed_dict=feed_dict)
                        for ix,grad in enumerate(gradBuffer):
                            gradBuffer[ix] = grad * 0

                total_reward.append(running_reward)
                total_length.append(j)
                break

        #Record the actions taken
        if i%(total_episodes/10)==0:
            actionList = np.array(ep_history)[:,1]
            replayList.append(actionList)

        #Update our running tally of scores.
        if i % 100 == 0:
            avgList.append(np.mean(total_reward[-100:]))
            print(str((i/total_episodes)*100) + "%")
            #print(running_reward)
        i += 1

    # Optionally replay the recorded action sequences in the environment.
    print("Display history?")
    display = input("Y/N [No]: ").lower()
    if len(display) > 0 and display[0] == 'y':
        for actionList in replayList:
            env.reset()
            env.render()
            for action in actionList:
                env.step(action)
                env.render()

    # Plot per-episode reward plus the 100-episode moving average.
    avgX = np.linspace(0, len(total_reward), len(avgList))
    plt.plot(total_reward)
    plt.plot(avgX, avgList)
    plt.show()

    print("Save model?");
    save = input("Y/N (No): ").lower()
    if len(save) > 0 and save[0] == 'y':
        save_path = saver.save(sess, "tmp/model.ckpt")
        print("Model saved in file: %s" % save_path)
| {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,334 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/tutorial/tut_cartpole.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib.pyplot as plt
from tut_policy_gradient_agent import agent
# Python 2/3 compatibility: on Python 3 there is no xrange builtin, so fall
# back to range.
try:
    xrange = xrange
except:
    xrange = range

env = gym.make('CartPole-v0')
gamma = 0.99  # discount factor for future rewards
def discount_rewards(r, gamma=0.99):
    """
    Compute discounted returns for a 1D float array of per-step rewards.

    :param r: 1D numpy array of rewards, one per timestep.
    :param gamma: Discount factor (defaults to the module's 0.99; previously
                  read from the module-level global).
    :return: Array of the same shape where each entry is that reward plus the
             discounted sum of all rewards after it.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    # Walk backwards so each step accumulates the discounted future return.
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
# Train a policy-gradient agent on CartPole (no checkpointing), then plot
# per-episode reward and a 100-episode moving average.
tf.reset_default_graph() #Clear the Tensorflow graph.

myAgent = agent(lr=1e-2,s_size=4,a_size=2,h_size=8) #Load the agent.

total_episodes = 5000 #Set total number of episodes to train agent on.
max_ep = 201
update_frequency = 25

init = tf.global_variables_initializer()
avg_rewards = []

# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init)
    i = 0
    total_reward = []
    total_length = []
    # Gradient buffer accumulates per-episode gradients between batched updates.
    gradBuffer = sess.run(tf.trainable_variables())
    for ix,grad in enumerate(gradBuffer):
        gradBuffer[ix] = grad * 0

    while i < total_episodes:
        s = env.reset()
        running_reward = 0
        ep_history = []
        for j in range(max_ep):
            #Probabilistically pick an action given our network outputs.
            a_dist = sess.run(myAgent.output,feed_dict={myAgent.state_in:[s]})
            a = np.random.choice(a_dist[0],p=a_dist[0])
            a = np.argmax(a_dist == a)
            #if i%1000 == 0: env.render()
            s1,r,d,_ = env.step(a) #Get our reward for taking an action given a bandit.
            ep_history.append([s,a,r,s1])
            s = s1
            running_reward += r
            if d == True:
                #Update the network.
                ep_history = np.array(ep_history)
                ep_history[:,2] = discount_rewards(ep_history[:,2])
                feed_dict={myAgent.reward_holder:ep_history[:,2],
                    myAgent.action_holder:ep_history[:,1],myAgent.state_in:np.vstack(ep_history[:,0])}
                grads = sess.run(myAgent.gradients, feed_dict=feed_dict)
                for idx,grad in enumerate(grads):
                    gradBuffer[idx] += grad

                # Apply the accumulated gradients every update_frequency episodes.
                if i % update_frequency == 0 and i != 0:
                    feed_dict= dictionary = dict(zip(myAgent.gradient_holders, gradBuffer))
                    sess.run(myAgent.update_batch, feed_dict=feed_dict)
                    for ix,grad in enumerate(gradBuffer):
                        gradBuffer[ix] = grad * 0

                total_reward.append(running_reward)
                total_length.append(j)
                break

        #Update our running tally of scores.
        if i % 100 == 0:
            avg_rewards.append(np.mean(total_reward[-100:]))
            print(str((i/total_episodes)*100) + "%")
        i += 1

avgX = np.linspace(0, len(total_reward), len(avg_rewards))
plt.plot(total_reward)
plt.plot(avgX, avg_rewards)
#plt.plot(avg_rewards)
plt.show() | {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,335 | stickzman/honors_thesis | refs/heads/master | /Q-Value/Neural Network/cartpole.py | import numpy as np
import random
import gym
import tensorflow as tf
import matplotlib.pyplot as plt
# Q-learning on CartPole with a single linear layer mapping the 4-dim
# observation to 2 action values, trained by squared TD error.
learningRate = 0.02

env = gym.make("CartPole-v0")

tf.reset_default_graph()
#Build TensorFlow graph
observations = tf.placeholder(shape = [1,4], dtype=tf.float32)
weights = tf.Variable(tf.random_uniform([4,2],0,0.01))
Qvals = tf.matmul(observations, weights)
chosenAction = tf.argmax(Qvals, 1)

#Create loss function
realQvals = tf.placeholder(shape=[1, 2], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(realQvals - Qvals))
trainer = tf.train.GradientDescentOptimizer(learning_rate=learningRate)
update = trainer.minimize(loss)

init = tf.global_variables_initializer()
# NOTE(review): this session is created, used once, and never closed; the
# with-block below creates a second session that shadows it — likely dead code.
sess = tf.Session()
sess.run(init)

discountRate = 0.1
e = .5
totalEps = 5000
rList = []
with tf.Session() as sess:
    sess.run(init)
    for i in range(totalEps):
        rAll = 0
        first = False
        obs = env.reset()
        for t in range(200):
            # Greedy action from the current Q estimates.
            action = sess.run(chosenAction, {observations: [obs]})[0]
            qVals = sess.run(Qvals, {observations: [obs]})
            #if np.random.rand(1) < e:
            #    action = random.randint(0, 1)
            newObs, reward, done, _ = env.step(action)
            #if done == True: reward = -1
            # TD target: r + discountRate * max_a' Q(s', a').
            newQvals = sess.run(Qvals, {observations: [newObs]})
            futureReward = np.max(newQvals)
            qVals[0][action] = reward + discountRate * futureReward
            #Update the model
            sess.run(update, {realQvals: qVals, observations: [obs]})
            obs = newObs
            #if i%500 == 0: env.render()
            rAll += reward
            if done == True:
                e = 1/(i+1)
                #e = 1./((i/50) + 10)
                break
        rList.append(rAll)
        print("Completed episode " + str(i))

#Graph the total rewards per episode
plt.plot(rList)
plt.show() | {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,336 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/tutorial/tut_policy_gradient_agent.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
class agent():
    """Policy-gradient (REINFORCE) agent: one hidden layer, softmax action output."""

    def __init__(self, lr, s_size, a_size, h_size):
        """
        :param lr: Learning rate for the Adam optimizer.
        :param s_size: Size of the observation/state vector.
        :param a_size: Number of discrete actions.
        :param h_size: Number of hidden units.
        """
        #These lines established the feed-forward part of the network. The agent takes a state and produces an action.
        self.state_in= tf.placeholder(shape=[None,s_size],dtype=tf.float32)
        hidden = slim.fully_connected(self.state_in,h_size,biases_initializer=None,activation_fn=tf.nn.relu)
        self.output = slim.fully_connected(hidden,a_size,activation_fn=tf.nn.softmax,biases_initializer=None)
        self.chosen_action = tf.argmax(self.output,1)

        #The next six lines establish the training proceedure. We feed the reward and chosen action into the network
        #to compute the loss, and use it to update the network.
        self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)
        self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32)

        # Flatten the batched output and gather the probability assigned to
        # each action that was actually taken.
        self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
        self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)

        #self.responsible_outputs_array = tf.slice(self.output, [0, self.action_holder], [tf.shape(self.output)[0], 1])
        #self.responsible_outputs = tf.reshape(self.responsible_outputs_array, [-1])

        # REINFORCE loss: negative log-probability of taken actions, weighted
        # by the (discounted) rewards fed into reward_holder.
        self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder)

        # One placeholder per trainable variable, so gradients accumulated
        # outside the graph can be applied as a batch via update_batch.
        tvars = tf.trainable_variables()
        self.gradient_holders = []
        for idx,var in enumerate(tvars):
            placeholder = tf.placeholder(tf.float32,name=str(idx)+'_holder')
            self.gradient_holders.append(placeholder)

        self.gradients = tf.gradients(self.loss,tvars)

        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders,tvars)) | {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,337 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/frozenlake.py | import numpy as np
import gym
from policy_gradient_agent import Agent
import matplotlib.pyplot as plt
# Run a policy-gradient Agent on FrozenLake, feeding states as one-hot vectors.
env = gym.make("FrozenLake-v0")
# NOTE(review): see policy_gradient_agent.Agent for what these positional
# arguments mean — they are not documented at this call site.
agent = Agent(.2, 16, 4, 20, 10, .99)

num_eps = 10000
total_rewards = []
avg_rewards = []
for i in range(num_eps):
    s = env.reset()
    running_reward = 0
    for t in range(999):
        # Encode the discrete state as a one-hot vector of length 16.
        a = agent.chooseAction(np.identity(16)[s:s+1][0])
        newS, r, d, _ = env.step(a)
        agent.observe(np.identity(16)[s:s+1][0], a, r, d)
        running_reward += r
        s = newS
        if d:
            if i%100 == 0:
                print(str(i/num_eps*100) + "%")
                avg_rewards.append(np.mean(total_rewards[-100:]))
            total_rewards.append(running_reward)
            break

# Plot per-episode reward plus the 100-episode moving average.
avgX = np.linspace(0, len(total_rewards), len(avg_rewards))
plt.plot(total_rewards)
plt.plot(avgX, avg_rewards)
plt.show() | {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,338 | stickzman/honors_thesis | refs/heads/master | /Actor-Critic/critic.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib.pyplot as plt
# Online actor-critic on CartPole: a value network (critic) estimates V(s),
# and a policy network (actor) is updated with an advantage signal.
env = gym.make("CartPole-v0")

val_lr = 0.01        # critic learning rate
pol_lr = 0.001       # actor learning rate
discount_rate = .98
state_size = 4
max_eps = 5000
max_timesteps = 201

#Define Tensorflow graph
tf.reset_default_graph()
#Critic
state_in = tf.placeholder(shape=[None,state_size], dtype=tf.float32)
hidden_val_layer = slim.fully_connected(state_in, 4)
value_output = slim.fully_connected(hidden_val_layer, 1, biases_initializer=None, activation_fn=None)
#Actor
hidden_pol_layer = slim.fully_connected(state_in, 8, biases_initializer=None)
pol_output = slim.fully_connected(hidden_pol_layer, 2, biases_initializer=None, activation_fn=tf.nn.softmax)

#Update graph error
#Actor
advantage_holder = tf.placeholder(shape=[1], dtype=tf.float32)
action_holder = tf.placeholder(shape=[1], dtype=tf.int32)
# Gather the probability of the taken action from the flattened policy output.
indexes = tf.range(0, tf.shape(pol_output)[0]) * tf.shape(pol_output)[1] + action_holder
responsible_outputs = tf.gather(tf.reshape(pol_output, [-1]), indexes)
pol_loss = -tf.reduce_mean(tf.log(responsible_outputs)*advantage_holder)
pol_optimizer = tf.train.AdamOptimizer(learning_rate=pol_lr)
update_pol = pol_optimizer.minimize(pol_loss)
#Critic Update
value_holder = tf.placeholder(shape=[None, 1], dtype=tf.float32)
val_loss = tf.reduce_sum(tf.square(value_holder - value_output))
val_optimizer = tf.train.AdamOptimizer(learning_rate=val_lr)
update_values = val_optimizer.minimize(val_loss)

total_rewards = []
avg_rewards = []
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epNum in range(max_eps):
        s = env.reset()
        ep_rewards = 0
        ep_history = []
        for t in range(max_timesteps):
            # Sample an action from the current policy distribution.
            a_dist = sess.run(pol_output, {state_in:[s]})
            a = np.random.choice(len(a_dist[0]), p=a_dist[0])
            #print(sess.run(value_output, {state_in:[s]}))
            #a = np.random.choice([0, 1])
            next_state, r, d, _ = env.step(a)
            ep_history.append([s, r])
            #env.render()
            #Update Actor
            # NOTE(review): advantage here is V(s') - V(s); the usual TD
            # advantage is r + gamma*V(s') - V(s) — confirm the omission of
            # the immediate reward is intentional.
            state_value = sess.run(value_output, {state_in:[s]})
            next_state_value = sess.run(value_output, {state_in:[next_state]})
            advantage = next_state_value[0] - state_value[0]
            sess.run(update_pol, {state_in:[s], action_holder:[a], advantage_holder:advantage})
            #Update Values
            next_state_value = sess.run(value_output, {state_in:[next_state]})
            state_value = r + discount_rate * (next_state_value[0])
            sess.run(update_values, {state_in:[s], value_holder: [state_value]})
            s = next_state
            ep_rewards += r
            if d:
                #print("------------------------------------")
                total_rewards.append(ep_rewards)
                if epNum%100 == 0:
                    avg_rewards.append(np.mean(total_rewards[-100:]))
                    print(str(epNum/max_eps*100) + "%")
                break
    #Update Critic
    #ep_history = np.array(ep_history)
    #disc_rews = discount_rewards(ep_history[:,1])
    #print(sess.run(val_loss, {value_holder:np.vstack(disc_rews), state_in:np.vstack(ep_history[:,0])}))
    #sess.run(update_values, {value_holder:np.vstack(disc_rews), state_in:np.vstack(ep_history[:,0])})

# Plot per-episode reward plus the 100-episode moving average.
avgX = np.linspace(0, len(total_rewards), len(avg_rewards))
plt.plot(total_rewards)
plt.plot(avgX, avg_rewards)
plt.show() | {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,339 | stickzman/honors_thesis | refs/heads/master | /Convolutional/convol_alt.py | import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import matplotlib.pyplot as plt
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lakeEngineFullBoard import Env
def oneHotEncode(arr, size):
    """
    One-hot encode an index (or array of indices) into a vector of length size.

    :param arr: Integer index, or array of indices, whose position(s) to set to 1.
    :param size: Length of the returned vector.
    :return: A float numpy array of zeros with 1.0 at the given position(s).
    """
    # (A dead, earlier implementation that built a list of one-hot rows was
    # stored here as a triple-quoted string; it has been removed.)
    res = np.zeros(size)
    res[arr] = 1
    return res
# Q-learning on the custom lake environment (Env from lakeEngineFullBoard):
# the network sees both the board layout and the player's one-hot position.
lr = .01

tf.reset_default_graph()
#These lines establish the feed-forward part of the network used to choose actions
boardInput = tf.placeholder(shape=[1, 16],dtype=tf.float32)
playerInput = tf.placeholder(shape=[1, 16],dtype=tf.float32)
hidP = slim.fully_connected(playerInput, 20, biases_initializer=None)[0]
hidB = slim.fully_connected(boardInput, 20)[0]
Qout = slim.fully_connected([hidB, hidP], 4, activation_fn=None, biases_initializer=None)[0]
#W = tf.Variable(tf.random_uniform([1,5,4],0,0.01))
#Qout = tf.reduce_mean(tf.matmul(inputs1,W), 1)
predict = tf.argmax(Qout)

#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
nextQ = tf.placeholder(shape=[1,4],dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=lr)
updateQVals = trainer.minimize(loss)

init = tf.global_variables_initializer()

env = Env()
# Set learning parameters
y = .99     # discount factor
e = 1       # epsilon for epsilon-greedy exploration
num_episodes = 1000
success = False
firstSuccessEp = -1
totalSuccessEps = 0
lastFailedEp = -1
#create lists to contain total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
    sess.run(init)
    for i in range(num_episodes):
        #Reset environment and get first new observation
        boardState, s = env.reset()
        s = oneHotEncode(s, 16)
        rAll = 0
        d = False
        j = 0
        #The Q-Network
        while j < 99:
            j+=1
            #Choose an action by greedily (with e chance of random action) from the Q-network
            a,allQ = sess.run([predict,Qout],feed_dict={playerInput:[s], boardInput:[boardState]})
            if np.random.rand(1) < e:
                a = random.randint(0, 3)
            #Get new state and reward from environment
            s1,r,d = env.step(a)
            s1 = oneHotEncode(s1, 16)
            #Obtain the Q' values by feeding the new state through our network
            Q1 = sess.run(Qout,feed_dict={playerInput:[s1], boardInput:[boardState]})
            #Obtain maxQ' and set our target value for chosen action.
            maxQ1 = np.max(Q1)
            targetQ = allQ
            targetQ[a] = r + y*maxQ1
            #Train our network using target and predicted Q values
            sess.run([updateQVals],feed_dict={playerInput:[s], boardInput:[boardState], nextQ:[targetQ]})
            rAll += r
            s = s1
            #env.render()
            if d == True:
                # NOTE(review): success/failure bookkeeping assumes r==1 marks
                # a winning terminal step — confirm against lakeEngineFullBoard.
                if r==1:
                    totalSuccessEps += 1
                    if success == False:
                        success = True
                        firstSuccessEp = i
                else:
                    lastFailedEp = i
                    print("FAILED")
                #Reduce chance of random action as we train the model.
                print("Episode finished after " + str(j) + " timesteps")
                e = 1./((i/50) + 10)
                #e = 1/(i+1)
                break
        rList.append(rAll)

print()
print("Percent of successful episodes: " + str((totalSuccessEps/num_episodes)*100) + "%")
print()
print("First successful episode: " + str(firstSuccessEp))
print()
print("Last failed episode: " + str(lastFailedEp))

plt.plot(rList)
plt.show()
| {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,340 | stickzman/honors_thesis | refs/heads/master | /genAlg/genAlg.py | import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import gym
import matplotlib.pyplot as plt
from genAlgAgent import Population
from genAlgAgent14Genes import Population as Population14G
from genAlgAgent10Genes import Population as Population10G
import argparse
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
#Choose the number of genes to use with -g=[10, 14, or 60]
parser.add_argument("-g", "-geneType", type=int, default=60, choices=[10, 14, 60])
#Set the numpy random seed using -s
parser.add_argument("-s", "-seed", type=int, default=-1)
#Set agent to choose actions deterministically instead of stochastically
# NOTE(review): argparse's type=bool converts any non-empty string to True
# ("-d False" still enables it); action='store_true' would behave as intended.
parser.add_argument("-d", "-deterministic", type=bool, default=False)
args = parser.parse_args()

# Genetic-algorithm hyperparameters.
populationSize=10
numParents=4
generationLength=20
numGenerations=10
minWeight=0
maxWeight=100
mutationProb=0.01

# Policy-network dimensions (CartPole-v0: 4 state inputs, 2 actions).
s_size = 4
h_size = 10
a_size = 2

tf.reset_default_graph() #Clear the Tensorflow graph.

#These lines established the feed-forward part of the network. The agent takes a state and produces an action.
state_in= tf.placeholder(shape=[1 , s_size],dtype=tf.float32)
hidden = slim.fully_connected(state_in, h_size, biases_initializer=None)
output = slim.fully_connected(hidden, a_size, biases_initializer=None, activation_fn=tf.nn.softmax)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

if args.s != -1:
    np.random.seed(args.s)

# Pick the population flavour that matches the requested genome size.
if args.g == 60:
    pop = Population('CartPole-v0', sess, state_in, output, genSize=populationSize, numParents=numParents, genLength=generationLength, numGens=numGenerations, minW=minWeight, maxW=maxWeight, mutProb=mutationProb, deterministic=args.d)
elif args.g == 14:
    pop = Population14G('CartPole-v0', sess, state_in, output, genSize=populationSize, numParents=numParents, genLength=generationLength, numGens=numGenerations, minW=minWeight, maxW=maxWeight, mutProb=mutationProb, deterministic=args.d)
elif args.g == 10:
    pop = Population10G('CartPole-v0', sess, state_in, output, genSize=populationSize, numParents=numParents, genLength=generationLength, numGens=numGenerations, minW=minWeight, maxW=maxWeight, mutProb=mutationProb, deterministic=args.d)
pop.run()

#bestAgent = pop.getBestAgent()
#bestAgent.viewRun()

# Plot the best fitness achieved in each generation.
plt.plot(pop.best)
plt.show()
43,341 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/pol_grad_cartpole.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib.pyplot as plt
from policy_gradient_agent import Agent
# Train a REINFORCE policy-gradient agent on CartPole-v0 and plot the
# per-episode reward together with a 100-episode moving average.
env = gym.make('CartPole-v0')

myAgent = Agent(lr=1e-2, s_size=4, a_size=2, h_size=8, b_size=10, gamma=.99) #Load the agent.

total_episodes = 5000 #Set total number of episodes to train agent on.
max_ep_length = 201

i = 0
total_reward = []   # reward obtained in each finished episode
avg_rewards = []    # mean of the last 100 episodes, sampled every 100 episodes
for i in range(total_episodes):
    s = env.reset()
    running_reward = 0
    for j in range(max_ep_length):
        #Probabilistically pick an action given our network outputs.
        a = myAgent.chooseAction(s)
        #if i%1000 == 0: env.render()
        s1,r,d,_ = env.step(a) #Get our reward for taking an action given a bandit.
        myAgent.observe(s, a, r, d)
        s = s1
        running_reward += r
        if d == True:
            total_reward.append(running_reward)
            break

    #Update our running tally of scores.
    if i % 100 == 0:
        avg_rewards.append(np.mean(total_reward[-100:]))
        print(str((i/total_episodes)*100) + "%")

# Stretch the averaged curve over the same x-range as the raw rewards.
avgX = np.linspace(0, len(total_reward), len(avg_rewards))
plt.plot(total_reward)
plt.plot(avgX, avg_rewards)
plt.show()
43,342 | stickzman/honors_thesis | refs/heads/master | /genAlg/genAlgAgent14Genes.py | import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import gym
import matplotlib.pyplot as plt
import hashlib
class Population:
    """Genetic-algorithm population of gym agents (14-gene encoding).

    Each individual (``Indv``) encodes the policy-network weights as a genome.
    Evolution loop: evaluate every agent for ``genLength`` episodes, keep the
    ``numParents`` fittest, produce children by uniform crossover plus
    per-gene mutation, and overwrite the weakest individuals with them.
    """

    def __init__(self, envName, sess, state_in, output, genSize, numParents, numGens, genLength=5, minW=0, maxW=1, crsProb=.90, mutProb=.0001, deterministic=False):
        self.best = []                # best average fitness of each generation
        self.genLength = genLength    # episodes per agent per generation
        self.minW = minW              # lower bound for random weights
        self.maxW = maxW              # upper bound for random weights
        self.mutProb = mutProb        # per-gene mutation probability
        self.numGens = numGens
        self.crossoverProb = crsProb  # NOTE(review): stored but never used below
        self.numParents = numParents//2*2  # parents are paired, so force an even count
        self.gen = []
        self.oldGenIDs = []
        self.sess = sess
        self.state_in = state_in
        self.output = output
        for i in range(genSize):
            # BUGFIX: forward the caller's `deterministic` flag; it was
            # hard-coded to False, silently ignoring the -d option.
            self.gen.append(Indv(sess, state_in, output, gym.make(envName), minW=minW, maxW=maxW, deterministic=deterministic))

    def run(self):
        """Evolve the population for ``numGens`` generations."""
        for i in range(self.numGens):
            self.runGen()
            print("-------Gen " + str(i) + "-------")
            self.displayGen()
            self.displayNew()
            self.oldGenIDs = self.__getIDs(self.gen)
            parents = self.__parentSelection()
            self.__crossover(parents)
            for agent in self.gen:
                if agent.done:
                    agent.reset()

    def runGen(self):
        """Run every agent for ``genLength`` episodes; record the best fitness."""
        for t in range(self.genLength):
            while self.__minOneAgentRunning():
                for agent in self.gen:
                    agent.step()
            self.__next()
        fit = self.__getFitness()
        self.best.append(np.amax(fit))
        print("Best fitness: " + str(self.best[-1]))

    def displayGen(self):
        """Print every agent's id and fitness, sorted worst to best."""
        gen = []
        fitIDs = np.argsort(self.__getFitness())
        for id in fitIDs:
            gen.append(self.gen[id])
        for agent in gen:
            print("ID: " + str(agent.id), "Fitness: " + str(agent.avgFitness()))

    def displayNew(self):
        """Print the agents created since the previous generation."""
        oldGen = set(self.oldGenIDs)
        gen = set(self.__getIDs(self.gen))
        newIDs = gen - oldGen
        newAgents = []
        for id in newIDs:
            #Finds agent that has a matching id and adds it to the newAgents list
            newAgents.append(next(agent for agent in self.gen if agent.id == id))
        print("--------New Agents--------")
        for agent in newAgents:
            print("ID: " + str(agent.id), "Fitness: " + str(agent.avgFitness()))

    def getBestAgent(self):
        """Return the individual with the highest average fitness."""
        fit = self.__getFitness()
        return self.gen[np.argsort(fit)[-1]]

    def __getIDs(self, gen):
        """Collect the genome-hash ids of the given agents."""
        ids = []
        for agent in gen:
            ids.append(agent.id)
        return ids

    def __next(self):
        """Move every agent on to its next evaluation episode."""
        for agent in self.gen:
            agent.next()

    def __parentSelection(self):
        """Truncation selection: return the ``numParents`` fittest agents."""
        fitness = self.__getFitness()
        idxs = np.argsort(fitness)[-self.numParents:]
        parents = []
        for id in idxs:
            parents.append(self.gen[id])
        return parents

    def __crossover(self, parents):
        """Pair parents, uniform-crossover their genomes, mutate the children,
        and overwrite the weakest agents with the offspring genomes."""
        offspringGenes = []
        for i in range(0, len(parents), 2):
            g1 = parents[i].genome
            g2 = parents[i+1].genome
            kid1 = []
            kid2 = []
            for gene1, gene2 in zip(g1, g2):
                # Each gene goes to one child or the other with equal chance.
                if np.random.rand() < .5:
                    kid1.append(gene1)
                    kid2.append(gene2)
                else:
                    kid1.append(gene2)
                    kid2.append(gene1)
            offspringGenes.append(self.__mutate(kid1))
            offspringGenes.append(self.__mutate(kid2))
        weakIdxs = np.argsort(self.__getFitness())[:len(offspringGenes)]
        for i, gene in zip(weakIdxs, offspringGenes):
            self.gen[i].reset(gene)

    def __mutate(self, genome):
        """With probability ``mutProb`` per gene, replace it with a fresh
        uniform-random gene in [minW, maxW). Returns the genome."""
        # BUGFIX: the old loop rebound its loop variable (`gene = ...`), which
        # never touched the list, so mutation silently did nothing.
        # Assign through the index so the mutation actually sticks.
        for idx in range(len(genome)):
            if np.random.rand() < self.mutProb:
                genome[idx] = (np.random.rand(len(genome[idx])) * (self.maxW-self.minW)) + self.minW
        return genome

    def __getProbs(self):
        """Fitness-proportional selection probabilities (currently unused)."""
        fitness = self.__getFitness()
        total = sum(fitness)
        probs = []
        for fit in fitness:
            probs.append(fit/total)
        return probs

    def __getFitness(self):
        """Average fitness of every agent in the current generation."""
        fitness = []
        for agent in self.gen:
            fitness.append(agent.avgFitness())
        return fitness

    def __minOneAgentRunning(self):
        """True while at least one agent's current episode is unfinished."""
        res = False
        for indv in self.gen:
            if indv.done == False: res = True
        return res
class Indv:
    """One genetic-algorithm individual: a genome of network-weight rows plus
    the gym environment it is evaluated in.

    The genome is a list of 1-D numpy arrays, one per row of each trainable
    TF variable; ``weightDict`` maps each variable to its rows so the policy
    can be evaluated via feed_dict without ever assigning the variables.
    """
    def __init__(self, sess, state_in, output, env, minW=0, maxW=1, deterministic=False, genome=None):
        self.state_in = state_in
        self.output = output
        self.env = env
        self.sess = sess
        self.maxW = maxW
        self.minW = minW
        self.determ = deterministic
        self.updateGenome(genome)  # random genome when None
        self.reset()

    def step(self, render=False):
        """Advance one environment timestep (no-op once the episode is done)."""
        if not self.done:
            feed_dict = self.weightDict.copy()
            feed_dict[self.state_in] = [self.input]
            a_dist = self.sess.run(self.output,feed_dict)
            if (self.determ):
                #Deterministic Selection
                a = np.argmax(a_dist)
            else:
                #Stochastic Selection: sample an action from the softmax output
                a = np.random.choice(a_dist[0],p=a_dist[0])
                a = np.argmax(a_dist == a)
            self.input, r, self.done, _ = self.env.step(a)
            self.fitness += r
            if render: self.env.render()

    def viewRun(self):
        """Replay one episode with rendering and print its length."""
        self.reset()
        i = 0
        while not self.done:
            i+= 1
            self.step(render = True)
        print(i)

    def __buildWeightDict(self):
        """Map every trainable variable to its genome rows for feed_dict use."""
        feed_dict = {}
        genomeIndex = 0
        for var in tf.trainable_variables():
            shape = self.sess.run(tf.shape(var))
            tensor = []
            for i in range(shape[0]):
                # One genome entry per variable row, consumed in order.
                tensor.append(self.genome[genomeIndex])
                genomeIndex += 1
            feed_dict[var] = tensor
        self.weightDict = feed_dict

    def __genGenome(self):
        """Draw a fresh uniform-random genome in [minW, maxW)."""
        self.genome = []
        for var in tf.trainable_variables():
            shape = self.sess.run(tf.shape(var))#Get shape of variable
            for i in range(shape[0]):
                self.genome.append((np.random.rand(shape[1]) * (self.maxW-self.minW)) + self.minW)

    def reset(self, genome=None):
        """Start a new evaluation run, optionally installing a new genome."""
        # NOTE(review): `genome != None` on a list works here, but
        # `is not None` is the conventional (and numpy-safe) test.
        if genome!=None: self.updateGenome(genome)
        self.done = False
        self.input = self.env.reset()
        self.fitness = 0
        self.totalFits = []

    def next(self):
        """Bank the finished episode's fitness and start the next episode."""
        self.done = False
        self.input = self.env.reset()
        self.totalFits.append(self.fitness)
        self.fitness = 0

    def avgFitness(self):
        """Mean fitness over the episodes completed this generation.

        NOTE(review): numpy.average of an empty list yields nan (with a
        RuntimeWarning), which is what callers see right after reset().
        """
        return np.average(self.totalFits)

    def updateGenome(self, genome=None):
        """Install (or randomly generate) a genome; refresh the id hash and
        the per-variable feed dict."""
        if genome==None:
            self.__genGenome()
        else:
            self.genome = genome
        # id = hash of the flattened genome; used by Population to spot new agents.
        self.id = hash(tuple([item for sublist in self.genome for item in sublist]))
        self.__buildWeightDict()
43,343 | stickzman/honors_thesis | refs/heads/master | /Q-Value/Q-Matrix/frozenlake.py | import gym
import numpy
y = .97 #Discount Rate (gamma) applied to future rewards
learnRate = .2  # Q-learning step size (alpha)
totalEps = 1000  # number of training episodes
def updateQMat(q, reward, state, action, newState, learn_rate=None, discount=None):
    """Apply one Q-learning (temporal-difference) update to ``q`` in place.

    Moves q[state][action] toward ``reward + discount * max_a q[newState][a]``
    by a step of ``learn_rate``.

    ``learn_rate`` and ``discount`` default to the module-level ``learnRate``
    and ``y`` so existing call sites keep their behaviour, while the function
    is now reusable with explicit hyperparameters.
    """
    if learn_rate is None:
        learn_rate = learnRate
    if discount is None:
        discount = y
    futureReward = max(q[newState][:])  # best achievable value from the next state
    q[state][action] = q[state][action] + learn_rate * (reward + discount * futureReward - q[state][action])
# Tabular Q-learning on gym's (slippery) FrozenLake-v0. Exploration is
# implemented as decaying random noise added to the Q row before argmax.
success = False
lastFailEp = -1
firstSuccEp = -1
successEps = 0

env = gym.make('FrozenLake-v0')
#env = wrappers.Monitor(env, '/tmp/recording', force=True) #Records performance data
qMatrix = numpy.zeros((env.observation_space.n, env.action_space.n)) #Initialize qMatrix to 0s

for i in range(totalEps):
    observation = env.reset()
    #Loop through episode, one timestep at a time
    for t in range(env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')):
        #Create an array of random estimated rewards representing each action
        #with the possible range of rewards decreasing with each episode
        randomActions = numpy.random.randn(1, env.action_space.n)*(1/(i+1))
        #Choose either the action with max expected reward, or a random action
        #according to randomActions array. With each episode, the random actions
        #will become less chosen.
        action = numpy.argmax(qMatrix[observation][:] + randomActions)
        oldObservation = observation;
        observation, reward, done, info = env.step(action) #Perform the action
        if done and reward == 0:
            reward = -1 #Edit reward to negative in the case of falling in a hole
        updateQMat(qMatrix, reward, oldObservation, action, observation) #Update the Q-Matrix
        env.render()
        if done:
            if reward == 1:
                successEps += 1
                if success == False:
                    success = True
                    firstSuccEp = i
            else:
                lastFailEp = i
            print("Episode finished after {} timesteps".format(t+1))
            break

# Summary statistics for the whole training run.
print()
print("Percentage of successful episodes: " + str(successEps/totalEps * 100) + "%")
print()
print("First successful episode: " + str(firstSuccEp))
print()
print("Last failed episode: " + str(lastFailEp))
env.close()
# NOTE(review): the original upload line embedded a personal API key; key
# redacted here since credentials must not live in source control.
#gym.upload('/tmp/recording', api_key='<redacted>') #Uploads performance data
43,344 | stickzman/honors_thesis | refs/heads/master | /lakeEngineFullBoard.py | import numpy as np
import random
#Recreate Frozen Lake w/o slipping function
#Recreate Frozen Lake w/o slipping function
class Env:
    """A deterministic re-implementation of the 4x4 FrozenLake environment.

    The board is stored flat in ``lakeArray`` (16 tiles); the player's
    position ``pIndex`` doubles as the state id (0..15). With the default
    ``slipRate`` of 0 every action is deterministic.
    """

    # Rewards
    GOAL_REWARD = 1
    HOLE_REWARD = -1
    DEFAULT_REWARD = 0
    SLIP_PERCENT = 0

    #Representation of tiles in lake array
    SAFE_TILE = 0
    HOLE_TILE = 1
    START_TILE = 2
    GOAL_TILE = 3
    PLAYER_TILE = 4

    #Representation of moves in move array
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3

    def __init__(self, slipRate = 0):
        self.SLIP_PERCENT = slipRate
        self.pIndex = 0  # player index, also the current state
        self.lakeArray = np.zeros(16)  # all safe tiles by default
        self.lakeArray[0] = self.START_TILE
        self.lakeArray[15] = self.GOAL_TILE
        for hole in (5, 7, 11, 12):
            self.lakeArray[hole] = self.HOLE_TILE
        # moveMatrix[s][action] -> resulting state, or -1 if the move is illegal.
        self.moveMatrix = []
        for s in range(16):
            self.moveMatrix.append([s - 4, s + 4, s - 1, s + 1])
        for i in range(4):           # top row cannot move up
            self.moveMatrix[i][self.UP] = -1
        for i in range(0, 13, 4):    # left column cannot move left
            self.moveMatrix[i][self.LEFT] = -1
        for i in range(3, 16, 4):    # right column cannot move right
            self.moveMatrix[i][self.RIGHT] = -1
        for i in range(12, 16):      # bottom row cannot move down
            self.moveMatrix[i][self.DOWN] = -1

    def isWin(self):
        """True if the player stands on the goal tile."""
        # Derived from the board instead of a hard-coded index so the two
        # representations can never disagree.
        return self.lakeArray[self.pIndex] == self.GOAL_TILE

    def isFallen(self):
        """True if the player stands on a hole tile."""
        return self.lakeArray[self.pIndex] == self.HOLE_TILE

    def step(self, action):
        """Execute ``action`` and advance one timestep.

        Returns (state, reward, done): GOAL_REWARD on reaching the goal,
        HOLE_REWARD on falling in, DEFAULT_REWARD otherwise.
        """
        reward = self.DEFAULT_REWARD
        done = False
        self.pIndex = self.move(action)
        if self.isWin():
            done = True
            reward = self.GOAL_REWARD
        elif self.isFallen():
            done = True
            reward = self.HOLE_REWARD
        return (self.pIndex, reward, done)

    def getBoard(self):
        """Return a copy of the flat 16-element board array."""
        return self.lakeArray.copy()

    def move(self, action):
        """Resolve ``action`` (with an optional random slip) to the next state."""
        if random.random() < self.SLIP_PERCENT:
            # Slipped: a random direction replaces the requested one.
            action = random.randint(0, 3)
        newState = self.moveMatrix[self.pIndex][action]
        if newState == -1:
            # Attempted to walk off the board: stay put.
            return self.pIndex
        return newState

    def render(self):
        """Print an ASCII picture of the board ('P' marks the player)."""
        s = ""
        for i in range(len(self.lakeArray)):
            if i % 4 == 0:
                s += "\n"  # new board row every four tiles
            if i == self.pIndex:
                s += "P"
            elif self.lakeArray[i] == self.SAFE_TILE:
                s += "-"
            elif self.lakeArray[i] == self.HOLE_TILE:
                s += "O"
            elif self.lakeArray[i] == self.START_TILE:
                s += "S"
            elif self.lakeArray[i] == self.GOAL_TILE:
                s += "G"
        print(s)

    def reset(self):
        """Reset the player to the start tile; return (board copy, state)."""
        self.pIndex = 0
        return self.getBoard(), self.pIndex
43,345 | stickzman/honors_thesis | refs/heads/master | /PolicyGradient/policy_gradient_agent.py | import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class Agent():
    """REINFORCE (vanilla policy-gradient) agent with batched episode updates.

    Experience rows [state, action, reward] are buffered; at the end of every
    episode that episode's rewards are replaced by their discounted returns,
    and after ``b_size`` episodes the policy network is trained on the batch.
    """

    def __init__(self, lr, s_size, a_size, h_size, b_size, gamma):
        self.exp_buffer = []      # flat list of [state, action, reward] rows
        self.end_of_last_ep = 0   # index of the first row of the current episode
        self.ep_count = 0         # episodes accumulated since the last update
        self.b_size = b_size      # episodes per policy update
        self.gamma = gamma        # reward discount factor
        tf.reset_default_graph() #Clear the Tensorflow graph.

        # Feed-forward policy network: state -> softmax over actions.
        self.state_in= tf.placeholder(shape=[None, s_size],dtype=tf.float32)
        hidden = slim.fully_connected(self.state_in, h_size, biases_initializer=None)
        self.output = slim.fully_connected(hidden, a_size, biases_initializer=None, activation_fn=tf.nn.softmax)

        # Training: rewards and chosen actions are fed in to compute the
        # policy-gradient loss, which the optimizer minimizes.
        self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)
        self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32)

        # Flat indices of the probability the network assigned to each chosen action.
        self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
        self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)

        self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        self.min_loss = optimizer.minimize(self.loss)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def __discount_rewards(self, r):
        """ take 1D float array of rewards and compute discounted reward """
        discounted_r = np.zeros_like(r)
        running_add = 0
        for t in reversed(range(0, r.size)):
            running_add = running_add * self.gamma + r[t]
            discounted_r[t] = running_add
        return discounted_r

    def __update(self):
        """Run one gradient step on the buffered batch, then clear the batch."""
        feed_dict={self.reward_holder:self.exp_buffer[:,2],
                   self.action_holder:self.exp_buffer[:,1],self.state_in:np.vstack(self.exp_buffer[:,0])}
        self.sess.run(self.min_loss, feed_dict)
        self.exp_buffer = []
        self.ep_count = 0
        # BUGFIX: the episode marker must be rewound along with the buffer;
        # it used to stay stale, so the episodes after an update were sliced
        # from the wrong offset when their rewards were discounted.
        self.end_of_last_ep = 0

    def chooseAction(self, s):
        """Sample an action from the policy's softmax distribution for state s."""
        a_dist = self.sess.run(self.output,feed_dict={self.state_in:[s]})
        a = np.random.choice(a_dist[0],p=a_dist[0])
        return np.argmax(a_dist == a)

    def observe(self, s, a, r, d):
        """Record one transition; on episode end (d=True), replace that
        episode's rewards with discounted returns and maybe train."""
        self.exp_buffer.append([s, a, r])
        if d:
            self.ep_count += 1
            self.exp_buffer = np.array(self.exp_buffer)
            self.exp_buffer[self.end_of_last_ep:, 2] = self.__discount_rewards(self.exp_buffer[self.end_of_last_ep:, 2])
            # BUGFIX: the next episode starts at len(buffer), not len(buffer)-1;
            # the old value re-discounted the previous episode's final step
            # together with the next episode.
            self.end_of_last_ep = len(self.exp_buffer)
            if self.ep_count >= self.b_size:
                self.__update()
            else:
                self.exp_buffer = self.exp_buffer.tolist()
| {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,346 | stickzman/honors_thesis | refs/heads/master | /Q-Value/Neural Network/TFunfrozenlake.py | import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from thawedLakeEngine import Env
# Single-layer Q-network (16 one-hot state inputs -> 4 action values) trained
# with epsilon-greedy exploration on the custom thawed-lake engine.
lr = .1

tf.reset_default_graph()
#These lines establish the feed-forward part of the network used to choose actions
inputs1 = tf.placeholder(shape=[1,16],dtype=tf.float32)
W = tf.Variable(tf.random_uniform([16,4],0,0.01))
Qout = tf.matmul(inputs1,W)
predict = tf.argmax(Qout,1)

#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
nextQ = tf.placeholder(shape=[1,4],dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=lr)
updateQVals = trainer.minimize(loss)

init = tf.global_variables_initializer()
env = Env()

# Set learning parameters
y = .99            # discount factor
e = 0.5            # initial random-action probability
num_episodes = 1000
success = False
firstSuccessEp = -1
totalSuccessEps = 0
lastFailedEp = -1
#create lists to contain total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
    sess.run(init)
    for i in range(num_episodes):
        #Reset environment and get first new observation
        s = env.reset()
        rAll = 0
        d = False
        j = 0
        #The Q-Network
        while j < 99:
            j+=1
            #Choose an action by greedily (with e chance of random action) from the Q-network
            a,allQ = sess.run([predict,Qout],feed_dict={inputs1:np.identity(16)[s:s+1]})
            print(a, allQ)
            if np.random.rand(1) < e:
                a[0] = random.randint(0, 3)
            #Get new state and reward from environment
            s1,r,d = env.step(a[0])
            #Obtain the Q' values by feeding the new state through our network
            Q1 = sess.run(Qout,feed_dict={inputs1:np.identity(16)[s1:s1+1]})
            #Obtain maxQ' and set our target value for chosen action.
            maxQ1 = np.max(Q1)
            targetQ = allQ
            targetQ[0,a[0]] = r + y*maxQ1
            #Train our network using target and predicted Q values
            sess.run([updateQVals],feed_dict={inputs1:np.identity(16)[s:s+1],nextQ:targetQ})
            rAll += r
            s = s1
            env.render()
            if d == True:
                if r==1:
                    totalSuccessEps += 1
                    if success == False:
                        success = True
                        firstSuccessEp = i
                else:
                    lastFailedEp = i
                    print("FAILED")
                #Reduce chance of random action as we train the model.
                print("Episode finished after " + str(j) + " timesteps")
                #e = 1./((i/50) + 10)
                e = 1/(i+1)
                break
        rList.append(rAll)

# Summary of the training run.
print()
print("Percent of successful episodes: " + str((totalSuccessEps/num_episodes)*100) + "%")
print()
print("First successful episode: " + str(firstSuccessEp))
print()
print("Last failed episode: " + str(lastFailedEp))
plt.plot(rList)
plt.show()
| {"/Convolutional/convol_alt.py": ["/lakeEngineFullBoard.py"], "/mountain_car.py": ["/PolicyGradient/policy_gradient_agent.py"]} |
43,347 | stickzman/honors_thesis | refs/heads/master | /Q-Value/Q-Matrix/unfrozenlake.py | import numpy
import random
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from thawedLakeEngine import Env
discountRate = .97  # gamma applied to future rewards
learnRate = .15     # Q-learning step size (alpha)
totalEps = 1000     # number of training episodes

#------------------------------------------------------------
#Implement the same learning algorithm from frozenlake.py

# Bookkeeping for the success/failure summary printed at the end.
prevSuccess = False
firstSuccessEp = -1
totalSuccessEps = 0
lastFailedEp = -1
def updateQMat(q, reward, state, action, newState, learn_rate=None, discount=None):
    """Apply one Q-learning update to q[state][action] in place.

    Moves the entry toward ``reward + discount * max_a q[newState][a]``.
    ``learn_rate``/``discount`` fall back to the module-level ``learnRate``
    and ``discountRate`` so existing call sites are unchanged, while explicit
    hyperparameters make the function reusable elsewhere.
    """
    if learn_rate is None:
        learn_rate = learnRate
    if discount is None:
        discount = discountRate
    futureReward = max(q[newState][:]) #Maximum future reward from new state
    q[state][action] = q[state][action] + learn_rate * (reward + discount * futureReward - q[state][action])
# Tabular Q-learning on the custom (non-slippery) lake engine, mirroring the
# training loop of frozenlake.py.
env = Env()
qMatrix = numpy.zeros((16, 4)) #Initialize qMatrix to 0s
for e in range(totalEps):
    state = env.reset()
    for t in range(1000):
        #Create an array of random estimated rewards representing each action
        #with the possible range of rewards decreasing with each episode
        randomActions = numpy.random.randn(1, 4)*(1/(e+1))
        #Choose either the action with max expected reward, or a random action
        #according to randomActions array. With each episode, the random actions
        #will become less chosen.
        action = numpy.argmax(qMatrix[state][:] + randomActions)
        oldObservation = state;
        state, reward, done = env.step(action)
        updateQMat(qMatrix, reward, oldObservation, action, state) #Update the Q-Matrix
        env.render()
        if done:
            if reward == env.GOAL_REWARD:
                if prevSuccess == False:
                    #Record the first successful ep
                    prevSuccess = True
                    firstSuccessEp = e
                totalSuccessEps += 1
            else:
                lastFailedEp = e
            print("Episode finished after {} timesteps".format(t+1))
            break

# Summary of the training run.
print()
print("Percentage of successful episodes: " + str((totalSuccessEps/totalEps) * 100) + "%")
print()
print("First successful episode: " + str(firstSuccessEp))
print()
print("Last failed episode: " + str(lastFailedEp))
43,348 | stickzman/honors_thesis | refs/heads/master | /mountain_car.py | import numpy as np
import gym
from PolicyGradient.policy_gradient_agent import Agent
import matplotlib.pyplot as plt
# Train the REINFORCE policy-gradient Agent on MountainCar-v0 and plot the
# per-episode reward with a 100-episode moving average.
env = gym.make("MountainCar-v0")
agent = Agent(lr=.1,s_size=2, a_size=3, h_size=8, b_size=25, gamma=.5)

total_episodes = 1000 #Set total number of episodes to train agent on.
max_ep_length = 999

i = 0
total_reward = []   # reward obtained in each finished episode
avg_rewards = []    # mean of the last 100 episodes, sampled every 100 episodes
for i in range(total_episodes):
    s = env.reset()
    running_reward = 0
    for t in range(max_ep_length):
        #Probabilistically pick an action given our network outputs.
        a = agent.chooseAction(s)
        #if i%1000 == 0: env.render()
        s1,r,d,_ = env.step(a) #Get our reward for taking an action given a bandit.
        agent.observe(s, a, r, d)
        s = s1
        running_reward += r
        if d == True:
            total_reward.append(running_reward)
            break

    #Update our running tally of scores.
    if i % 100 == 0:
        avg_rewards.append(np.mean(total_reward[-100:]))
        print(str((i/total_episodes)*100) + "%")

# Stretch the averaged curve over the same x-range as the raw rewards.
avgX = np.linspace(0, len(total_reward), len(avg_rewards))
plt.plot(total_reward)
plt.plot(avgX, avg_rewards)
plt.show()
43,408 | ridoansaleh/DjangoBooks | refs/heads/master | /web/web/views.py | from django.shortcuts import redirect
def index_redirect(request):
    """Send requests for the site root to the books listing page."""
    return redirect('/books/')
43,409 | ridoansaleh/DjangoBooks | refs/heads/master | /web/books/models.py | from django.db import models
class Member(models.Model):
    """A library member, identified by first and last name."""

    firstname = models.CharField(max_length=40)
    lastname = models.CharField(max_length=40)

    def __str__(self):
        # Display label used by the admin and shell: "First Last".
        return "{} {}".format(self.firstname, self.lastname)
class Books(models.Model):
    """A book record with basic bibliographic details."""

    book_title = models.CharField(max_length=100, blank=False)
    # book_photo = models.FileField()
    writer = models.CharField(max_length=50, blank=False)
    synopsis = models.CharField(max_length=200, blank=True)
    publisher = models.CharField(max_length=50)
    publish_date = models.DateField()

    def __str__(self):
        # Display label used by the admin and shell: "Title Writer".
        return "{} {}".format(self.book_title, self.writer)
43,410 | ridoansaleh/DjangoBooks | refs/heads/master | /web/books/views.py | from django.shortcuts import render, redirect
from .models import Member, Books
def index(request):
    """Render the book list page with every book in the database."""
    books = Books.objects.all()
    context = {'books': books}
    return render(request, 'books/index.html', context)
def create(request):
    """Create a Books row from POSTed form fields, then return to the root.

    NOTE(review): the POST data is used unvalidated (a missing field raises
    KeyError, a malformed date fails on save) -- a Django ModelForm would
    validate and report errors instead.
    """
    book = Books(book_title=request.POST['book_title'], writer=request.POST['writer'],
                 synopsis=request.POST['synopsis'], publisher=request.POST['publisher'],
                 publish_date=request.POST['publish_date'])
    book.save()
    return redirect('/')
def add_book(request):
    """Render the empty "add book" form page."""
    # members = Member.objects.all()
    # context = {'members': members}
    return render(request, 'books/add_book.html', {})
def edit(request, id):
    """Render the edit form for the Member with the given primary key.

    NOTE(review): Member.objects.get raises DoesNotExist for an unknown id
    (500 error); get_object_or_404 would return a 404 instead.
    """
    members = Member.objects.get(id=id)
    context = {'members': members}
    return render(request, 'books/edit.html', context)
def update(request, id):
    """Apply POSTed first/last names to the Member with the given primary
    key, then return to the books list."""
    member = Member.objects.get(id=id)
    member.firstname = request.POST['firstname']
    member.lastname = request.POST['lastname']
    member.save()
    return redirect('/books/')
def delete(request, id):
    """Delete the Member with the given primary key, then return to the list."""
    member = Member.objects.get(id=id)
    member.delete()
    return redirect('/books/')
43,411 | darrylma/FSND_Movie_Website | refs/heads/master | /entertainment_center.py | import media
import fresh_tomatoes
import urllib
import json
# Fetch details for a fixed list of movies from the OMDb API, build Movie
# objects, and generate/open the static movies page.
# NOTE(review): urllib.urlopen is the Python 2 API; under Python 3 this must
# become urllib.request.urlopen (and the JSON payload decoded from bytes).

#Define movie title array and initialize arrays for movie details
movie_titles = ["The Dark Knight","Shutter Island","Predestination","Inside Out","Gattaca","Memento"]
storylines = [None] * len(movie_titles)
posters = [None] * len(movie_titles)
release_dates = [None] * len(movie_titles)
imdb_ratings = [None] * len(movie_titles)

#Establishes connection to movie database to retrieve movie information and stores information into arrays
for position, movie_title in enumerate(movie_titles):
    connection = urllib.urlopen("http://www.omdbapi.com/?t=" + movie_title)
    output = connection.read()
    connection.close()
    data = json.loads(output)
    storylines[position] = data["Plot"]
    posters[position] = data["Poster"]
    release_dates[position] = data["Released"]
    imdb_ratings[position] = data["imdbRating"]

#Define Movie object information (one per title, paired with its trailer URL)
i=0
the_dark_knight = media.Movie(movie_titles[i],
                              storylines[i],
                              posters[i],
                              "https://www.youtube.com/watch?v=EXeTwQWrcwY",
                              release_dates[i],
                              imdb_ratings[i])
i+=1
shutter_island = media.Movie(movie_titles[i],
                             storylines[i],
                             posters[i],
                             "https://www.youtube.com/watch?v=5iaYLCiq5RM",
                             release_dates[i],
                             imdb_ratings[i])
i+=1
predestination = media.Movie(movie_titles[i],
                             storylines[i],
                             posters[i],
                             "https://www.youtube.com/watch?v=jcQacCfi_pw",
                             release_dates[i],
                             imdb_ratings[i])
i+=1
inside_out = media.Movie(movie_titles[i],
                         storylines[i],
                         posters[i],
                         "https://www.youtube.com/watch?v=seMwpP0yeu4",
                         release_dates[i],
                         imdb_ratings[i])
i+=1
gattaca = media.Movie(movie_titles[i],
                      storylines[i],
                      posters[i],
                      "https://www.youtube.com/watch?v=PC6ZA1dFkVk",
                      release_dates[i],
                      imdb_ratings[i])
i+=1
memento = media.Movie(movie_titles[i],
                      storylines[i],
                      posters[i],
                      "https://www.youtube.com/watch?v=nHozKtsvag0",
                      release_dates[i],
                      imdb_ratings[i])

# Generate the HTML page and open it in the browser.
movies = [the_dark_knight, shutter_island, predestination, inside_out, gattaca, memento]
fresh_tomatoes.open_movies_page(movies)
| {"/entertainment_center.py": ["/media.py"]} |
43,412 | darrylma/FSND_Movie_Website | refs/heads/master | /media.py | import webbrowser
class Movie():
    """Container for the details of a single movie.

    Instances simply hold the title, storyline, artwork/trailer URLs,
    release date and IMDB rating so the page generator can render them.
    """

    # Ratings recognised by the site (currently informational only).
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, movie_title, movie_storyline, movie_poster_image_url, movie_trailer_youtube_url, movie_release_date, movie_imdb_rating):
        """Store the supplied movie details on the instance."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.release_date = movie_release_date
        self.imdb_rating = movie_imdb_rating
        self.poster_image_url = movie_poster_image_url
        self.trailer_youtube_url = movie_trailer_youtube_url
| {"/entertainment_center.py": ["/media.py"]} |
43,417 | tomorrownow/PyFCM | refs/heads/master | /fcm/Aggregation_FCMs/Aggregation_FCMs.py | # # -*- coding: utf-8 -*-
# """
# Created on Sat Mar 31 16:04:39 2018
# @author: Payam Aminpour
# Michigan State University
# aminpour@msu.edu
# """
# # In[1]:
# import __init__ as init
# import matplotlib.pyplot as plt
# plt.rcdefaults()
# import matplotlib.pyplot as plt
# import xlrd
# import numpy as np
# import networkx as nx
# # In[2]:
# file_location = init.file_location
# workbook = xlrd.open_workbook(file_location)
# sheet = workbook.sheet_by_index(0)
# n_concepts = sheet.nrows - 1
# # In[3]:
# # Agregating FCMs
# adj = np.zeros((n_concepts, n_concepts))
# count = np.zeros((n_concepts, n_concepts))
# adj_ag = np.zeros((n_concepts, n_concepts))
# All_ADJs = []
# p = 0
# for i in range(0, workbook.nsheets):
# p += 1
# sheet = workbook.sheet_by_index(i)
# Adj_matrix = np.zeros((n_concepts, n_concepts))
# for i in range(1, n_concepts + 1):
# for j in range(1, n_concepts + 1):
# Adj_matrix[i - 1, j - 1] = sheet.cell_value(i, j)
# if sheet.cell_value(i, j) != 0:
# count[i - 1, j - 1] += 1
# All_ADJs.append(Adj_matrix)
# adj += Adj_matrix
# adj_copy = np.copy(adj)
# if init.Aggregation_technique == "AMX":
# for i in range(n_concepts):
# for j in range(n_concepts):
# if count[i, j] == 0:
# adj_ag[i, j] = 0
# else:
# adj_ag[i, j] = adj_copy[i, j] / count[i, j]
# if init.Aggregation_technique == "AMI":
# from statistics import mean as mean
# for i in range(n_concepts):
# for j in range(n_concepts):
# a = [ind[i, j] for ind in All_ADJs]
# adj_ag[i, j] = mean(a)
# if init.Aggregation_technique == "MED":
# from statistics import median as med
# for i in range(n_concepts):
# for j in range(n_concepts):
# a = [ind[i, j] for ind in All_ADJs]
# adj_ag[i, j] = med(a)
# if init.Aggregation_technique == "GM":
# import scipy
# for i in range(n_concepts):
# for j in range(n_concepts):
# a = [ind[i, j] for ind in All_ADJs if ind[i, j] != 0]
# adj_ag[i, j] = float(scipy.stats.mstats.gmean(np.array(a)))
# Adj_aggregated_FCM = adj_ag
# # In[4]:
# G = nx.DiGraph(Adj_aggregated_FCM)
# plt.figure(figsize=(10, 10))
# everylarge = [(u, v) for (u, v, d) in G.edges(data=True) if abs(d["weight"]) >= 0.75]
# elarge = [
# (u, v)
# for (u, v, d) in G.edges(data=True)
# if abs(d["weight"]) > 0.5 and abs(d["weight"]) < 0.75
# ]
# esmall = [
# (u, v)
# for (u, v, d) in G.edges(data=True)
# if abs(d["weight"]) <= 0.5 and abs(d["weight"]) > 0.25
# ]
# everysmall = [(u, v) for (u, v, d) in G.edges(data=True) if abs(d["weight"]) <= 0.25]
# #################Centrality#####################################################################
# label = {}
# for nod in G.nodes():
# label[nod] = sheet.cell_value(nod + 1, 0)
# # pos = nx.random_layout(G)
# pos = nx.spring_layout(G, dim=2, k=0.75)
# #########################Visualization##############################################################
# nx.draw_networkx(
# G, pos, labels=label, font_size=7, node_size=200, node_color="lightgreen", alpha=0.6
# )
# nx.draw_networkx_edges(
# G, pos, edgelist=everylarge, width=2, alpha=0.5, edge_color="gold"
# )
# nx.draw_networkx_edges(
# G, pos, edgelist=elarge, width=1, alpha=0.5, edge_color="g", style="dashed"
# )
# nx.draw_networkx_edges(
# G,
# pos,
# edgelist=esmall,
# width=0.5,
# alpha=0.5,
# edge_color="lightcoral",
# style="dashed",
# )
# nx.draw_networkx_edges(
# G,
# pos,
# edgelist=everysmall,
# width=0.25,
# alpha=0.5,
# edge_color="lightgray",
# style="dashed",
# )
# plt.show()
# #######################################################################################################
# nx.write_edgelist(G, "aggregated_edg.csv")
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,418 | tomorrownow/PyFCM | refs/heads/master | /fcm/Clustering_FCMs/__init__.py | # # -*- coding: utf-8 -*-
# """
# Created on Sun Aug 5 15:19:59 2018
# @author: Payam Aminpour
# """
# name = "FCM_Scenario_Analysis"
# print(
# "\n",
# "The file location is the path of your project file in your computer.",
# "\n",
# "For Example: C:/Paym Computer/Safety Project/All_Adjacency_matrix.xlsx",
# "\n",
# "This file should be an excel file with .xlsx extention",
# "\n",
# "Please see the AllParticipants_Adjacency_Matrix_Example file to check how your matrix should look like",
# )
# print("\n")
# file_location = input("copy your project file path here: ")
# print("\n")
# print(
# """There are several ways to generate Reference_FCM
# # FCM_Reference is the average of all FCMs (including zeros) -> Type: AI
# # FCM_Reference is the average of all FCMs (excluding zeros) -> Type: AX
# # FCM_Reference is a n*n zeros matrix -> Type: Z
# # FCM_Reference is a n*n ones matrix -> Type: O
# """
# )
# Aggregation_technique = input("what is the method to generate Reference FCM? ")
# clustering_method = input(
# "what is the clusterign criterion? Structure:S, Dynamics:D -> "
# )
# print("\n")
# n_clusters = int(input("Hom Mnay Clusters? "))
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,419 | tomorrownow/PyFCM | refs/heads/master | /tests/test_scenario.py | from fcm.load import load_csv
from fcm.analysis import scenario
def test_scenario_tanh_k_one_variable(shared_datadir):
    """Scenario run clamping one concept, tanh squashing + Kosko inference."""
    adjacency = load_csv(shared_datadir / "test_adjacency_matrix.csv")
    expected = {
        "c1": 0.0,
        "c2": 0.8771805720335079,
        "c3": 0.7615984906926053,
        "c4": 0.0,
        "c5": -0.36340194800116987,
    }
    outcome = scenario.scenario_analysis(
        data=adjacency.values,
        columns=adjacency.columns,
        scenarios={"c1": 1},
        noise_threshold=0.0,
        lambda_thres=1,
        principles=[],
        f_type="tanh",
        infer_rule="k",
    )
    assert outcome == expected
def test_scenario_sig_k_variable(shared_datadir):
    """Scenario run clamping one concept, sigmoid squashing + Kosko inference."""
    adjacency = load_csv(shared_datadir / "test_adjacency_matrix.csv")
    expected = {
        "c1": 0.0,
        "c2": 0.09222567649793934,
        "c3": 0.07798569160599089,
        "c4": 0.0,
        "c5": -0.00969006709092668,
    }
    outcome = scenario.scenario_analysis(
        data=adjacency.values,
        columns=adjacency.columns,
        scenarios={"c1": 1},
        noise_threshold=0.0,
        lambda_thres=1,
        principles=[],
        f_type="sig",
        infer_rule="k",
    )
    print(outcome)
    assert outcome == expected
def test_scenario_triv_mk_variable(shared_datadir):
    """Trivalent squashing + modified-Kosko inference leaves every concept unchanged."""
    adjacency = load_csv(shared_datadir / "test_adjacency_matrix.csv")
    outcome = scenario.scenario_analysis(
        data=adjacency.values,
        columns=adjacency.columns,
        scenarios={"c1": 1},
        noise_threshold=0.0,
        lambda_thres=1,
        principles=[],
        f_type="triv",
        infer_rule="mk",
    )
    print(outcome)
    assert outcome == {"c1": 0.0, "c2": 0.0, "c3": 0.0, "c4": 0.0, "c5": 0.0}
def test_scenario_biv_r_variable(shared_datadir):
    """Rescaled-Kosko inference run that expects no change in any concept.

    NOTE(review): the test name says "biv" but ``f_type="triv"`` is passed —
    presumably a copy-paste slip; confirm which squashing function was intended
    before renaming or changing the argument.
    """
    adjacency = load_csv(shared_datadir / "test_adjacency_matrix.csv")
    outcome = scenario.scenario_analysis(
        data=adjacency.values,
        columns=adjacency.columns,
        scenarios={"c1": 1},
        noise_threshold=0.0,
        lambda_thres=1,
        principles=[],
        f_type="triv",
        infer_rule="r",
    )
    print(outcome)
    assert outcome == {"c1": 0.0, "c2": 0.0, "c3": 0.0, "c4": 0.0, "c5": 0.0}
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,420 | tomorrownow/PyFCM | refs/heads/master | /tests/test_analysis.py | import fcm
from fcm.analysis.tools import (
_infer_rule,
InferenceRule,
reduce_noise,
_transform,
SquashingFucntion,
)
import numpy as np
import pytest
# Inference Rule Tests
def test_infer_rule_kosko(datadir):
    """Kosko rule: next activation is the weighted sum W^T · a only."""
    labels = ["c1", "c2", "c3"]
    weights = np.array([[1.0, -1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, -0.5]])
    size = len(labels)
    state = np.ones(size)
    outcome = _infer_rule(size, state, weights.T, InferenceRule.K.value)
    print(outcome)
    assert np.array_equal(outcome, np.array([2.5, 1.0, -0.5]))
def test_infer_rule_modified_kosko(datadir):
    """Modified Kosko rule: previous state is added to the weighted sum."""
    labels = ["c1", "c2", "c3"]
    weights = np.array([[1.0, -1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, -0.5]])
    size = len(labels)
    state = np.ones(size)
    outcome = _infer_rule(size, state, weights.T, InferenceRule.MK.value)
    print(outcome)
    assert np.array_equal(outcome, np.array([3.5, 2.0, 0.5]))
def test_infer_rule_rescaled_kosko(datadir):
    """Rescaled Kosko rule: with an all-ones state it matches modified Kosko."""
    labels = ["c1", "c2", "c3"]
    weights = np.array([[1.0, -1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, -0.5]])
    size = len(labels)
    state = np.ones(size)
    outcome = _infer_rule(size, state, weights.T, InferenceRule.R.value)
    print(outcome)
    assert np.array_equal(outcome, np.array([3.5, 2.0, 0.5]))
def test_infer_rule_failure(datadir):
    """An unrecognised inference-rule name must raise ValueError.

    The fixture setup is deliberately kept OUTSIDE the ``pytest.raises`` block:
    previously everything was inside it, so any accidental error while building
    the inputs would have made the test pass for the wrong reason.
    """
    concepts = ["c1", "c2", "c3"]
    adj_matrices = np.array([[1.0, -1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, -0.5]])
    n_concepts = len(concepts)
    activation_vec = np.ones(n_concepts)
    with pytest.raises(ValueError):
        _infer_rule(n_concepts, activation_vec, adj_matrices.T, "dls")
# Transform Function Tests
def test_transform_sig():
    """Sigmoid squashing with landa=0 collapses every activation to 0.5."""
    labels = ["c1", "c2", "c3"]
    raw = np.array([3.5, 2.0, 0.5])
    squashed = _transform(
        act_vect=raw, n=len(labels), f_type=SquashingFucntion.SIG.value, landa=0
    )
    print(squashed)
    assert np.array_equal(squashed, np.array([0.5, 0.5, 0.5]))
def test_transform_tanh():
    """Tanh squashing with landa=0 collapses every activation to 0."""
    labels = ["c1", "c2", "c3"]
    raw = np.array([3.5, 2.0, 0.5])
    squashed = _transform(
        act_vect=raw,
        n=len(labels),
        f_type=SquashingFucntion.TANH.value,
        landa=0,
    )
    print(squashed)
    assert np.array_equal(squashed, np.array([0.0, 0.0, 0.0]))
def test_transform_biv():
    """Bivalent squashing maps every positive activation to 1."""
    labels = ["c1", "c2", "c3"]
    raw = np.array([3.5, 2.0, 0.5])
    squashed = _transform(
        act_vect=raw, n=len(labels), f_type=SquashingFucntion.BIV.value, landa=0
    )
    print(squashed)
    assert np.array_equal(squashed, np.array([1.0, 1.0, 1.0]))
def test_transform_triv():
    """Trivalent squashing maps every positive activation to 1."""
    labels = ["c1", "c2", "c3"]
    raw = np.array([3.5, 2.0, 0.5])
    squashed = _transform(
        act_vect=raw,
        n=len(labels),
        f_type=SquashingFucntion.TRIV.value,
        landa=0,
    )
    print(squashed)
    assert np.array_equal(squashed, np.array([1.0, 1.0, 1.0]))
# Reduce Noise Tests
def test_reduce_noise(datadir):
    """Weights with absolute value <= the threshold are zeroed out."""
    labels = ["c1", "c2", "c3"]
    weights = np.array([[1.0, -1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, -0.5]])
    # 0.5 and -0.5 fall at the threshold, so both are removed.
    cleaned = reduce_noise(weights, len(labels), 0.5)
    assert np.array_equal(
        cleaned, np.array([[1.0, -1.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, -0.0]])
    )
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,421 | tomorrownow/PyFCM | refs/heads/master | /fcm/Aggregation_FCMs/__init__.py | # # -*- coding: utf-8 -*-
# """
# Created on Sun Aug 5 15:19:59 2018
# @author: Payam Aminpour
# """
# name = "FCM_Scenario_Analysis"
# print(
# "\n",
# "The file location is the path of your project file in your computer.",
# "\n",
# "For Example: C:/Paym Computer/Safety Project/All_Adjacency_matrix.xlsx",
# "\n",
# "This file should be an excel file with .xlsx extention",
# "\n",
# "Please see the AllParticipants_Adjacency_Matrix_Example file to check how your matrix should look like",
# )
# print("\n")
# file_location = input("copy your project file path here: ")
# print("\n")
# print(
# """There are several ways to generate Reference_FCM
# # Arithmatic Mean of all FCMs (Including edges with weight = 0) --> Type: AMI
# # Arithmatic Mean of all FCMs (Excluding edges with weight = 0) --> Type: AMX
# # Median of all FCMs --> Type: MED
# # Geometric Mean of all FCMs --> Type: GM
# # Weighted Mean of all FCMs --> Type: WM
# """
# )
# print("\n")
# Aggregation_technique = input("what is the method to aggregate all FCMs? ")
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,422 | tomorrownow/PyFCM | refs/heads/master | /fcm/Uncertainty_Analysis/__init__.py | # # -*- coding: utf-8 -*-
# """
# Created on Sun Aug 5 15:19:59 2018
# @author: Payam Aminpour
# """
# name = "FCM_Scenario_Analysis"
# print(
# "\n",
# "The file location is the path of your project file in your computer.",
# "\n",
# "For Example: C:/Paym Computer/Safety Project/Adjacency_matrix.xlsx",
# "\n",
# "This file should be an excel file with .xlsx extention",
# "\n",
# "Please see the Adjacency_Matrix_Example file to check how your matrix should look like",
# )
# print("\n")
# file_location = input("copy your project file path here: ")
# print(
# "\n",
# """Sometimes you need to remove the links with significantly low weights to avoid messiness.
# Noise_Threshold is a number in [0,1] which defines a boundary below which all links will be removed from the FCM.
# E.g. Noise_Threshold = 0.15 means that all edges with weight <= 0.15 will be removed from FCM. """,
# )
# print("\n")
# Noise_Threshold = float(input("What is the Noise_Threshold: "))
# print(
# "\n",
# """ Every concept in the FCM graph has a value Ai that expresses the quantity of
# its corresponding physical value and it is derived by the transformation of
# the fuzzy values assigned by who developed the FCM to numerical values.
# The value Ai of each concept Ci is calculated during each simulation step,
# computing the influence of other concepts to the specific concept by selecting one of the
# following equations (inference rules):
# k = Kasko
# mk = Modified Kasko
# r = Rescaled Kasko """,
# )
# print("\n")
# infer_rule = input("What is the Inference Rule (k , mk , r)? ")
# print(
# "\n",
# "There are several squashing function:",
# "\n",
# "\n",
# "Bivalent: 'biv'",
# "\n",
# "Trivalent: 'triv'",
# "\n",
# "Sigmoid: 'sig'",
# "\n",
# "Hyperbolic tangent: 'tanh'",
# )
# print("\n")
# function_type = input("What is the type of Squashing function? ")
# print("\n")
# Lambda = float(
# input(
# "What is the parameter lambda in Squashing function? choose a number between (0,10) "
# )
# )
# print(
# "\n",
# """ In each FCM you have some variables which are more important and
# considered to be the main principles of the system. For example, in one FCM my
# main variables are "water pollution" and "CO2 emission". These are the system
# indicators. By defining these principles you would be able to build an additional list
# for keeping track of changes in only these principles not all of the concepts. The only
# thing you need to do is to put their name one by one. you can add as
# many principles as you want """,
# )
# n_princ = int(input("How many Principles? "))
# Principles = []
# for i in range(n_princ):
# Principles.append(input("The name of Principle {} = ".format(i + 1)))
# print("\n")
# Thresh = int(
# input(
# "what is the Maximum Indegree for a Concept to be in list of possible nodes to be activated? "
# )
# )
# print("\n")
# n_iteration = int(input("How many iterations? "))
# print("\n", " Filter the ploting ")
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,423 | tomorrownow/PyFCM | refs/heads/master | /fcm/Uncertainty_Analysis/Uncertainty_Analysis.py | # # -*- coding: utf-8 -*-
# """
# Created on Sat Mar 31 16:04:39 2018
# @author: Payam Aminpour
# Michigan State University
# aminpour@msu.edu
# """
# # In[1]:
# import __init__ as init
# import matplotlib.pyplot as plt
# plt.rcdefaults()
# import matplotlib.pyplot as plt
# import random
# import xlrd
# import pandas as pd
# import numpy as np
# import math
# import networkx as nx
# # In[2]:
# file_location = init.file_location
# workbook = xlrd.open_workbook(file_location)
# sheet = workbook.sheet_by_index(0)
# n_concepts = sheet.nrows - 1
# Adj_matrix = np.zeros((n_concepts, n_concepts))
# activation_vec = np.ones(n_concepts)
# node_name = {}
# # In[3]:
# Noise_Threshold = 0
# for i in range(1, n_concepts + 1):
# for j in range(1, n_concepts + 1):
# if abs(sheet.cell_value(i, j)) <= Noise_Threshold:
# Adj_matrix[i - 1, j - 1] = 0
# else:
# Adj_matrix[i - 1, j - 1] = sheet.cell_value(i, j)
# # In[4]:
# Concepts_matrix = []
# for i in range(1, n_concepts + 1):
# Concepts_matrix.append(sheet.cell_value(0, i))
# # In[5]:
# G = nx.DiGraph(Adj_matrix)
# # In[6]:
# for nod in G.nodes():
# node_name[nod] = sheet.cell_value(nod + 1, 0)
# # In[7]:
# def transform(x, n, f_type, landa=init.Lambda):
# if f_type == "sig":
# x_new = np.zeros(n)
# for i in range(n):
# x_new[i] = 1 / (1 + math.exp(-landa * x[i]))
# return x_new
# if f_type == "tanh":
# x_new = np.zeros(n)
# for i in range(n):
# x_new[i] = math.tanh(landa * x[i])
# return x_new
# if f_type == "bivalent":
# x_new = np.zeros(n)
# for i in range(n):
# if x[i] > 0:
# x_new[i] = 1
# else:
# x_new[i] = 0
# return x_new
# if f_type == "trivalent":
# x_new = np.zeros(n)
# for i in range(n):
# if x[i] > 0:
# x_new[i] = 1
# elif x[i] == 0:
# x_new[i] = 0
# else:
# x_new[i] = -1
# return x_new
# # In[8]:
# def infer_steady(
# init_vec=activation_vec,
# AdjmT=Adj_matrix.T,
# n=n_concepts,
# f_type="sig",
# infer_rule="mk",
# ):
# act_vec_old = init_vec
# resid = 1
# while resid > 0.00001:
# x = np.zeros(n)
# if infer_rule == "k":
# x = np.matmul(AdjmT, act_vec_old)
# if infer_rule == "mk":
# x = act_vec_old + np.matmul(AdjmT, act_vec_old)
# if infer_rule == "r":
# x = (2 * act_vec_old - np.ones(n)) + np.matmul(
# AdjmT, (2 * act_vec_old - np.ones(n))
# )
# act_vec_new = transform(x, n, f_type)
# resid = max(abs(act_vec_new - act_vec_old))
# if resid < 0.00001:
# break
# act_vec_old = act_vec_new
# return act_vec_new
# # In[9]:
# def infer_scenario(
# Scenario_concepts,
# zeros,
# init_vec=activation_vec,
# AdjmT=Adj_matrix.T,
# n=n_concepts,
# f_type="sig",
# infer_rule="mk",
# ):
# act_vec_old = init_vec
# my_random = {}
# for rC in Scenario_concepts:
# PN = random.choice([-1, 1])
# my_random[rC] = random.random() * PN
# resid = 1
# while resid > 0.00001:
# act_vec_new = np.zeros(n)
# x = np.zeros(n)
# if infer_rule == "k":
# x = np.matmul(AdjmT, act_vec_old)
# if infer_rule == "mk":
# x = act_vec_old + np.matmul(AdjmT, act_vec_old)
# if infer_rule == "r":
# x = (2 * act_vec_old - np.ones(n)) + np.matmul(
# AdjmT, (2 * act_vec_old - np.ones(n))
# )
# act_vec_new = transform(x, n, f_type)
# for z in zeros:
# act_vec_new[z] = 0
# for c in Scenario_concepts:
# act_vec_new[c] = my_random[c]
# resid = max(abs(act_vec_new - act_vec_old))
# # if resid < 0.0001:
# # break
# act_vec_old = act_vec_new
# return act_vec_new
# # In[10]:
# def combinations(iterable, r):
# # combinations('ABCD', 2) --> AB AC AD BC BD CD
# # combinations(range(4), 3) --> 012 013 023 123
# pool = tuple(iterable)
# n = len(pool)
# if r > n:
# return
# indices = list(range(r))
# yield tuple(pool[i] for i in indices)
# while True:
# for i in reversed(range(r)):
# if indices[i] != i + n - r:
# break
# else:
# return
# indices[i] += 1
# for j in range(i + 1, r):
# indices[j] = indices[j - 1] + 1
# yield tuple(pool[i] for i in indices)
# # In[11]:
# Principles = init.Principles
# prin_concepts_index = []
# for nod in node_name.keys():
# if node_name[nod] in Principles:
# prin_concepts_index.append(nod)
# listPossibleNodes = []
# for nod in G.nodes():
# if (
# G.in_degree(nbunch=None, weight=None)[nod] <= init.Thresh
# and Concepts_matrix[nod] not in Principles
# ):
# listPossibleNodes.append(nod)
# # In[13]:
# function_type = init.function_type
# infer_rule = init.infer_rule
# SteadyState = infer_steady(f_type=function_type, infer_rule=infer_rule)
# change_in_principles = {}
# for pr in prin_concepts_index:
# change_in_principles[pr] = []
# iteration = 0
# for iter in range(
# init.n_iteration
# ): # You can increas the number of times you repeat the random process of input vector generation
# rand = random.randint(1, len(listPossibleNodes))
# com = random.sample(listPossibleNodes, rand)
# iteration += 1
# Scenario_concepts = com
# ScenarioState = infer_scenario(
# Scenario_concepts,
# listPossibleNodes,
# f_type=function_type,
# infer_rule=infer_rule,
# )
# changes = ScenarioState - SteadyState
# for pr in prin_concepts_index:
# change_in_principles[pr].append(changes[pr])
# iteration
# # In[ ]:
# df = pd.DataFrame()
# df["IDS"] = list(range(iteration))
# for pr in prin_concepts_index:
# df[node_name[pr]] = change_in_principles[pr]
# # In[ ]:
# from math import pi
# # number of variable
# categories = list(df)[1:]
# N = len(categories)
# # We are going to plot the first line of the data frame.
# # But we need to repeat the first value to close the circular graph:
# plt.figure(figsize=(10, 10))
# # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
# angles = [n / float(N) * 2 * pi for n in range(N)]
# angles += angles[:1]
# # Initialise the spider plot
# ax = plt.subplot(111, polar=True)
# # Draw one axe per variable + add labels labels yet
# plt.xticks(angles[:-1], categories, color="black", size=9)
# # Draw ylabels
# ax.set_rlabel_position(0)
# plt.yticks([-1, -0.5, 0, 0.5, 1], ["-1", "-0.5", "0", "0.5", "1"], color="red", size=10)
# # plt.ylim(-1,1)
# for i in range(int(iteration / 10)):
# values = df.loc[i * 10].drop("IDS").values.flatten().tolist()
# values += values[:1]
# # Plot data
# ax.plot(angles, values, linewidth=0.1, color="black", alpha=0.1, linestyle="-")
# # Fill area
# # ax.fill(angles, values, 'b', alpha=0.1)
# plt.savefig("Uncertainty_Analysis_Results.pdf")
# plt.show()
# # In[ ]:
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,424 | tomorrownow/PyFCM | refs/heads/master | /fcm/load.py | # import os
import pandas as pd
# import seaborn as sns
# class FuzzyCognitiveModel:
# def __init__(self, name, fcm_data, concept_map=None):
# self.name = name
# self.group = group
# self.data = fcm_data
# self.concept_map = concept_map
# class SocialCognitiveModel:
# def __init__(self, name, fcm_list):
# self.name = name
# def accumualtion_curve(df, ax=None, new_variables=False, n=500):
# assert False, "TODO: finish me"
# df_graph_stats_data = []
# cmap = sns.diverging_palette(150, 275, s=80, l=55, n=9, as_cmap=True)
# All_ADJs = []
# all_data_frames = pd.DataFrame(
# columns=df_concepts["code"].unique(), index=df_concepts["code"].unique()
# ).fillna(0)
# for root, dirs, files in os.walk(data_location, topdown=False):
# for name in files:
# if "allFCMs" not in name and name != ".DS_Store":
# file_location = os.path.join(root, name)
# participant_organization = name.split("_")[-1].split(".")[0]
# participant_number = name.split("_")[0]
# df = pd.read_excel(file_location, index_col=0).fillna(0)
# df.columns = df.columns.map(concept_map)
# df.index = df.index.map(concept_map)
# print(
# "FCMs",
# "%sFCM - %s - %d" % (name, participant_organization, len(All_ADJs)),
# )
# print(all_data_frames.columns)
# take_not_zero = lambda s1, s2: s1 if s1.sum() != 0 else s2
# df_copy = all_data_frames.combine(
# df, take_not_zero, fill_value=0, overwrite=True
# )
# All_ADJs.append(
# df_copy.loc[all_data_frames.columns, all_data_frames.columns].values
# )
# fig, (ax, ax1) = plt.subplots(1, 2, figsize=(20, 10))
# plt.suptitle(
# "FCM - %s %s" % (participant_organization, participant_number),
# fontsize=14,
# )
# ax.set_title("Adjacency Matrix", fontsize=12)
# sns.heatmap(df, annot=True, linewidths=0.5, ax=ax, center=0, cmap=cmap)
# graph_stats = generate_map(df.values, df.columns, ax1)
# ax1.set_title("Fuzzy Cognitive Map", fontsize=12)
# graph_stats["type"] = participant_organization
# plt.tight_layout()
# save_path = os.path.join(
# save_location,
# "FCMs",
# "FCM - %s - %s" % (participant_organization, participant_number),
# )
# plt.savefig(save_path)
# df_graph_stats_data.append(graph_stats)
# df_graph_stats = pd.DataFrame(df_graph_stats_data)
def load_csv(file_path, concept_map=None):
    """
    Load a csv adjacency matrix as a fuzzy cognitive map.

    Parameters
    ----------
    file_path : str
        Path of the csv file; its first column becomes the row index.
    concept_map : dict, optional
        Mapping from the author's variable names to a standardized set of
        variables, applied to both axes when given (and non-empty).

    Returns
    -------
    pandas.DataFrame
        The adjacency matrix with missing weights filled with 0.
    """
    matrix = pd.read_csv(file_path, index_col=0).fillna(0)
    if concept_map:
        matrix.columns = matrix.columns.map(concept_map)
        matrix.index = matrix.index.map(concept_map)
    return matrix
def load_xlsx(file_path, concept_map=None):
    """
    Load an xlsx adjacency matrix as a fuzzy cognitive map.

    Parameters
    ----------
    file_path : str
        Path of the xlsx file; its first column becomes the row index.
    concept_map : dict, optional
        Mapping from the author's variable names to a standardized set of
        variables, applied to both axes when given (and non-empty).

    Returns
    -------
    pandas.DataFrame
        The adjacency matrix with missing weights filled with 0.
    """
    matrix = pd.read_excel(file_path, index_col=0).fillna(0)
    if concept_map:
        matrix.columns = matrix.columns.map(concept_map)
        matrix.index = matrix.index.map(concept_map)
    return matrix
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,425 | tomorrownow/PyFCM | refs/heads/master | /pyfcm/analysis/scenario.py | """
Created on Mon May 03 9:12:22 2021
@author: Corey White
North Carolina State University
ctwhite@ncsu.edu
"""
import matplotlib.pyplot as plt
# import xlrd
import numpy as np
import math
import networkx as nx
from fcm.analysis.tools import infer_steady, infer_scenario, reduce_noise
def scenario_analysis(
    data,
    columns,
    scenarios,
    noise_threshold=0,
    lambda_thres=0,
    principles=None,
    f_type="tanh",
    infer_rule="mk",
):
    """
    Run an FCM scenario by asking 'what if' questions.

    Parameters
    ----------
    data : numpy.ndarray
        Adjacency matrix of the fuzzy cognitive model.
    columns : pandas.core.indexes.base.Index
        Concept labels ordered to match the adjacency matrix.
    scenarios : dict
        Concepts to clamp during the scenario, keyed by concept name with the
        activation level in [-1, 1] as value, e.g. {'c1': 1} or {'c1': -1}.
    noise_threshold : float
        Sometimes you need to remove links with significantly low weights to
        avoid messiness. A number in [0, 1]; every edge with
        abs(weight) <= noise_threshold is removed from the FCM. (default 0)
    lambda_thres : int, optional
        Lambda steepness parameter of the squashing function, 0 - 10.
        (default 0)
    principles : list, optional
        Names of the key "indicator" concepts of the system. Retained for
        interface compatibility; this function returns changes for ALL
        concepts. (default None, treated as an empty list)
    f_type : str, optional
        Sigmoid = "sig", Hyperbolic Tangent = "tanh", Bivalent = "biv",
        Trivalent = "triv". (default "tanh" — the signature default; the old
        docstring incorrectly said "sig")
    infer_rule : str, optional
        Kosko = "k", Modified Kosko = "mk", Rescaled Kosko = "r".
        (default "mk")

    Returns
    -------
    dict
        Per-concept change between the scenario state and the free-running
        steady state, keyed by concept name; clamped scenario concepts are
        reported as 0.
    """
    # BUG FIX: `principles` defaults to None, and the old code iterated
    # `node_name[nod] in principles`, raising TypeError unless callers always
    # passed a list. Normalise it up front.
    principles = principles or []

    n_concepts = len(columns)
    adjmatrix = reduce_noise(data, n_concepts, noise_threshold)
    activation_vec = np.ones(n_concepts)
    concepts_matrix = [columns.values[i] for i in range(n_concepts)]

    # NOTE(review): the graph is built from the raw matrix rather than the
    # noise-reduced one; the node set (and hence the result keys) is the same
    # either way.
    G = nx.DiGraph(data)
    node_name = {nod: columns[nod] for nod in G.nodes()}

    # Map the scenario's concept names to their matrix indices.
    change_level_by_index = {
        concepts_matrix.index(concept): value for concept, value in scenarios.items()
    }
    scenario_concepts = list(change_level_by_index.keys())

    steady_state = infer_steady(
        init_vec=activation_vec,
        adjmatrix=adjmatrix.T,
        n=n_concepts,
        landa=lambda_thres,
        f_type=f_type,
        infer_rule=infer_rule,
    )
    scenario_state = infer_scenario(
        scenario_concept=scenario_concepts,
        change_level=change_level_by_index,
        f_type=f_type,
        infer_rule=infer_rule,
        init_vec=activation_vec,
        adjmatrix=adjmatrix.T,
        n=n_concepts,
        landa=lambda_thres,
    )

    # Difference between the clamped run and the steady state; the clamped
    # concepts themselves are reported as unchanged.
    change_in_all = scenario_state - steady_state
    for c in scenario_concepts:
        change_in_all[c] = 0

    # (Dead code removed: the old version also accumulated the changes of the
    # `principles` concepts into a list that was never returned or used.)
    return {node_name[nod]: change_in_all[nod] for nod in G.nodes()}
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,426 | tomorrownow/PyFCM | refs/heads/master | /fcm/analysis/sensitvity.py | """
Created on Fri Apr 30 15:51:12 2021
@author: Corey White
North Carolina State University
ctwhite@ncsu.edu
"""
import math
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from fcm.analysis import infer_steady, infer_scenario, reduce_noise
def sensitivity_analysis(
    data,
    columns,
    noise_threshold=0,
    lambda_thres=0,
    principles=None,
    list_of_consepts_to_run=None,
    f_type="sig",
    infer_rule="mk",
):
    """
    Run an FCM sensitivity analysis.

    For every concept in ``list_of_consepts_to_run``, sweep its clamped
    activation from 0 to 1 (21 steps), record how each principle concept moves
    relative to the steady state, and save one plot per swept concept as
    "<concept>.png".

    Parameters
    ----------
    data : numpy.ndarray
        Adjacency matrix of the fuzzy cognitive model.
    columns : pandas.core.indexes.base.Index
        Concept labels ordered to match the adjacency matrix.
    noise_threshold : float
        A number in [0, 1]; every edge with abs(weight) <= noise_threshold is
        removed from the FCM before simulation. (default 0)
    lambda_thres : int, optional
        Lambda steepness parameter of the squashing function, 0 - 10.
        (default 0)
    principles : list, optional
        Names of the key "indicator" concepts whose response is tracked and
        plotted. (default None, treated as an empty list)
    list_of_consepts_to_run : list, optional
        Names of the concepts to activate one at a time. (default None,
        treated as an empty list — no sweeps are run)
    f_type : str, optional
        Sigmoid = "sig", Hyperbolic Tangent = "tanh", Bivalent = "biv",
        Trivalent = "triv". (default "sig")
    infer_rule : str, optional
        Kosko = "k", Modified Kosko = "mk", Rescaled Kosko = "r".
        (default "mk")
    """
    # BUG FIX: both list parameters default to None, which previously crashed
    # the membership test / the sweep loop when callers omitted them.
    principles = principles or []
    list_of_consepts_to_run = list_of_consepts_to_run or []

    n_concepts = len(columns)
    adj_matrix = reduce_noise(data, n_concepts, noise_threshold)
    activation_vec = np.ones(n_concepts)

    # BUG FIX: this list was previously built with range(1, n_concepts), which
    # dropped the first concept and shifted every `.index()` lookup below by
    # one (scenario.py builds the same list over the full range).
    concepts_matrix = [columns.values[i] for i in range(n_concepts)]

    G = nx.DiGraph(data)
    # Label nodes with variable names.
    node_name = {nod: columns[nod] for nod in G.nodes()}
    G = nx.relabel_nodes(G, node_name)

    prin_concepts_index = [nod for nod in node_name if node_name[nod] in principles]

    steady_state = infer_steady(
        init_vec=activation_vec,
        adjmatrix=adj_matrix.T,
        n=n_concepts,
        landa=lambda_thres,
        f_type=f_type,
        infer_rule=infer_rule,
    )

    # Sweep each requested concept and plot the principles' responses.
    for sce_con_name in list_of_consepts_to_run:
        scenario_concept = concepts_matrix.index(sce_con_name)
        change_levels = np.linspace(0, 1, 21)
        change_in_principles = {pr: [] for pr in prin_concepts_index}
        for level in change_levels:
            scenario_state = infer_scenario(
                scenario_concept=scenario_concept,
                init_vec=activation_vec,
                adjmatrix=adj_matrix.T,
                n=n_concepts,
                landa=lambda_thres,
                f_type=f_type,
                infer_rule=infer_rule,
                change_level=level,
            )
            changes = scenario_state - steady_state
            for pr in prin_concepts_index:
                change_in_principles[pr].append(changes[pr])

        plt.clf()  # one fresh figure per swept concept
        for pr in prin_concepts_index:
            plt.plot(
                change_levels,
                change_in_principles[pr],
                "-o",
                markersize=3,
                label=node_name[pr],
            )
        plt.legend(fontsize=8)
        plt.xlabel("activation state of {}".format(sce_con_name))
        plt.ylabel("State of system principles")
        plt.savefig("{}.png".format(sce_con_name))
        plt.show()
43,427 | tomorrownow/PyFCM | refs/heads/master | /setup.py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 11:55:30 2021
@author: Corey White
"""
import setuptools
# Use the README as the PyPI long description; read it as UTF-8 explicitly so
# the build does not depend on the machine's locale default encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="FCM",
    version="1.0.0",
    author="Corey White",
    author_email="ctwhite@ncsu.edu",
    description="A package for FCM scenario analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/tomorrownow/PyFCM",
    packages=setuptools.find_packages(exclude=["tests*"]),
    # BUG FIX: recent setuptools validates that `classifiers` is a list; the
    # previous tuple literal triggers a metadata validation error.
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
    ],
)
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,428 | tomorrownow/PyFCM | refs/heads/master | /tests/test_load.py | import fcm
from fcm.load import load_csv, load_xlsx
from pandas.core.frame import DataFrame
import pytest
def test_load_csv(shared_datadir):
    """A csv adjacency matrix loads into a pandas DataFrame."""
    loaded = load_csv(shared_datadir / "test_adjacency_matrix.csv")
    assert isinstance(loaded, DataFrame)
def test_load_xlsx(shared_datadir):
    """load_xlsx should parse the sample adjacency-matrix workbook into a DataFrame."""
    df = load_xlsx(shared_datadir / "Adjacency_Matrix_Example.xlsx")
    assert isinstance(df, DataFrame)
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,429 | tomorrownow/PyFCM | refs/heads/master | /fcm/Clustering_FCMs/Clustering_FCMs.py | # # -*- coding: utf-8 -*-
# """
# Created on Sat Mar 31 16:04:39 2018
# @author: Payam Aminpour
# Michigan State University
# aminpour@msu.edu
# """
# import __init__ as init
# import matplotlib.pyplot as plt
# plt.rcdefaults()
# import matplotlib.pyplot as plt
# import xlrd
# import numpy as np
# import networkx as nx
# import math
# import random
# # ____________________________________________________________________________________
# file_location = init.file_location
# workbook = xlrd.open_workbook(file_location)
# sheet = workbook.sheet_by_index(0)
# n_concepts = sheet.nrows - 1
# n_participants = workbook.nsheets
# # ____________________________________________________________________________________
# ########## Creat a dictionary keys = name of participants; values = Adj Matrix
# Allparticipants = {}
# IDs = [] # each participant has a unique name or ID
# for i in range(0, n_participants):
# sheet = workbook.sheet_by_index(i)
# Adj_matrix = np.zeros((n_concepts, n_concepts))
# for row in range(1, n_concepts + 1):
# for col in range(1, n_concepts + 1):
# Adj_matrix[row - 1, col - 1] = sheet.cell_value(row, col)
# IDs.append(sheet.cell_value(0, 0))
# Allparticipants[sheet.cell_value(0, 0)] = Adj_matrix
# # ____________________________________________________________________________________
# def FCM(ID):
# """Generate an FCM in networkx format"""
# adj = Allparticipants[ID]
# FCM = nx.DiGraph(adj)
# return FCM
# def similarity(agent, FCM_Reference):
# """ how similar the FCM is to the FCM Reference"""
# def select_k(spectrum, minimum_energy=0.9):
# running_total = 0.0
# total = sum(spectrum)
# if total == 0.0:
# return len(spectrum)
# for i in range(len(spectrum)):
# running_total += spectrum[i]
# if running_total / total >= minimum_energy:
# return i + 1
# return len(spectrum)
# laplacian1 = nx.spectrum.laplacian_spectrum(agent.FCM.to_undirected())
# laplacian2 = nx.spectrum.laplacian_spectrum(FCM_Reference.to_undirected())
# k1 = select_k(laplacian1)
# k2 = select_k(laplacian2)
# k = min(k1, k2)
# similarity = sum((laplacian1[:k] - laplacian2[:k]) ** 2)
# return similarity
# # -------------------------------------------
# activation_vec = np.ones(n_concepts)
# def transform(x, n, f_type, landa=1):
# if f_type == "sig":
# x_new = np.zeros(n)
# for i in range(n):
# x_new[i] = 1 / (1 + math.exp(-landa * x[i]))
# return x_new
# if f_type == "tanh":
# x_new = np.zeros(n)
# for i in range(n):
# x_new[i] = math.tanh(landa * x[i])
# return x_new
# if f_type == "bivalent":
# x_new = np.zeros(n)
# for i in range(n):
# if x[i] > 0:
# x_new[i] = 1
# else:
# x_new[i] = 0
# return x_new
# if f_type == "trivalent":
# x_new = np.zeros(n)
# for i in range(n):
# if x[i] > 0:
# x_new[i] = 1
# elif x[i] == 0:
# x_new[i] = 0
# else:
# x_new[i] = -1
# return x_new
# def infer_steady(
# Adj, init_vec=activation_vec, n=n_concepts, f_type="tanh", infer_rule="k"
# ):
# act_vec_old = init_vec
# AdjmT = Adj.T
# resid = 1
# while resid > 0.00001:
# x = np.zeros(n)
# if infer_rule == "k":
# x = np.matmul(AdjmT, act_vec_old)
# if infer_rule == "mk":
# x = act_vec_old + np.matmul(AdjmT, act_vec_old)
# if infer_rule == "r":
# x = (2 * act_vec_old - np.ones(n)) + np.matmul(
# AdjmT, (2 * act_vec_old - np.ones(n))
# )
# act_vec_new = transform(x, n, f_type)
# resid = max(abs(act_vec_new - act_vec_old))
# if resid < 0.00001:
# break
# act_vec_old = act_vec_new
# return act_vec_new
# def infer_scenario(
# Scenario_concepts,
# level,
# Adj,
# init_vec=activation_vec,
# n=n_concepts,
# f_type="tanh",
# infer_rule="k",
# ):
# act_vec_old = init_vec
# AdjmT = Adj.T
# resid = 1
# while resid > 0.0001:
# act_vec_new = np.zeros(n)
# x = np.zeros(n)
# if infer_rule == "k":
# x = np.matmul(AdjmT, act_vec_old)
# if infer_rule == "mk":
# x = act_vec_old + np.matmul(AdjmT, act_vec_old)
# if infer_rule == "r":
# x = (2 * act_vec_old - np.ones(n)) + np.matmul(
# AdjmT, (2 * act_vec_old - np.ones(n))
# )
# act_vec_new = transform(x, n, f_type)
# for c in Scenario_concepts:
# act_vec_new[c] = level[c]
# resid = max(abs(act_vec_new - act_vec_old))
# # if resid < 0.0001:
# # break
# act_vec_old = act_vec_new
# return act_vec_new
# # -------------------------------------------
# def dynamic(agent, FCM_Reference, f_type, infer_rule):
# M = 0
# W = []
# SState = infer_steady(
# Allparticipants[agent.ID], f_type=f_type, infer_rule=infer_rule
# )
# SState_ref = infer_steady(FCM_Reference, f_type=f_type, infer_rule=infer_rule)
# iteration = 0
# for iter in range(10):
# for iter in range(100):
# rand = random.randint(1, n_concepts)
# com = random.sample(agent.FCM.nodes(), rand)
# Scenario_concepts = com
# my_random = {}
# for rC in Scenario_concepts:
# PN = random.choice([-1, 1])
# my_random[rC] = random.random() * PN
# iteration += 1
# ScenarioState = infer_scenario(
# Scenario_concepts,
# my_random,
# Allparticipants[agent.ID],
# f_type=f_type,
# infer_rule=infer_rule,
# )
# ScenarioState_ref = infer_scenario(
# Scenario_concepts,
# my_random,
# FCM_Reference,
# f_type=f_type,
# infer_rule=infer_rule,
# )
# Change = ScenarioState - SState
# Change_ref = ScenarioState_ref - SState_ref
# M += sum((Change[:] - Change_ref[:]) ** 2)
# M = (math.sqrt(M)) / iteration
# W.append(M)
# return np.mean(W)
# ########### A class of agents with FCMs and IDs############################
# class Agents(object):
# def __init__(self, ID):
# self.ID = ID
# self.FCM = FCM(self.ID)
# # ____________________________________________________________________________________
# """Here you generate n agents and give each agent an FCM"""
# agents = []
# n = n_participants
# for Id in IDs:
# a = Agents(ID=Id)
# agents.append(a)
# # ____________________________________________________________________________________
# """This Function is generating the reference FCM """
# def Fcm_Reference(How):
# if How == "AI":
# adj = np.zeros((n_concepts, n_concepts))
# for ag in agents:
# adj += nx.to_numpy_matrix(ag.FCM)
# FCM_Reference = adj / n_participants
# if How == "AX":
# adj = np.zeros((n_concepts, n_concepts))
# count = np.zeros((n_concepts, n_concepts))
# adj_ag = np.zeros((n_concepts, n_concepts))
# for ag in agents:
# Adj_matrix = np.zeros((n_concepts, n_concepts))
# for i in range(0, n_concepts):
# for j in range(0, n_concepts):
# Adj_matrix[i, j] = nx.to_numpy_matrix(ag.FCM)[i, j]
# if nx.to_numpy_matrix(ag.FCM)[i, j] != 0:
# count[i, j] += 1
# adj += Adj_matrix
# adj_copy = np.copy(adj)
# for i in range(n_concepts):
# for j in range(n_concepts):
# if count[i, j] == 0:
# adj_ag[i, j] = 0
# else:
# adj_ag[i, j] = adj_copy[i, j] / count[i, j]
# FCM_Reference = adj_ag
# if How == "O":
# FCM_Reference = np.ones((n_concepts, n_concepts))
# if How == "Z":
# FCM_Reference = np.zeros((n_concepts, n_concepts))
# return FCM_Reference
# # ____________________________________________________________________________________
# ######## You have to choose one way to generate a Reference FCM ###########
# FCM_Reference = Fcm_Reference(init.Aggregation_technique)
# # a dictionary with keys = agent.ID and values = simil index of the agent's FCM
# simil = {}
# if init.clustering_method == "D":
# f_type = input(
# "What is the type of Squashing function (sig , tanh , bivalent, trivalent)? "
# )
# infer_rule = input("What is the Inference Rule (k , mk , r)? ")
# for agent in agents:
# simil[agent.ID] = dynamic(agent, FCM_Reference, f_type, infer_rule)
# if init.clustering_method == "S":
# for agent in agents:
# simil[agent.ID] = similarity(agent, nx.DiGraph(FCM_Reference))
# # ____________________________________________________________________________________
# ################## K-Mean clustering ######################################
# from sklearn.cluster import KMeans
# X = np.array(list(simil.values()))
# n_clusters = init.n_clusters
# km = KMeans(n_clusters=n_clusters)
# km.fit(X.reshape(-1, 1))
# Indiv_Clusters = list(zip(list(simil.keys()), km.labels_))
# clusters = {}
# for i in range(n_clusters):
# clusters[i] = []
# for i in Indiv_Clusters:
# print(i[0], "is in cluster {}".format(i[1]))
# clusters[i[1]].append(simil[i[0]])
# plt.figure(figsize=(10, 3))
# plt.rc("xtick", labelsize=14)
# plt.rc("ytick", labelsize=0)
# for cl in range(n_clusters):
# plt.plot(clusters[cl], np.zeros_like(clusters[cl]), "x", markersize="8", label=cl)
# plt.legend()
# plt.savefig("Clusters.pdf")
# plt.show()
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
43,430 | tomorrownow/PyFCM | refs/heads/master | /fcm/analysis/tools.py | # Methods used while analyzing fuzzy cognitive models
# Copyright (C) 2018-2021 Corey White and others (see below)
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, see https://www.gnu.org/licenses/gpl-2.0.html
from enum import Enum
import numpy as np
import math
class SquashingFucntion(Enum):
    """Squashing (threshold) functions selectable via the ``f_type`` arguments."""
    # NOTE(review): the class name misspells "Function"; renaming it would
    # break existing importers, so it is left as-is.
    SIG = "sig"  # logistic sigmoid -> (0, 1)
    TANH = "tanh"  # hyperbolic tangent -> (-1, 1)
    BIV = "biv"  # bivalent step -> {0, 1}
    TRIV = "triv"  # trivalent sign -> {-1, 0, 1}
class InferenceRule(Enum):
    """FCM state-update rules selectable via the ``infer_rule`` arguments."""
    # NOTE(review): the docstrings below spell the author's name "Kasko";
    # the literature spelling is Kosko -- confirm intended wording.
    K = "k"  # Kosko: x = W . a
    MK = "mk"  # modified Kosko: x = a + W . a
    R = "r"  # rescaled Kosko: x = (2a - 1) + W . (2a - 1)
# Membership-check example: `"SIG" in SquashingFucntion.__members__`
def _infer_rule(n, act_vec_old, adjmatrix, infer_rule):
"""
Infer Rules
k = Kasko
mk = Modified Kasko
r = Rescaled Kasko
Parameters
----------
n : int
The number of concepts in the adjacency matrix.
act_vec_old : numpy.ndarray
Olde activation vector.
adjmatrix: numpy.ndarray
Transposed adjacency matrix of the fuzzy congintive model.
infer_rule : InferenceRule (Enum)
Kasko = "k", Modified Kasko = "mk", Rescaled Kasko = "r" (default is mk)
Returns
-------
Activation Vector : numpy.ndarray
"""
x = np.zeros(n)
if infer_rule == InferenceRule.K.value:
x = np.matmul(adjmatrix, act_vec_old)
elif infer_rule == InferenceRule.MK.value:
x = act_vec_old + np.matmul(adjmatrix, act_vec_old)
elif infer_rule == InferenceRule.R.value:
x = (2 * act_vec_old - np.ones(n)) + np.matmul(
adjmatrix, (2 * act_vec_old - np.ones(n))
)
else:
raise ValueError(
"An invalide inference rule was provide. Kasko = k, Modified Kasko = mk, Rescaled Kasko = r"
)
return x
def _transform(act_vect, n, f_type, landa):
"""
Squashing function applied to FCM
Parameters
----------
act_vect : numpy.ndarray
Activation vector after inference rule is applied.
n : int
The number of concepts in the adjacency matrix.
f_type : str
Sigmoid = "sig", Hyperbolic Tangent = "tanh", Bivalent = "biv", Trivalent = "triv"
landa : int
The lambda threshold value used in the squashing fuciton between 0 - 10
Returns
-------
Activation Vector : numpy.ndarray
"""
x_new = np.zeros(n)
if f_type == SquashingFucntion.SIG.value:
for i in range(n):
x_new[i] = 1 / (1 + math.exp(-landa * act_vect[i]))
return x_new
elif f_type == SquashingFucntion.TANH.value:
for i in range(n):
x_new[i] = math.tanh(landa * act_vect[i])
return x_new
elif f_type == SquashingFucntion.BIV.value:
for i in range(n):
if act_vect[i] > 0:
x_new[i] = 1
else:
x_new[i] = 0
return x_new
elif f_type == SquashingFucntion.TRIV.value:
for i in range(n):
if act_vect[i] > 0:
x_new[i] = 1
elif act_vect[i] == 0:
x_new[i] = 0
else:
x_new[i] = -1
return x_new
else:
raise ValueError(
"An invalide squashing function was provide. Please select Sigmoid = 'sig', Hyperbolic Tangent = 'tanh', Bivalent = 'biv', Trivalent = 'triv'"
)
def infer_steady(init_vec, adjmatrix, n, landa, f_type="sig", infer_rule="mk"):
    """
    Iterate the FCM update rule from ``init_vec`` until a steady state.

    Every concept Ci in the FCM graph has a value Ai expressing the quantity
    of its corresponding physical value. Ai is recomputed each simulation
    step from the influence of the other concepts, using one of the
    inference rules (k = Kasko, mk = Modified Kasko, r = Rescaled Kasko)
    followed by the chosen squashing function.

    Parameters
    ----------
    init_vec : numpy.ndarray
        Initial activation vector.
    adjmatrix : numpy.ndarray
        Matrix multiplied directly against the activation vector.
        NOTE(review): callers in this package pass the *transposed*
        adjacency matrix (``adj_matrix.T``) -- confirm before reuse.
    n : int
        The number of concepts in the adjacency matrix.
    landa : int
        The lambda threshold value used in the squashing function (0 - 10).
    f_type : str (optional)
        Sigmoid = "sig", Hyperbolic Tangent = "tanh", Bivalent = "biv",
        Trivalent = "triv" (default is "sig").
    infer_rule : str (optional)
        Kasko = "k", Modified Kasko = "mk", Rescaled Kasko = "r"
        (default is "mk").

    Returns
    -------
    numpy.ndarray
        Steady-state activation vector.
    """
    act_vec_old = init_vec
    resid = 1
    # Iterate until successive activation vectors differ by < 1e-5 in every
    # component. NOTE(review): an oscillating FCM never satisfies this and
    # the loop will not terminate -- consider an iteration cap.
    while resid > 0.00001:
        x = _infer_rule(n, act_vec_old, adjmatrix, infer_rule)
        act_vec_new = _transform(x, n, f_type, landa)
        resid = max(abs(act_vec_new - act_vec_old))
        act_vec_old = act_vec_new
    return act_vec_new
# TODO(review): merge the duplicated convergence loop shared by
# infer_steady and infer_scenario into a single helper.
def infer_scenario(
    scenario_concept,
    init_vec,
    adjmatrix,
    n,
    landa,
    f_type="sig",
    infer_rule="mk",
    change_level=1,
):
    """
    Run a "what if" scenario: iterate the FCM to steady state while the
    scenario concept(s) are clamped to a forced activation level.

    Inference rules: k = Kasko, mk = Modified Kasko, r = Rescaled Kasko.

    Parameters
    ----------
    scenario_concept : int or list
        Index of the scenario concept in the activation vector, or a list
        of indexes. Ignored when ``change_level`` is a dict.
    init_vec : numpy.ndarray
        Initial activation vector.
    adjmatrix : numpy.ndarray
        Matrix multiplied directly against the activation vector.
        NOTE(review): callers in this package pass the *transposed*
        adjacency matrix (``adj_matrix.T``) -- confirm before reuse.
    n : int
        The number of concepts in the adjacency matrix.
    landa : int
        The lambda threshold value used in the squashing function (0 - 10).
    f_type : str (optional)
        Sigmoid = "sig", Hyperbolic Tangent = "tanh", Bivalent = "biv",
        Trivalent = "triv" (default is "sig").
    infer_rule : str (optional)
        Kasko = "k", Modified Kasko = "mk", Rescaled Kasko = "r"
        (default is "mk").
    change_level : int, float or dict (optional)
        Forced activation level in [-1, 1] for the scenario concept(s);
        as a dict it maps concept index -> forced level (default is 1).

    Returns
    -------
    numpy.ndarray
        Steady-state activation vector under the clamped scenario.
    """
    act_vec_old = init_vec
    resid = 1
    while resid > 0.00001:
        x = _infer_rule(n, act_vec_old, adjmatrix, infer_rule)
        act_vec_new = _transform(x, n, f_type, landa)
        # Re-clamp the scenario concept(s) after every update so the
        # "what if" condition holds throughout the simulation. (The
        # per-iteration debug prints of the original were removed.)
        if isinstance(change_level, dict):
            for idx, level in change_level.items():
                act_vec_new[idx] = level
        else:
            # Works for a single index or a list of indexes (fancy indexing).
            act_vec_new[scenario_concept] = change_level
        resid = max(abs(act_vec_new - act_vec_old))
        act_vec_old = act_vec_new
    return act_vec_new
def reduce_noise(adjmatrix, n_concepts, noise_thresold):
    """
    Remove weak links from the FCM by zeroing low-weight edges.

    Sometimes you need to remove links with significantly low weights to
    avoid messiness. ``noise_thresold`` is a number in [0, 1] defining a
    boundary below which links are removed: e.g. ``noise_thresold = 0.15``
    removes every edge whose absolute weight is <= 0.15.

    Parameters
    ----------
    adjmatrix : numpy.ndarray
        Adjacency matrix of the fuzzy cognitive model; modified in place.
    n_concepts : int
        The number of concepts; only the leading
        ``n_concepts x n_concepts`` sub-matrix is filtered.
    noise_thresold : float
        Threshold in [0, 1]; edges with |weight| <= threshold are zeroed.

    Returns
    -------
    numpy.ndarray
        The same (mutated) adjacency matrix, for call chaining.
    """
    # Vectorized equivalent of the original 1-based double loop. The slice
    # is a view, so the masked assignment mutates `adjmatrix` in place.
    sub = adjmatrix[:n_concepts, :n_concepts]
    sub[np.abs(sub) <= noise_thresold] = 0
    return adjmatrix
| {"/tests/test_scenario.py": ["/fcm/load.py"], "/tests/test_analysis.py": ["/fcm/analysis/tools.py"], "/pyfcm/analysis/scenario.py": ["/fcm/analysis/tools.py"], "/tests/test_load.py": ["/fcm/load.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.