Compare commits

...

2 Commits

Author SHA1 Message Date
Shaun Reed 940d035638 Add Python project examples
+ Neural network CLI
+ Hidden Markov Model CLI
+ K-Means clustering CLI
+ Linear regression CLI
+ Screenshots, updated README instructions
2022-02-06 13:41:34 -05:00
Shaun Reed bbbf404340 [cmake-example] Update cmake project example
+ Rename `cmake` -> `cmake-example`
2022-01-31 08:55:25 -05:00
29 changed files with 2547 additions and 24 deletions


@@ -20,7 +20,7 @@ project(
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 add_subdirectory(algorithms)
-add_subdirectory(cmake)
+add_subdirectory(cmake-example)
 add_subdirectory(cryptography)
 add_subdirectory(datastructs)
 add_subdirectory(graphics)


@@ -1,6 +1,6 @@
 ###############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2021 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## About: A template project for getting started working with CMake ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##


@@ -1,6 +1,6 @@
 ###############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2021 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##
 ##############################################################################
@@ -10,8 +10,8 @@
 set(APP_SRC say-hello.cpp)
 # Add our executable, naming it and linking it to our source code
-add_executable(execute-hello ${APP_SRC})
+add_executable(say-hello ${APP_SRC})
 # Link to our custom library, defined in c-cmake/src/
-target_link_libraries(execute-hello lib-klips)
+target_link_libraries(say-hello lib-klips)


@@ -1,9 +1,9 @@
-/*#############################################################################
+/*##############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2019 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##
-##############################################################################
+################################################################################
 */
 #include <lib-klips.hpp>


@@ -1,9 +1,9 @@
-/*#############################################################################
+/*##############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2019 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##
-##############################################################################
+################################################################################
 */
 /**


@@ -1,6 +1,6 @@
 ###############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2021 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##
 ##############################################################################


@@ -1,9 +1,9 @@
-/*#############################################################################
+/*##############################################################################
 ## Author: Shaun Reed ##
-## Legal: All Content (c) 2019 Shaun Reed, all rights reserved ##
+## Legal: All Content (c) 2022 Shaun Reed, all rights reserved ##
 ## ##
 ## Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 ##
-##############################################################################
+################################################################################
 */
 #include <lib-klips.hpp>

python/README.md (new file, +13 lines)

@@ -0,0 +1,13 @@
This directory contains a collection of Python scripts and CLI tools that I've made.
Each of these projects provides a `requirements.txt` that can be used to
install the required Python packages and dependencies.
To install Python 3.9 and `pip`:
```bash
sudo apt install python3.9 python3-pip
python3.9 -m pip install -U pip
```
Additional setup instructions specific to each project are provided in that project's README.
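A typical per-project setup then looks like this (using the k-means project as the example here; the same pattern applies to the others):
```bash
cd python/k-means
python3.9 -m pip install -r requirements.txt
python3.9 k-means.py -h
```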

python/k-means/README.md (new file, +95 lines)

@@ -0,0 +1,95 @@
Install the required dependencies for the matplotlib GUI frontend and all other pip packages for this project:
```bash
sudo apt install python3-tk
python3.9 -m pip install -r requirements.txt
```
CLI to run the K-Means clustering algorithm on a set of data.
Data can be provided, or randomly generated for testing.
```bash
python3.9 k-means.py -h
usage: k-means.py [-h] [--data [X,Y ...]] [--seeds [X,Y ...]] [--silent] [--verbose] [--random] [--radius [RADIUS]]
[--lock-radius] [--file [FILE_PATH]]
[CLUSTER_COUNT] [CENTROID_SHIFT] [LOOP_COUNT]
K-means clustering program for clustering data read from a file, terminal, or randomly generated
positional arguments:
CLUSTER_COUNT Total number of desired clusters
(default: '2')
CENTROID_SHIFT Centroid shift threshold. If cluster centroids move less-than this value, clustering is finished
(default: '1.0')
LOOP_COUNT Maximum count of loops to perform clustering
(default: '3')
optional arguments:
-h, --help show this help message and exit
--data [X,Y ...], -d [X,Y ...]
A list of data points separated by spaces as: x,y x,y x,y ...
(default: '[(1.0, 2.0), (2.0, 3.0), (2.0, 2.0), (5.0, 6.0), (6.0, 7.0), (6.0, 8.0), (7.0, 11.0), (1.0, 1.0)]')
--seeds [X,Y ...], --seed [X,Y ...], -s [X,Y ...]
A list of seed points separated by spaces as: x,y x,y x,y ...
Number of seeds provided must match CLUSTER_COUNT, or else CLUSTER_COUNT will be overridden.
--silent When this flag is set, scatter plot visualizations will not be shown
(default: 'False')
--verbose, -v When this flag is set, cluster members will be shown in output
(default: 'False')
--random, -r When this flag is set, data will be randomly generated
(default: 'False')
--radius [RADIUS] Initial radius to use for clusters
(default: 'None')
--lock-radius, -l When this flag is set, centroid radius will not be recalculated
(default: 'False')
--file [FILE_PATH], -f [FILE_PATH]
Optionally provide file for data to be read from. Each point must be on its own line with format x,y
```
Running the k-means clustering program:
```bash
python3.9 k-means.py --file ./input.txt --silent
Finding K-means clusters for given data [(1.0, 2.0), (2.0, 3.0), (2.0, 2.0), (5.0, 6.0), (6.0, 7.0), (6.0, 8.0), (7.0, 11.0), (1.0, 1.0), (5.0, 5.0), (10.0, 10.0), (15.0, 15.0), (25.0, 25.0), (20.0, 20.0), (21.0, 21.0), (22.0, 22.0)]
Using 2 clusters, 1.0 max centroid shift, and 3 iterations
Clustering iteration 0
Updating cluster membership using cluster seeds, radius:
((5.0000, 5.0000), 10.6066)
((20.0000, 20.0000), 10.6066)
Outliers present: set()
Updated clusters ([(5.0, 5.0), (20.0, 20.0)]) with new centroids [(4.5, 5.5), (20.6, 20.6)]
New centroids [(4.5, 5.5), (20.6, 20.6)] shifted [0.7071, 0.8485] respectively
Showing final cluster result...
Initial cluster at (5.0000, 5.0000) moved to (4.5000, 5.5000)
Total shift: 0.7071
Final radius: 11.0365
Initial radius: 10.6066
Initial cluster at (20.0000, 20.0000) moved to (20.6000, 20.6000)
Total shift: 0.8485
Final radius: 11.0365
Initial radius: 10.6066
Stopping...
Cluster centroids have not shifted at least 1.0, clusters are stable
```
Running the k-means clustering program on some random example data shows the following visual output:
```bash
python3.9 k-means.py --random
# Output removed for GUI example
```
![](screenshot.png)
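For reference, the cluster-membership update that drives each iteration above reduces to a distance check against every cluster's radius. A minimal standalone sketch of that step, mirroring `cluster_data()` in `k-means.py` (simplified; the real CLI also tracks outliers and seed centroids):
```python
import math

def assign_members(data, seeds):
    """Assign each point to every cluster whose radius covers it."""
    clusters = {seed: [] for seed in seeds}
    for point in data:
        for seed, radius in seeds.items():
            # A point joins a cluster when its distance to the centroid
            # falls within that cluster's current radius.
            if math.dist(point, seed) <= radius:
                clusters[seed].append(point)
    return clusters

# Example with the two seeds from the sample run above
seeds = {(5.0, 5.0): 10.6066, (20.0, 20.0): 10.6066}
print(assign_members([(1.0, 2.0), (22.0, 22.0)], seeds))
```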

python/k-means/input.txt (new file, +15 lines)

@@ -0,0 +1,15 @@
1,2
2,3
2,2
5,6
6,7
6,8
7,11
1,1
5,5
10,10
15,15
25,25
20,20
21,21
22,22

python/k-means/k-means.py (new file, +438 lines)

@@ -0,0 +1,438 @@
################################################################################
# Author: Shaun Reed #
# About: K-Means clustering CLI #
# Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 #
################################################################################
from ast import literal_eval
from itertools import chain
from matplotlib import pyplot as plt
from typing import List
import argparse
import math
import numpy as np
import random
import sys
################################################################################
# CLI Argument Parser
################################################################################
# ==============================================================================
def init_parser():
parser = argparse.ArgumentParser(
description='K-means clustering program for clustering data read from a file, terminal, or randomly generated',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'clusters', metavar='CLUSTER_COUNT', type=int, nargs='?',
help=
'''Total number of desired clusters
(default: '%(default)s')
''',
default=2
)
parser.add_argument(
'shift', metavar='CENTROID_SHIFT', type=float, nargs='?',
help=
'''Centroid shift threshold. If cluster centroids move less-than this value, clustering is finished
(default: '%(default)s')
''',
default=1.0
)
parser.add_argument(
'loops', metavar='LOOP_COUNT', type=int, nargs='?',
help=
'''Maximum count of loops to perform clustering
(default: '%(default)s')
''',
default=3
)
parser.add_argument(
'--data', '-d', metavar='X,Y', type=point, nargs='*',
help=
'''A list of data points separated by spaces as: x,y x,y x,y ...
(default: '%(default)s')
''',
default=[(1.0, 2.0), (2.0, 3.0), (2.0, 2.0), (5.0, 6.0), (6.0, 7.0), (6.0, 8.0), (7.0, 11.0), (1.0, 1.0)]
)
parser.add_argument(
'--seeds', '--seed', '-s', metavar='X,Y', type=point, nargs='*',
help=
'''A list of seed points separated by spaces as: x,y x,y x,y ...
Number of seeds provided must match CLUSTER_COUNT, or else CLUSTER_COUNT will be overridden.
''',
)
parser.add_argument(
'--silent', action='store_true',
help=
'''When this flag is set, scatter plot visualizations will not be shown
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--verbose', '-v', action='store_true',
help=
'''When this flag is set, cluster members will be shown in output
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--random', '-r', action='store_true',
help=
'''When this flag is set, data will be randomly generated
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--radius', metavar='RADIUS', type=float, nargs='?',
help=
'''Initial radius to use for clusters
(default: '%(default)s')
''',
default=None
)
parser.add_argument(
'--lock-radius', '-l', action='store_true',
help=
'''When this flag is set, centroid radius will not be recalculated
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--file', '-f', metavar='FILE_PATH', nargs='?', type=open,
help=
'''Optionally provide file for data to be read from. Each point must be on its own line with format x,y
''',
)
return parser
################################################################################
# Helper Functions
################################################################################
# ==============================================================================
def point(arg):
"""
Helper function for parsing x,y points provided through argparse CLI
:param arg: A single argument passed to an option or positional argument
:return: A tuple (x, y) representing a data point
"""
try:
x, y = literal_eval(arg)
return float(x), float(y) # Cast all point values to float
except:
raise argparse.ArgumentTypeError("Please provide data points in x,y format")
def random_data():
"""
Generates random data points for testing clustering
:return: A list of random data point tuples [(1, 1), (2, 4), ...]
"""
data_size = random.randint(50, random.randint(100, 200))
data = []
for x in range(0, data_size):
data.append((random.randint(0, 100), random.randint(0, 100)))
return data
def round_points(points, precision=4):
"""
Rounds all points in a list to a given decimal place
:param points: A list of data points to round to requested decimal place
:param precision: The decimal place to round to
:return: A list of points where (x, y) has been rounded to match requested precision value
"""
points = [(round(x, precision), round(y, precision)) for x,y in points]
return points
################################################################################
# K-means Clustering
################################################################################
# ==============================================================================
def select_seeds(data):
"""
Randomly select N seeds where N is the number of clusters requested through the CLI
:param data: A list of data points [(0, 1), (2, 2), (1, 4), ...]
:return: Dictionary of {seeds: radius}; For example {(2, 2): 5.0, (1, 4): 5.0}
"""
assert(len(data) > context.clusters)
x, y = zip(*data)
seeds = {} # Store seeds in a dictionary<seed, radius>
for i in range(0, context.clusters):
while True:
new_seed = data[random.randint(0, len(data) - 1)]
if new_seed not in seeds:
break
seeds[new_seed] = i if not context.radius else context.radius
if context.radius:
# An initial radius was provided and applied. Use it.
return seeds
else:
# No initial radius was provided, so calculate one
return update_clusters(seeds)
def points_average(data):
"""
Finds average (x, y) for points in data list [(x, y), (x, y), ...]
Used for updating cluster centroid positions
:param data: List [(x, y), (x, y), ...]
:return: An average (x, y) position for the list of points
"""
x, y = 0, 0
for pair in data:
x += pair[0]
y += pair[1]
x = float(x / len(data))
y = float(y / len(data))
return x, y
def update_clusters(seeds, clusters=None):
"""
Seeds {(x, y), radius} for clusters must be provided
If no clusters {(x, y), [members, ...]} are provided, initialize cluster radius given seeds
If clusters are provided, update centroids and radius
:param seeds: Dictionary of {cluster_seed: radius}; Example {(x, y), radius, (x, y): radius, ...}
:param clusters: Dictionary of {cluster_seed: member_list}; Example {(x, y): [(x, y), (x, y), ...], ...}
:return: Cluster seeds dictionary with updates positions and radius values
"""
radius = sys.maxsize
new_seeds = dict()
if clusters is None: # If we only provided seeds, initialize their radius
for seed in seeds:
for other_seed in seeds.copy():
if other_seed == seed:
continue
dist = math.dist(seed, other_seed)
# Track the smallest distance between 2 centroids
radius = dist if dist < radius else radius
# Update all seeds to the initial cluster radius
radius /= 2
for seed in seeds:
seeds[seed] = radius
else:
# Update centroid positions for clusters if they were provided
for centroid, members in clusters.items():
cluster_data = set(members) | {centroid}
avgX, avgY = points_average(cluster_data)
new_seeds[tuple((avgX, avgY))] = seeds[centroid]
# If we have passed the CLI flag to lock cluster radius, return new seeds without updating radius
# + If we have not passed the -l flag, update cluster radius
seeds = new_seeds if context.lock_radius else update_clusters(new_seeds)
return seeds
def cluster_data(data, seeds):
"""
Runs K-Means clustering on some provided data using a dictionary of cluster seeds {centroid: radius}
:param data: A list of data points to cluster [(x, y), (x, y), ...]
:param seeds: Dictionary of cluster centroid positions and radius {centroid: radius}
:return: Dictionary of final clusters found {centroid: member_list, ...} and updated seeds dictionary
"""
outliers = set()
clusters = {}
for seed in seeds: # Initialize empty clusters for each seed
# If centroid is a data point, it is also a member of the cluster
clusters[seed] = [seed] if seed in data else []
print(f'Updating cluster membership using cluster seeds, radius: ')
for seed, radius in seeds.items():
print(f'\t(({seed[0]:.4f}, {seed[1]:.4f}), {radius:.4f})')
# For each point, calculate the distance from all seeds
for point in data:
for seed, radius in seeds.items():
if point is seed: # Do not check for distance(point, point)
continue
dist = math.dist(point, seed)
if dist <= radius: # If the distance from any cluster is within range, add point to the cluster
# This print statement is noisy, but it can be uncommented to see output for each new cluster member
# print(f'{point} added to cluster {seed}\n\tDistance ({dist}) is within radius ({radius})')
# Take union of point and cluster data
clusters.update({seed: list(set(clusters[seed]) | set([point]))})
# Initialize outliers using difference between sets
outliers = set(data) - (set(chain(*clusters.values())) | set(clusters.keys()))
print(f'Outliers present: {outliers}')
return clusters, seeds
def show_clusters(data, seeds, plot, show=True):
"""
Shows clusters using matplotlib
:param data: Data points to draw on the scatter plot
:param seeds: Cluster seed dictionary {centroid: radius, ...}
:param plot: The subplot to plot data on
:param show: Toggles displaying a window for the plot.
Allows two plots to be drawn on the same subplot and then shown together using a subsequent call to plt.show()
"""
dataX, dataY = zip(*data)
plot.set_aspect(1. / plot.get_data_ratio())
plot.scatter(dataX, dataY, c='k')
# Draw circles for clusters
cs = []
while len(cs) < context.clusters: # Ensure we have enough colors to display all clusters
cs.extend(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
for seed, radius, c in zip(seeds.keys(), seeds.values(), cs):
plot.scatter(seed[0], seed[1], color=c)
circle = plt.Circle(seed, radius, alpha=0.25, color=c)
plot.add_patch(circle)
plot.grid()
if show:
print(f'Close window to update centroid positions and re-cluster data...')
plt.show()
def print_cluster_info(initial_clusters, seeds, centroid_diff):
"""
Outputs some information on clusters after each iteration
:param initial_clusters: The clusters as they were before reclustering
:param seeds: The new seeds dictionary {centroid: radius, ...}
:param centroid_diff: List of difference in centroid positions for each cluster
"""
for initial_point, initial_radius, updated, radius, dist in\
zip(initial_clusters.keys(), initial_clusters.values(), seeds.keys(), seeds.values(), centroid_diff):
print(f'Initial cluster at ({initial_point[0]:.4f}, {initial_point[1]:.4f}) '
f'moved to ({updated[0]:.4f}, {updated[1]:.4f})'
f'\n\tTotal shift: {dist:.4f}'
f'\n\tFinal radius: {radius:.4f}')
if initial_radius != radius:
print(f'\tInitial radius: {initial_radius:.4f}')
################################################################################
# Main
################################################################################
# ==============================================================================
def main(args: List[str]):
parser = init_parser()
global context
context = parser.parse_args(args[1:])
if context.file: # If a file was provided, use that data instead
context.data = [literal_eval(line.rstrip()) for line in context.file]
context.data = [(float(x), float(y)) for x, y in context.data]
elif context.random: # If random flag was set, randomly generate some data
print("TODO: Randomly generate data")
context.data = random_data()
print(
f'Finding K-means clusters for given data {context.data}\n'
f'\tUsing {context.clusters} clusters, {context.shift} max centroid shift, and {context.loops} iterations'
)
seeds = {}
if context.seeds: # Enforce CLUSTER_COUNT matching initial number of seeds
context.clusters = len(context.seeds)
seeds = update_clusters(dict.fromkeys(context.seeds, 0))
else: # Select 2 random seeds once, before we enter clustering loop
seeds = select_seeds(context.data)
# Save a copy of the initial clusters to show comparison at the end
initial_clusters = seeds.copy()
for loop in range(0, context.loops):
print(f'\nClustering iteration {loop}')
plt.title(f'Cluster iteration {loop}')
# Check distance from all points to seed
clusters, seeds = cluster_data(context.data, seeds)
if loop > 0: # The initial graph has no centroid shift to print
# If we are on any iteration beyond the first, print updated cluster information
# + The first iteration shows initial data, since it has no updated data yet
print_cluster_info(prev_centroids, seeds, centroid_diff)
if context.verbose:
print(f'Cluster members:')
for member in [f'{np.round(cent, 4)}: {members}' for cent, members in clusters.items()]:
print(member)
elif loop == 0 and not context.silent:
# If we are on the first iteration, show the initial data provided through CLI
print(
f'Showing initial data with {context.clusters} clusters '
f'given seed points {round_points(seeds.keys())}'
)
# Show the plot for every iteration if it is not suppressed by the CLI --silent flag
if not context.silent:
show_clusters(context.data, seeds, plt.subplot())
# Update centroids for new cluster data
prev_centroids = seeds.copy()
seeds = update_clusters(seeds, clusters)
print(
f'\nUpdated clusters ({round_points(prev_centroids.keys())}) '
f'with new centroids {round_points(seeds.keys())}'
)
# Find the difference in position for all centroids using their previous and current positions
centroid_diff = [round(math.dist(prev, curr), 4) for prev, curr in
list(zip(prev_centroids.keys(), seeds.keys()))]
print(f'New centroids {round_points(seeds.keys())} shifted {centroid_diff} respectively')
# If any centroid has moved more than context.shift, the clusters are not stable
stable = not any((diff > context.shift for diff in centroid_diff))
if stable: # If centroid shift is not > context.shift, centroids have not changed
break # Stop re-clustering process and show final result
print("\n\nShowing final cluster result...")
centroid_diff = [round(math.dist(prev, curr), 4) for prev, curr in
list(zip(initial_clusters.keys(), seeds.keys()))]
print_cluster_info(initial_clusters, seeds, centroid_diff)
# If the clusters reached a point where they were stable, show output to warn
if stable:
print(
f'\nStopping...\n'
f'Cluster centroids have not shifted at least {context.shift}, clusters are stable'
)
if not context.silent:
# Create a side-by-side subplot to compare first iteration with final clustering results
print(f'Close window to exit...')
f, arr = plt.subplots(1, 2)
arr[0].set_title(f'Cluster {0} (Initial result)')
show_clusters(context.data, initial_clusters, arr[0], False)
arr[1].set_title(f'Cluster {loop} (Final result)')
show_clusters(context.data, seeds, arr[1], False)
plt.show()
if __name__ == "__main__":
sys.exit(main(sys.argv))


@@ -0,0 +1,2 @@
matplotlib==3.5.0
numpy==1.21.4

Binary file not shown (new image: 44 KiB).


@@ -0,0 +1,65 @@
Install the required dependencies for the matplotlib GUI frontend and all other pip packages for this project:
```bash
sudo apt install python3-tk
python3.9 -m pip install -r requirements.txt
```
Given a set of tuple `(X,Y)` data points as `[(X, Y), .., (X, Y)]`, determine the
best-fitting line, and then apply this projection to predict the dependent `Y`
value using an independent `GIVEN_X` value.
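For reference, the quantities printed in the runs below follow the standard least-squares fit: with sample means $(\bar{x}, \bar{y})$, standard deviations $(\sigma_x, \sigma_y)$, and correlation $r = \mathrm{cov}(x, y)/(\sigma_x \sigma_y)$, the fitted line is

$$Y = BX + A, \qquad B = r \cdot \frac{\sigma_y}{\sigma_x}, \qquad A = \bar{y} - B\bar{x}$$

which is what `linear-regression.py` computes before plotting.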
```bash
python3.9 linear-regression.py -h
usage: linear-regression.py [-h] [--silent] [--file [FILE_PATH]] [GIVEN_X] [X,Y ...]
Find most fitting line plot for given data points and predict value given some X
positional arguments:
GIVEN_X Value for X for prediction using linear regression
(default: '4.5')
X,Y A list of data points separated by spaces as: x,y x,y x,y ...
(default: '[(1, 3), (2, 7), (3, 5), (4, 9), (5, 11), (6, 12), (7, 15)]')
optional arguments:
-h, --help show this help message and exit
--silent When this flag is set, line plot visualization will not be shown
(default: 'False')
--file [FILE_PATH], -f [FILE_PATH]
Optionally provide file for data to be read from. Each point must be on its own line with format x,y
```
Running the linear regression program:
```bash
python3.9 linear-regression.py --file ./input.txt --silent
Finding fitting line plot for given data [(1, 3), (2, 7), (3, 5), (4, 9), (5, 11), (6, 12), (7, 15)]
points_avg: (5.117647058823529, 5.235294117647059)
variance: (241.76470588235296, 193.05882352941177)
sigma: (3.887196176892422, 3.4736402333270258)
covariance: 0.8455882352941174
correlation: 0.0626235432924427
Our line Y = BX + A must pass through the point (5.117647058823529, 5.235294117647059)
Y = (0.05596107055961069)X + 4.9489051094890515
For X = 4.5, Y is predicted to be 5.200729927007299
```
By default, the following linear regression is calculated and displayed:
```bash
python3.9 linear-regression.py
Finding fitting line plot for given data [(1, 3), (2, 7), (3, 5), (4, 9), (5, 11), (6, 12), (7, 15)]
points_avg: (4.0, 8.857142857142858)
variance: (28.0, 104.85714285714286)
sigma: (2.160246899469287, 4.180453381654971)
covariance: 8.666666666666666
correlation: 0.9596775116832306
Our line Y = BX + A must pass through the point (4.0, 8.857142857142858)
Y = (1.8571428571428565)X + 1.4285714285714315
For X = 4.5, Y is predicted to be 9.785714285714285
```
![](screenshot.png)


@@ -0,0 +1,17 @@
1,2
2,3
2,2
5,6
6,7
6,8
7,11
1,1
2,6
4,8
6,1
3,2
15,5
10,2
2,10
11,4
4,11


@@ -0,0 +1,198 @@
################################################################################
# Author: Shaun Reed #
# About: Linear regression CLI #
# Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 #
################################################################################
from ast import literal_eval
from matplotlib import pyplot as plt
from typing import List
import argparse
import math
import numpy as np
import sys
################################################################################
# Commandline Argument Parser
################################################################################
# ==============================================================================
def init_parser():
parser = argparse.ArgumentParser(
description='Find most fitting line plot for given data points and predict value given some X',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'given', metavar='GIVEN_X', type=float, nargs='?',
help=
'''Value for X for prediction using linear regression
(default: '%(default)s')
''',
default=4.5
)
parser.add_argument(
'data', metavar='X,Y', type=point, nargs='*',
help=
'''A list of data points separated by spaces as: x,y x,y x,y ...
(default: '%(default)s')
''',
default=[(1, 3), (2, 7), (3, 5), (4, 9), (5, 11), (6, 12), (7, 15)]
)
parser.add_argument(
'--silent', action='store_true',
help=
'''When this flag is set, line plot visualization will not be shown
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--file', '-f', metavar='FILE_PATH', nargs='?', type=open,
help=
'''Optionally provide file for data to be read from. Each point must be on its own line with format x,y
''',
)
return parser
def point(arg):
"""
Helper function for parsing x,y points provided through argparse CLI
:param arg: A single argument passed to an option or positional argument
:return: A tuple (x, y) representing a data point
"""
try:
x, y = literal_eval(arg)
return x, y
except:
raise argparse.ArgumentTypeError("Please provide data points in x,y format")
################################################################################
# Linear Regression Calculation
################################################################################
# ==============================================================================
def points_average(data):
"""
Finds average (x, y) for points in data list [(x, y), (x, y), ...]
Used for updating cluster centroid positions
:param data: List [(x, y), (x, y), ...]
:return: An average (x, y) position for the list of points
"""
x, y = 0, 0
for pair in data:
x += pair[0]
y += pair[1]
x = float(x / len(data))
y = float(y / len(data))
return x, y
def points_variance(data, points_avg):
"""
Find variance for a series of data points
:param data: List of data points [(x, y), (x, y), ...]
:param points_avg: Average (x, y) position for the list of points in data
:return: Variance of X and Y for the data set as a tuple (x, y)
"""
x, y = 0, 0
for point in data:
x += math.pow((point[0] - points_avg[0]), 2)
y += math.pow((point[1] - points_avg[1]), 2)
return x, y
def points_covariance(data, points_avg):
"""
Find covariance between X, Y within the data set
:param data: List of data points [(x, y), (x, y), ...]
:param points_avg: Tuple of average X, Y for data set list
:return: Single float value representing covariance
"""
cov = 0
for point in data:
cov += (point[0] - points_avg[0]) * (point[1] - points_avg[1])
return float(cov / (len(data) - 1))
def show_regression(data, beta, alpha):
"""
Shows the linear regression in the matplotlib subplot
Line drawn with Y = BX + A
:param data: Data to show on the scatter plot
:param beta: Value for B in the line equation
:param alpha: Value for A in the line equation
"""
dataX, dataY = zip(*data)
scaleX = np.linspace(min(dataX) - 1, max(dataX) + 1, 100)
scaleY = beta * scaleX + alpha
plt.plot(scaleX, scaleY, c='g')
plt.scatter(dataX, dataY, c='k')
print(f'For X = {context.given}, Y is predicted to be {beta * context.given + alpha} ')
plt.scatter(context.given, beta * context.given + alpha, c='#e6e600')
plt.show()
################################################################################
# Main
################################################################################
# ==============================================================================
def main(args: List[str]):
parser = init_parser()
global context
context = parser.parse_args(args[1:])
print(f'Finding fitting line plot for given data {context.data}')
if context.file: # If a file was provided, use that data instead
context.data = [literal_eval(line.rstrip()) for line in context.file]
context.data = [(float(x), float(y)) for x, y in context.data]
# Find the average for the data X and Y points
data_avg = points_average(context.data)
print(f'points_avg: {data_avg}')
# Find the variance for the data X and Y points
data_variance = points_variance(context.data, data_avg)
print(f'variance: {data_variance}')
# Find the standard deviations for X and Y values
data_sigma = (math.sqrt(float(data_variance[0] / (len(context.data) - 1))),
math.sqrt(float(data_variance[1] / (len(context.data) - 1))))
print(f'sigma: {data_sigma}')
# Find the covariance between X, Y within data set
data_covariance = points_covariance(context.data, data_avg)
print(f'covariance: {data_covariance}')
# Find correlation between X, Y within data set
data_correlation = (1.0/math.prod(data_sigma)) * data_covariance
print(f'correlation: {data_correlation}')
# Find equation for linear regression for the given data set
print(f'Our line Y = BX + A must pass through the point {data_avg}')
data_beta = data_correlation * float(data_sigma[1] / data_sigma[0])
data_alpha = data_avg[1] - data_beta * data_avg[0]
print(f'Y = ({data_beta})X + {data_alpha}')
# Show the final graph produced by linear regression calculations
# + Predicts the Y value, given the X value provided through the CLI
if not context.silent:
show_regression(context.data, data_beta, data_alpha)
if __name__ == "__main__":
sys.exit(main(sys.argv))

Binary file not shown (new image: 30 KiB).


@@ -0,0 +1,119 @@
Install the required dependencies for the matplotlib GUI frontend and all other pip packages for this project:
```bash
sudo apt install python3-tk
python3.9 -m pip install -r requirements.txt
```
CLI tool to determine the most probable path through a Hidden Markov Model, given an observation sequence of emissions.
Since this is just an example of an HMM, a graph can be automatically generated by specifying only the node count.
Edges and weights connecting the nodes will be randomly assigned.
If required, an input graph can be provided through the JSON configuration option.
See the provided examples of JSON input files for more detail on the options available.
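Each candidate path is scored as the product of the emission and transition probabilities along it, which is what `find_probability` in `markov-model.py` computes: for hidden states $s_1, \dots, s_n$ and observed emissions $o_1, \dots, o_n$,

$$P(s_1, \dots, s_n) = \left[\prod_{i=1}^{n-1} e(s_i, o_i)\, t(s_i, s_{i+1})\right] e(s_n, o_n)$$

where $t$ is the transition matrix and $e$ is the emission matrix. The highest-scoring path is reported as the most probable.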
```bash
python3.9 markov-model.py -h
usage: markov-model.py [-h] [--nodes [GRAPH_NODE_COUNT]] [--edges [GRAPH_EDGE_COUNT]] [--show-all] [--interactive] [--silent]
[--file [FILE_PATH]]
[OBSERVATION_SEQUENCE ...]
Calculates most probable path of HMM given an observation sequence
positional arguments:
OBSERVATION_SEQUENCE An observation sequence to calculate the most probable path
(default: '['A', 'B', 'D', 'C']')
optional arguments:
-h, --help show this help message and exit
--nodes [GRAPH_NODE_COUNT], -n [GRAPH_NODE_COUNT]
The total number of node states in the HMM graph
(default: '4')
--edges [GRAPH_EDGE_COUNT], -e [GRAPH_EDGE_COUNT]
The total number of edges in the HMM graph
(default: '8')
--show-all When this flag is set, all path probabilities and their calculations will be output
(default: 'False')
--interactive Allow taking input to update matrices with triple (row, col, value)
(default: 'False')
--silent When this flag is set, final graph will not be shown
(default: 'False')
--file [FILE_PATH], -f [FILE_PATH]
Optionally provide a JSON file to configure the HMM graph, transition matrix, and emission matrix
```
Running the HMM with a graph of 4 nodes, 8 edges, and random transition/emission matrices.
Note that a sequence can sometimes have no possible path, due to either a constrained transition matrix or a limited emission matrix.
```bash
python3.9 markov-model.py --nodes 4 --edges 8 --show-all A B D C G --silent
1->3: 0.89
1->0: 0.6
3->3: 0.81
3->1: 0.29
0->2: 0.67
0->1: 0.89
2->0: 0.12
2->1: 0.41
Calculating (0, 2, 1, 0, 2): (0.98 * 0.67) * (0.74 * 0.41) * (0.22 * 0.60) * (0.22 * 0.67) * 0.36 = 0.001395
Calculating (0, 2, 1, 3, 3): (0.98 * 0.67) * (0.74 * 0.41) * (0.22 * 0.89) * (0.11 * 0.81) * 0.52 = 0.001807
Finding most probable path for given observation sequence: ['A', 'B', 'D', 'C', 'G']
Total nodes in graph: 4
Total edges in graph: 8
Number of sequences: 5
Interactive mode: False
Emitting nodes: {'A': [0, 2], 'B': [1, 2], 'C': [0, 2, 3], 'D': [1, 2], 'G': [0, 2, 3]}
Transition matrix:
[[0. 0.89 0.67 0. ]
[0.6 0. 0. 0.89]
[0.12 0.41 0. 0. ]
[0. 0.29 0. 0.81]]
Emission matrix:
[[ 0.98 0. 0.22 0. 0.11]
[ 0. 0.1 -0. 0.22 0. ]
[ 0.67 0.74 0.46 0.62 0.36]
[-0. 0. 0.11 0. 0.52]]
Final paths sorted by probability:
(0, 2, 1, 3, 3) has probability: 0.001807
(0, 2, 1, 0, 2) has probability: 0.001395
```
By default, a random Hidden Markov Model and visualization will be generated:
```bash
python3.9 markov-model.py
Finding most probable path for given observation sequence: ['A', 'B', 'D', 'C']
Total nodes in graph: 4
Total edges in graph: 8
Number of sequences: 4
Interactive mode: False
Emitting nodes: {'A': [0, 2, 3], 'B': [1, 2, 3], 'C': [0, 3], 'D': [1, 2]}
Transition matrix:
[[0. 0. 0.31 0. ]
[0.55 0.25 0. 0. ]
[0.79 0.47 0. 0.12]
[0.92 0. 0.81 0. ]]
Emission matrix:
[[0.45 0. 0.4 0. ]
[0. 0.89 0. 0.51]
[0.12 0.24 0. 0.78]
[0.08 0.42 0.96 0. ]]
(0, 2, 1, 0) has the highest probability of 0.00176553432
```
![](screenshot.png)


@@ -0,0 +1,16 @@
{
"sequence": ["A", "B", "D", "C"],
"nodes": 4,
"edges": 10,
"interactive": true,
"transition_matrix": [
[0.2, 0.7, 0.0],
[0.0, 0.0, 0.7],
[0.2, 0.3, 0.0]
],
"emission_matrix": [
[0.7, 0.3, 0.0, 0.0],
[0.2, 0.2, 0.4, 0.2],
[0.0, 0.0, 0.2, 0.8]
]
}
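To run the model with a JSON configuration like the one above (the filename here is assumed for illustration):
```bash
python3.9 markov-model.py --file input.json
```
Keys present in the file override the matching CLI defaults, and `"interactive": true` enables the matrix-update prompts after the first run.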


@@ -0,0 +1,481 @@
################################################################################
# Author: Shaun Reed #
# About: HMM implementation to calculate most probable path for sequence #
# Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 #
################################################################################
from matplotlib import pyplot as plt
from typing import List
import argparse
import itertools
import json
import networkx as nx
import numpy as np
import random
import sys
################################################################################
# CLI Argument Parser
################################################################################
# ==============================================================================
def init_parser():
parser = argparse.ArgumentParser(
description='Calculates most probable path of HMM given an observation sequence',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'sequence', metavar='OBSERVATION_SEQUENCE', nargs='*',
help=
'''An observation sequence to calculate the most probable path
(default: '%(default)s')
''',
default=['A', 'B', 'D', 'C']
)
parser.add_argument(
'--nodes', '-n', metavar='GRAPH_NODE_COUNT', type=int, nargs='?',
help=
'''The total number of node states in the HMM graph
(default: '%(default)s')
''',
default=4
)
parser.add_argument(
'--edges', '-e', metavar='GRAPH_EDGE_COUNT', type=int, nargs='?',
help=
'''The total number of edges in the HMM graph
(default: '%(default)s')
''',
default=8
)
parser.add_argument(
'--show-all', action='store_true',
help=
'''When this flag is set, all path probabilities and their calculations will be output
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--interactive', action='store_true',
help=
'''Allow taking input to update matrices with triple (row, col, value)
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--silent', action='store_true',
help=
'''When this flag is set, final graph will not be shown
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--file', '-f', metavar='FILE_PATH', nargs='?', type=open,
help=
'''Optionally provide a JSON file to configure the HMM graph, transition matrix, and emission matrix
''',
)
return parser
################################################################################
# Helper Functions
################################################################################
# ==============================================================================
def parse_file():
"""
Validates keys in JSON file and updates CLI input context
Initializes a MultiDiGraph object using input data model_graph
Initializes a matrix of emission probabilities emission_matrix
:return: model_graph, emission_matrix
"""
# Load the JSON input file, validate keys
file_data = json.load(context['file'])
for key in file_data:
if key == "transition_matrix" or key == "emission_matrix":
continue
assert key in context
# Update the CLI context with JSON input
context.update(file_data)
model_graph = nx.MultiDiGraph(build_graph(np.array(file_data['transition_matrix'])))
emission_matrix = np.array(file_data['emission_matrix'])
return model_graph, emission_matrix
def random_emission():
"""
Initialize an emission matrix size SxE
Where S is number of states and E is number of emissions
:return: Initialized emission_matrix
"""
emission_matrix = np.zeros((context["nodes"], len(set(context["sequence"]))))
shape = emission_matrix.shape
for row in range(0, shape[0]):
for col in range(0, shape[1]):
# Let random number swing below 0 to increase chance of nodes not emitting
emit_prob = round(random.uniform(-0.25, 1.0), 2)
emit_prob = 0.0 if emit_prob < 0.0 else emit_prob
emission_matrix[row][col] = emit_prob
return emission_matrix
def random_graph(nodes, edges=2):
"""
Create a random graph represented as a list [(from_node, to_node, {'weight': edge_weight}), ...]
Networkx can use this list in constructors for graph objects
:param nodes: The number of nodes in the graph
:param edges: The number of edges connecting nodes in the graph
:return: A list [(from_node, to_node, {'weight': edge_weight}), ...]
"""
# By default, make twice as many edges as there are nodes
edges *= nodes if edges == 2 else 1
r_graph = []
for x in range(0, edges):
while True:
new_edge = (
random.randint(0, nodes - 1), # Randomly select a from_node index
random.randint(0, nodes - 1), # Randomly select a to_node index
{
# Randomly set an edge weight between from_node and to_node
'weight':
round(random.uniform(0.0, 1.0), 2)
}
)
if not any((new_edge[0], new_edge[1]) == (a, b) for a, b, w in r_graph):
break
r_graph.append(new_edge)
return r_graph
def build_graph(t_matrix):
"""
Converts a transition matrix to a list of edges and weights
This list can then be passed to NetworkX graph constructors
:param t_matrix: The transition matrix to build the graph from
:return: A list [(from_node, to_node, {'weight': edge_weight}), ...]
"""
n_graph = []
shape = t_matrix.shape
for row in range(0, shape[0]):
for col in range(0, shape[1]):
if t_matrix[row][col] <= 0.0:
continue
new_edge = (row, col, {'weight': t_matrix[row][col]})
n_graph.append(new_edge)
return n_graph
def transition_matrix(graph: nx.MultiDiGraph):
"""
Build a transition matrix from a Networkx MultiDiGraph object
:param graph: An initialized MultiDiGraph graph object
:return: An initialized transition matrix with shape (NODE_COUNT, NODE_COUNT)
"""
# Initialize a matrix of zeros with size ExE where E is total number of states (nodes)
t_matrix = np.zeros((context["nodes"], context["nodes"]))
# Build matrices from iterating over the graph
for a, b, weight in graph.edges(data='weight'):
t_matrix[a][b] = weight
if context["show_all"]:
print(f'{a}->{b}: {weight}')
return t_matrix
def make_emission_dict(emission_matrix):
"""
Create a dictionary that maps to index keys for each emission. emission_keys
Create a dictionary that maps to a list of emitting nodes for each emission. emission_dict
:param emission_matrix: An emission_matrix size NxE
Where N is the number of nodes (states) and E is the number of emissions
:return: emission_dict, emission_keys
"""
emission_dict = {}
for emission in sorted(set(context["sequence"])):
emission_dict[emission] = []
emission_keys = dict.fromkeys(emission_dict.keys())
# Initialize emission_dict to store a list of all nodes that emit the key value
shape = emission_matrix.shape
i = 0
for key in emission_dict.keys():
for row in range(0, shape[0]):
if emission_matrix[row][i] > 0:
emission_dict[key].append(row)
emission_keys[key] = i
i += 1
return emission_dict, emission_keys
def int_input(prompt):
"""
Forces integer input. Retries and warns if bogus values are entered.
:param prompt: The initial prompt message to show for input
:return: The integer input by the user at runtime
"""
while True:
try:
value = int(input(prompt))
break
except ValueError:
print("Please enter an integer value")
return value
def triple_input(matrix):
"""
Takes 3 integer input, validates it makes sense for the selected matrix
If row or column selected is outside the limits of the matrix, warn and retry input until valid
:param matrix: The matrix to use for input validation
:return: The validated input
"""
row = int_input("Row: ")
col = int_input("Col: ")
value = int_input("Value: ")
row, col = check_input(row, col, matrix)
return row, col, value
def check_input(row, col, matrix):
"""
Checks that row, col input values are within the bounds of matrix
If valid values are passed initially, no additional prompts are made.
Retries input until valid values are input.
:param row: The row index input by the user
:param col: The col index input by the user
:param matrix: The matrix to use for input validation
:return: The validated input for row and column index
"""
while row > matrix.shape[0] - 1:
print(f'{row} is too large for transition matrix of shape {matrix.shape}')
row = int_input("Row : ")
while col > matrix.shape[1] - 1:
print(f'{col} is too large for transition matrix of shape {matrix.shape}')
col = int_input("Col: ")
return row, col
################################################################################
# Hidden Markov Model
################################################################################
# ==============================================================================
def find_paths(emission_dict, t_matrix):
"""
Find all possible paths for an emission sequence
:param emission_dict: A dictionary of emitters for emissions {emission_1: [0, 1], emission_2: [1, 3], ...}
:param t_matrix: A transition matrix size NxN where N is the total number of nodes in the graph
:return: A list of validated paths for the emission given through the CLI
"""
paths = []
for emission in context["sequence"]:
paths.append(emission_dict[emission])
# Take the cartesian product of the emitting nodes to get a list of all possible paths
# + Return only the paths which have > 0 probability given the transition matrix
return validate_paths(list(itertools.product(*paths)), t_matrix)
def validate_paths(path_list: list, t_matrix):
"""
Checks all paths in path_list [[0, 1, 2, 3], [0, 1, 1, 2], ...]
If the transition matrix t_matrix indicates any node in a path can't reach the next node in path
The path can't happen given our graph. Remove it from the list of paths.
:param path_list: A list of paths to validate
:param t_matrix: A transition matrix size NxN where N is the total number of nodes in the graph
:return: A list of validated paths [[0, 1, 2, 3], [0, 1, 1, 2], ...]
"""
valid_paths = []
for path in path_list:
valid = True
for step in range(0, len(path) - 1):
current_node = path[step]
# If the transition matrix indicates that the chance to move to next step in path is 0
if t_matrix[current_node][path[step+1]] <= 0.0:
# The path cannot possibly happen. Don't consider it.
valid = False
break
if valid:
# We reached the end of our path without hitting a dead-end. The path is valid.
valid_paths.append(path)
return valid_paths
def find_probability(emission_matrix, t_matrix, emission_keys, valid_paths):
"""
Find probability of paths occurring given our current HMM
Store result in a dictionary {probability: (0, 1, 2, 3), probability_2: (0, 0, 1, 2)}
:param emission_matrix: A matrix of emission probabilities NxE where N is the emitting node and E is the emission
:param t_matrix: A transition matrix NxN where N is the total number of nodes in the graph
:param emission_keys: A dictionary mapping to index values for emissions as E in the emission_matrix
:param valid_paths: A list of valid paths to calculate probability given an emission sequence
:return: A dictionary of {prob: path}; For example {probability: (0, 1, 2, 3), probability_2: (0, 0, 1, 2)}
"""
path_prob = {}
seq = list(context["sequence"])
for path in valid_paths:
calculations = f'Calculating {path}: '
prob = 1.0
for step in range(0, len(path) - 1):
current_node = path[step]
next_node = path[step + 1]
emission_index = emission_keys[seq[step]]
emission_prob = emission_matrix[current_node][emission_index]
transition_prob = t_matrix[current_node][next_node]
calculations += f'({emission_prob:.2f} * {transition_prob:.2f}) * '
prob *= emission_prob * transition_prob
emission_index = emission_keys[seq[step + 1]]
final_emission_prob = emission_matrix[next_node][emission_index]
prob *= final_emission_prob
calculations += f'{final_emission_prob:.2f} = {prob:.6f}'
if prob > 0.0: # Don't keep paths which aren't possible due to emission sequence
path_prob[prob] = path
if context["show_all"]:
print(calculations)
return path_prob
def run_problem(transition_matrix, emission_matrix):
"""
Runs the HMM calculations given a transition_matrix and emission_matrix
:param transition_matrix: A matrix size NxN where N is the total number of nodes and values represent probability
:param emission_matrix: A matrix size NxE where N is total nodes and E is total number of emissions
:return: A dictionary of {probability: path} sorted by probability key from in descending order
"""
# Dictionary of {emission: [emitter, ...]}
emission_dict, emission_keys = make_emission_dict(emission_matrix)
valid_paths = find_paths(emission_dict, transition_matrix)
path_prob = find_probability(emission_matrix, transition_matrix, emission_keys, valid_paths)
result = {key: path_prob[key] for key in dict.fromkeys(sorted(path_prob.keys(), reverse=True))}
print(f'Finding most probable path for given observation sequence: {context["sequence"]}\n'
f'\tTotal nodes in graph: {context["nodes"]}\n'
f'\tTotal edges in graph: {context["edges"]}\n'
f'\tNumber of sequences: {len(set(context["sequence"]))}\n'
f'\tInteractive mode: {context["interactive"]}\n'
f'\tEmitting nodes: {emission_dict}\n'
f'Transition matrix: \n{transition_matrix}\n'
f'Emission matrix: \n{emission_matrix}'
)
return result
def show_result(result):
"""
Prints results from running the HMM calculations
:param result: The result dictionary returned by run_problem()
"""
if len(result) == 0:
print(f'No valid paths found for sequence {context["sequence"]}')
elif context["show_all"]:
print(f'Final paths sorted by probability:')
[print(f'{path} has probability:\t {prob:.6f}') for prob, path in result.items()]
else:
print(f'{list(result.values())[0]} has the highest probability of {list(result.keys())[0]}')
def draw_graph(graph):
"""
Draws the model_graph for the current HMM using NetworkX
:param graph: An initialized MultiDiGraph object with edge weights representing transition probability
"""
# Get a dictionary of {node: position} for drawing the graph
dict_pos = nx.spring_layout(graph)
nx.draw(
graph, dict_pos,
with_labels=True,
node_size=[x * 200 for x in dict(graph.degree).values()],
alpha=1,
arrowstyle="->",
arrowsize=25,
)
# TODO: Fix parallel path weight display
nx.draw_networkx_edge_labels(graph, dict_pos)
plt.show()
################################################################################
# Main
################################################################################
# ==============================================================================
def main(args: List[str]):
parser = init_parser()
global context
context = vars(parser.parse_args(args[1:]))
if context["file"]: # If a file was provided, use that data instead
model_graph, emission_matrix = parse_file()
else:
# If no file was provided, build a random graph with the requested number of nodes and edges
model_graph = nx.MultiDiGraph(random_graph(context["nodes"], context["edges"]))
# Create a random emission matrix
emission_matrix = random_emission()
t_matrix = transition_matrix(model_graph)
result = run_problem(t_matrix, emission_matrix)
show_result(result)
# Draw the graph for a visual example to go with output
if not context["silent"]:
draw_graph(model_graph)
# Unless we are in interactive mode, we're finished. Return.
if not context["interactive"]:
return
# Prompt to update the transition or emission matrix, then rerun problem with new values
print("Choose matrix to update:\n\t1. Transition\n\t2. Emission\n\t3. Both", end='')
choice = input()
if choice == '1':
row, col, value = triple_input(t_matrix)
t_matrix[row][col] = value
elif choice == '2':
row, col, value = triple_input(emission_matrix)
emission_matrix[row][col] = value
elif choice == '3':
print('\nInput for updating transition matrix')
row, col, value = triple_input(t_matrix)
t_matrix[row][col] = value
print('\nInput for updating emission matrix')
row, col, value = triple_input(emission_matrix)
emission_matrix[row][col] = value
result = run_problem(t_matrix, emission_matrix)
show_result(result)
# Draw the graph for a visual example to go with output
if not context["silent"]:
model_graph = nx.MultiDiGraph(build_graph(np.array(t_matrix)))
draw_graph(model_graph)
if __name__ == "__main__":
sys.exit(main(sys.argv))


@@ -0,0 +1,3 @@
matplotlib==3.5.0
networkx==2.6.3
numpy==1.21.4

Binary file not shown (new image: 52 KiB).

python/neural-network/IRIS.csv (new executable file, +151 lines)

@@ -0,0 +1,151 @@
sepal_length,sepal_width,petal_length,petal_width,species
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3,1.4,0.1,Iris-setosa
4.3,3,1.1,0.1,Iris-setosa
5.8,4,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5,3,1.6,0.2,Iris-setosa
5,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5,3.3,1.4,0.2,Iris-setosa
7,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5,2,3.5,1,Iris-versicolor
5.9,3,4.2,1.5,Iris-versicolor
6,2.2,4,1,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3,5,1.7,Iris-versicolor
6,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6,2.7,5.1,1.6,Iris-versicolor
5.4,3,4.5,1.5,Iris-versicolor
6,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3,4.1,1.3,Iris-versicolor
5.5,2.5,4,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3,4.6,1.4,Iris-versicolor
5.8,2.6,4,1.2,Iris-versicolor
5,2.3,3.3,1,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor
6.3,3.3,6,2.5,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
7.1,3,5.9,2.1,Iris-virginica
6.3,2.9,5.6,1.8,Iris-virginica
6.5,3,5.8,2.2,Iris-virginica
7.6,3,6.6,2.1,Iris-virginica
4.9,2.5,4.5,1.7,Iris-virginica
7.3,2.9,6.3,1.8,Iris-virginica
6.7,2.5,5.8,1.8,Iris-virginica
7.2,3.6,6.1,2.5,Iris-virginica
6.5,3.2,5.1,2,Iris-virginica
6.4,2.7,5.3,1.9,Iris-virginica
6.8,3,5.5,2.1,Iris-virginica
5.7,2.5,5,2,Iris-virginica
5.8,2.8,5.1,2.4,Iris-virginica
6.4,3.2,5.3,2.3,Iris-virginica
6.5,3,5.5,1.8,Iris-virginica
7.7,3.8,6.7,2.2,Iris-virginica
7.7,2.6,6.9,2.3,Iris-virginica
6,2.2,5,1.5,Iris-virginica
6.9,3.2,5.7,2.3,Iris-virginica
5.6,2.8,4.9,2,Iris-virginica
7.7,2.8,6.7,2,Iris-virginica
6.3,2.7,4.9,1.8,Iris-virginica
6.7,3.3,5.7,2.1,Iris-virginica
7.2,3.2,6,1.8,Iris-virginica
6.2,2.8,4.8,1.8,Iris-virginica
6.1,3,4.9,1.8,Iris-virginica
6.4,2.8,5.6,2.1,Iris-virginica
7.2,3,5.8,1.6,Iris-virginica
7.4,2.8,6.1,1.9,Iris-virginica
7.9,3.8,6.4,2,Iris-virginica
6.4,2.8,5.6,2.2,Iris-virginica
6.3,2.8,5.1,1.5,Iris-virginica
6.1,2.6,5.6,1.4,Iris-virginica
7.7,3,6.1,2.3,Iris-virginica
6.3,3.4,5.6,2.4,Iris-virginica
6.4,3.1,5.5,1.8,Iris-virginica
6,3,4.8,1.8,Iris-virginica
6.9,3.1,5.4,2.1,Iris-virginica
6.7,3.1,5.6,2.4,Iris-virginica
6.9,3.1,5.1,2.3,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
6.8,3.2,5.9,2.3,Iris-virginica
6.7,3.3,5.7,2.5,Iris-virginica
6.7,3,5.2,2.3,Iris-virginica
6.3,2.5,5,1.9,Iris-virginica
6.5,3,5.2,2,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
5.9,3,5.1,1.8,Iris-virginica

View File

@ -0,0 +1,200 @@
Install the required dependencies for the matplotlib GUI frontend, and all other pip packages for this project:
```bash
sudo apt install python3-tk
python3.9 -m pip install -r requirements.txt
```
A neural network implementation with a Python CLI that dynamically generates a resizable network
and then runs a given number of learning cycles on the provided data set.
As an example, the IRIS dataset is used to classify flower types from petal measurements.
The input layer perceptron count can be adjusted with the `INPUTS` positional parameter.
The hidden layer perceptron count can be adjusted with the `PERCEPTRONS` positional parameter.
The output layer perceptron count can be adjusted with the `OUTPUTS` positional parameter.
Hidden layers can be added or removed with the `--hidden-layers` option.
Node bias can be initialized randomly or with provided data.
Perceptron edge weight bias can be initialized randomly or with provided data.
Threshold for perceptron fire can be initialized randomly or with provided data.
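A minimal sketch of the fire rule these options control, mirroring `threshold_fire()` in `neural-network.py` further below:
```python
# Sketch only: a perceptron fires when its summed, biased input exceeds the threshold
def fires(input_sum: float, fire_threshold: float = 0.25) -> int:
    return 1 if input_sum > fire_threshold else 0

print(fires(0.3))  # 1 -- 0.3 exceeds the default 0.25 threshold
```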
Output of `neural-network.py -h`:
```bash
python3.9 neural-network.py -h
usage: neural-network.py [-h] [--hidden-layers [HIDDEN_LAYERS]] [--cycles [CYCLES]] [--learn-rate [LEARNING_RATE]]
[--bias [INITIAL_BIAS]] [--weight [INITIAL_EDGE_WEIGHTS]] [--error-threshold [ERROR_THRESHOLD]]
[--fire-threshold [FIRE_THRESHOLD]] [--spacing [LAYER_SPACING]] [--horizontal] [--silent] [--verbose]
[--file [file_path]]
[INPUTS] [PERCEPTRONS] [OUTPUTS]
Neural network implementation
positional arguments:
INPUTS Number of inputs for the neural network
(default: '3')
PERCEPTRONS Number of perceptrons in each hidden layer
(default: '8')
OUTPUTS Number of outputs for the neural network
(default: '3')
optional arguments:
-h, --help show this help message and exit
--hidden-layers [HIDDEN_LAYERS], -l [HIDDEN_LAYERS]
Number of hidden layers
(default: '1')
--cycles [CYCLES], -c [CYCLES]
Number of cycles to run through the network
(default: '3')
--learn-rate [LEARNING_RATE]
Learning rate to use for the network.
Must be within range of 0.0 < rate <= 1.0
(default: '0.25')
--bias [INITIAL_BIAS], -b [INITIAL_BIAS]
The initial bias to use for perceptrons within the network.
Must be within range of -1.0 <= bias <= 1.0
If value is unset, bias will be initialized randomly
--weight [INITIAL_EDGE_WEIGHTS], -w [INITIAL_EDGE_WEIGHTS]
The initial edge weight to use for node connections in the network
If value is unset, edge weights will be initialized randomly
--error-threshold [ERROR_THRESHOLD], --error [ERROR_THRESHOLD]
The acceptable error threshold to use for training the network.
(default: '0.5')
--fire-threshold [FIRE_THRESHOLD], --fire [FIRE_THRESHOLD]
The fire threshold for perceptrons in the network.
If a perceptron's cumulative inputs reach this value, the perceptron fires
(default: '0.25')
--spacing [LAYER_SPACING]
Distance between origin of network layers within visualization
(default: '2.0')
--horizontal, --flip The network visualization will flow left-to-right
(default: 'False')
--silent Do not show the network visualization, only print output to console
(default: 'False')
--verbose, -v When this flag is set, error rate and change in weight will be output for each calculation
(default: 'False')
--file [file_path], -f [file_path]
Optionally provide a json file to configure any option available through the cli
json keys match --long version of each option, where --long-split option key is "long_split" in json
```
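The JSON key mapping described for `--file` amounts to dropping the leading dashes and replacing `-` with `_`; a small sketch of that rule:
```python
def json_key(option: str) -> str:
    """Sketch: map a --long-option flag to its JSON config key."""
    return option.lstrip('-').replace('-', '_')

print(json_key('--fire-threshold'))  # fire_threshold
```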
Input and output layers are sized by the length of a single input sequence and the number of possible label classifications.
If the length of an input sequence does not match the number of input nodes requested, a warning is shown.
If the number of possible label classifications does not match the number of output nodes requested, a warning is shown.
In both cases, the program corrects the node count to match the input data / labels rather than the requested node count.
The total number of output labels provided must match the number of input sequences.
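A minimal sketch of this sizing rule, assuming `seq_input` holds feature vectors and `seq_label` holds one-hot labels as in the source's `main()`:
```python
seq_input = [[0, 1, 0], [1, 1, 0]]         # two sequences of three features each
seq_label = [[1, 0, 0], [0, 1, 0]]         # one-hot labels, two unique classes
inputs = len(seq_input[0])                 # one input node per feature -> 3
outputs = len(set(map(tuple, seq_label)))  # one output node per unique label -> 2
```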
Running the NN program uses the IRIS dataset by default.
Warnings are shown if the input or output node counts are changed without providing new input data.
```bash
python3.9 neural-network.py --file input.json --silent
Warning: Input sequences each contain 3 entries but 5 input nodes were requested.
Using 3 input nodes instead of 5
Warning: Output labels contain 3 possible classifications but 8 output nodes were requested.
Using 3 output nodes instead of 8
Creating a neural network:
Total input nodes: 3
Number of perceptrons in each hidden layer: 8
Total output nodes: 3
Number of hidden layers: 3
Fire threshold: 0.25
Error threshold: 0.5
Learn rate: 0.25
Initial bias: Random
Initial edge weights: Random
Network visualization settings:
Graph visualization is enabled: False
Graph visualization is horizontal: True
Graph visualization is vertical: False
Graph visualization layer spacing: 2.0
Test data input count: 150
inputs layer: [0, 1, 2]
hidden layer: [[3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18], [19, 20, 21, 22, 23, 24, 25, 26]]
outputs layer: [27, 28, 29]
[Cycle 1] Accuracy: 92.6667% [139 / 11]
[Cycle 2] Accuracy: 95.3333% [286 / 14]
[Cycle 3] Accuracy: 96.2222% [433 / 17]
[Cycle 4] Accuracy: 96.6667% [580 / 20]
[Cycle 5] Accuracy: 96.9333% [727 / 23]
[Cycle 6] Accuracy: 97.1111% [874 / 26]
[Cycle 7] Accuracy: 97.2381% [1021 / 29]
[Cycle 8] Accuracy: 97.3333% [1168 / 32]
[Cycle 9] Accuracy: 97.4074% [1315 / 35]
[Cycle 10] Accuracy: 97.4667% [1462 / 38]
Correct: 1462 Wrong: 38 Total: 1500
Cycle 1 accuracy: 92.6667% Cycle 10 accuracy: 97.4667%
4.8% change over 10 cycles 0.48% average change per cycle
```
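The bracketed `[correct / wrong]` counts are cumulative over every sequence seen so far, so each cycle's accuracy reduces to simple arithmetic; for cycle 1 above:
```python
correct, wrong = 139, 11  # cumulative counts reported for cycle 1
print(f'{100.0 * correct / (correct + wrong):.4f}%')  # 92.6667%
```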
Running the NN program with garbage data in `input-test.json` tests the resizing of the input / output layers.
A single input sequence is `[0, 1, 0, 1, 1, 1]`, which has a length of 6, so 6 input nodes are created.
Within the output labels there are 8 unique labels in the set, so 8 output nodes are created.
The length of a single label must match the number of output nodes.
For 8 output nodes, the labels `[1, 0, 0, 0, 0, 0, 0, 0]` and `[0, 1, 0, 0, 0, 0, 0, 0]` are valid, as the sketch below shows.
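A hypothetical helper (not part of the project) that builds valid one-hot labels for `n` output nodes:
```python
import numpy as np

def one_hot(k: int, n: int = 8) -> list:
    """Sketch: one-hot label marking class k among n output nodes."""
    label = np.zeros(n, dtype=int)
    label[k] = 1
    return label.tolist()

print(one_hot(0))  # [1, 0, 0, 0, 0, 0, 0, 0]
print(one_hot(1))  # [0, 1, 0, 0, 0, 0, 0, 0]
```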
```bash
python3.9 neural-network.py --file ./input-test.json --silent
Warning: Output labels contain 8 possible classifications but 10 output nodes were requested.
Using 8 output nodes instead of 10
Creating a neural network:
Total input nodes: 6
Number of perceptrons in each hidden layer: 8
Total output nodes: 8
Number of hidden layers: 3
Fire threshold: 0.25
Error threshold: 0.5
Learn rate: 0.25
Initial bias: Random
Initial edge weights: Random
Network visualization settings:
Graph visualization is enabled: False
Graph visualization is horizontal: True
Graph visualization is vertical: False
Graph visualization layer spacing: 2.0
Test data input count: 14
inputs layer: [0, 1, 2, 3, 4, 5]
hidden layer: [[6, 7, 8, 9, 10, 11, 12, 13], [14, 15, 16, 17, 18, 19, 20, 21], [22, 23, 24, 25, 26, 27, 28, 29]]
outputs layer: [30, 31, 32, 33, 34, 35, 36, 37]
[Cycle 1] Accuracy: 35.7143% [5 / 9]
[Cycle 2] Accuracy: 39.2857% [11 / 17]
[Cycle 3] Accuracy: 40.4762% [17 / 25]
[Cycle 4] Accuracy: 41.0714% [23 / 33]
[Cycle 5] Accuracy: 41.4286% [29 / 41]
[Cycle 6] Accuracy: 41.6667% [35 / 49]
[Cycle 7] Accuracy: 41.8367% [41 / 57]
[Cycle 8] Accuracy: 41.9643% [47 / 65]
[Cycle 9] Accuracy: 42.0635% [53 / 73]
[Cycle 10] Accuracy: 42.1429% [59 / 81]
Correct: 59 Wrong: 81 Total: 140
Cycle 1 accuracy: 35.7143% Cycle 10 accuracy: 42.1429%
6.4286% change over 10 cycles 0.6429% average change per cycle
```
By default, the following network and visualization will be generated:
```bash
python3.9 neural-network.py
# Output removed for GUI example
```
![](screenshot.png)

View File

@ -0,0 +1,44 @@
{
"inputs": 6,
"perceptrons": 8,
"outputs": 10,
"hidden_layers": 3,
"learn_rate": 0.25,
"fire_threshold": 0.25,
"error_threshold": 0.5,
"cycles": 10,
"spacing": 2.0,
"horizontal": true,
"input_sequence": [
[0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1]
],
"label_sequence": [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1]
]
}

View File

@ -0,0 +1,12 @@
{
"inputs": 5,
"perceptrons": 8,
"outputs": 8,
"hidden_layers": 3,
"learn_rate": 0.25,
"fire_threshold": 0.25,
"error_threshold": 0.5,
"cycles": 10,
"spacing": 2.0,
"horizontal": true
}

View File

@ -0,0 +1,649 @@
################################################################################
# Author: Shaun Reed #
# About: ANN implementation with adjustable layers and layer lengths #
# Contact: shaunrd0@gmail.com | URL: www.shaunreed.com | GitHub: shaunrd0 #
################################################################################
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from typing import List
import argparse
import json
import math
import numpy as np
import pandas as pd # Unused unless optional code is manually uncommented
import random
import sys
import viznet as vn
################################################################################
# CLI Argument Parser
################################################################################
# ==============================================================================
def init_parser():
parser = argparse.ArgumentParser(
description='Neural network implementation',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'inputs', metavar='INPUTS', type=int, nargs='?',
help=
'''Number of inputs for the neural network
(default: '%(default)s')
''',
default=3
)
parser.add_argument(
'perceptrons', metavar='PERCEPTRONS', type=int, nargs='?',
help=
'''Number of perceptrons in each hidden layer
(default: '%(default)s')
''',
default=8
)
parser.add_argument(
'outputs', metavar='OUTPUTS', type=int, nargs='?',
help=
'''Number of outputs for the neural network
(default: '%(default)s')
''',
default=3
)
parser.add_argument(
'--hidden-layers', '-l', metavar='HIDDEN_LAYERS', type=int, nargs='?',
help=
'''Number of hidden layers
(default: '%(default)s')
''',
default=1
)
parser.add_argument(
'--cycles', '-c', metavar='CYCLES', type=int, nargs='?',
help=
'''Number of cycles to run through the network
(default: '%(default)s')
''',
default=3
)
parser.add_argument(
'--learn-rate', metavar='LEARNING_RATE', type=float, nargs='?',
help=
'''Learning rate to use for the network.
Must be within range of 0.0 < rate <= 1.0
(default: '%(default)s')
''',
default=0.25
)
parser.add_argument(
'--bias', '-b', metavar='INITIAL_BIAS', type=float, nargs='?',
help=
'''The initial bias to use for perceptrons within the network.
Must be within range of -1.0 <= bias <= 1.0
If value is unset, bias will be initialized randomly
''',
)
parser.add_argument(
'--weight', '-w', metavar='INITIAL_EDGE_WEIGHTS', type=float, nargs='?',
help=
'''The initial edge weight to use for node connections in the network
If value is unset, edge weights will be initialized randomly
'''
)
parser.add_argument(
'--error-threshold', '--error', metavar='ERROR_THRESHOLD', type=float, nargs='?',
help=
'''The acceptable error threshold to use for training the network.
(default: '%(default)s')
''',
default=0.5
)
parser.add_argument(
'--fire-threshold', '--fire', metavar='FIRE_THRESHOLD', type=float, nargs='?',
help=
'''The fire threshold for perceptrons in the network.
If a perceptron\'s cumulative inputs reach this value, the perceptron fires
(default: '%(default)s')
''',
default=0.25
)
parser.add_argument(
'--spacing', metavar='LAYER_SPACING', type=float, nargs='?',
help=
'''Distance between origin of network layers within visualization
(default: '%(default)s')
''',
default=2.0
)
parser.add_argument(
'--horizontal', '--flip', action='store_true',
help=
'''The network visualization will flow left-to-right
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--silent', action='store_true',
help=
'''Do not show the network visualization, only print output to console
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--verbose', '-v', action='store_true',
help=
'''When this flag is set, error rate and change in weight will be output for each calculation
(default: '%(default)s')
''',
default=False
)
parser.add_argument(
'--file', '-f', metavar='file_path', nargs='?', type=open,
help=
'''Optionally provide a json file to configure any option available through the cli
json keys match --long version of each option, where --long-split option key is "long_split" in json
''',
)
return parser
################################################################################
# Neural Network
################################################################################
# ==============================================================================
def parse_file():
"""
Validates keys in JSON file and updates CLI input context
:return: (seq_input, seq_label) Initialized to input and label sequences in JSON file if present
"""
# Load the JSON input file, validate keys
file_data = json.load(context['file'])
for key in file_data:
if key == "input_sequence" or key == "label_sequence":
continue
assert key in context
# Update the CLI context with JSON input
context.update(file_data)
# If JSON file provided input and label sequences, load and return them
seq_input = seq_label = None
if 'input_sequence' in file_data:
seq_input = np.array(file_data['input_sequence'])
if 'label_sequence' in file_data:
seq_label = np.array(file_data['label_sequence'])
return seq_input, seq_label
def network_layers():
"""
Initialize a dictionary of layers where each layer is a list of nodes: {'input': [0, 1, 2]}
The hidden layer in this dictionary is a list of lists for each hidden layer: {'hidden': [[3, 4, 5], [6, 7, 8]]}
:return: A dictionary, as an example: {'input': [0, 1, 2], 'hidden': [[3, 4, 5], [6, 7, 8]], 'output': [9, 10, 11] }
"""
inputs = [i for i in range(context["inputs"])]
offset = context["inputs"]
# For each hidden layer add the requested number of perceptrons
hidden = [[] for x in range(context["hidden_layers"])]
for x in range(context["hidden_layers"]):
hidden[x] = [i for i in range(offset, context["perceptrons"] + offset)]
offset += context["perceptrons"]
outputs = [i for i in range(offset, context["outputs"] + offset)]
offset += context["outputs"]
layers = {"inputs": inputs, "hidden": hidden, "outputs": outputs}
[print(f'{layer} layer: {layers[layer]}') for layer in layers]
return layers
def random_matrix(rows, cols, low=-1.0, high=1.0):
""" Produce a random matrix of size ROWSxCOLS using LOW and HIGH as upper and lower bounds """
return np.random.uniform(low, high, (rows, cols))
def get_matrix_dict():
"""
Produces a dictionary that holds edge weight transition matrices for each layer of the network
matrix_dict['input'] maps to a single 2D matrix
matrix_dict['hidden'] maps to a 3D matrix
+ where matrix_dict['hidden'][0] is the 2D transition matrix for the first hidden layer
:return: A dictionary, as an example: {'input': [[...]], 'hidden': [[[...]], [[...]]], 'output': [[...]] }
"""
if context["weight"] is None:
# Create matrices to represent edges and weights for each layer of the network
input_matrix = random_matrix(context["inputs"], context["perceptrons"])
hidden_matrices = [random_matrix(context["perceptrons"], context["perceptrons"])
for x in range(context["hidden_layers"]-1)]
output_matrix = random_matrix(context["perceptrons"], context["outputs"])
else:
# If an initial edge weight was specified, fill matrices with that value instead of generating randomly
input_matrix = np.full((context["inputs"], context["perceptrons"]), context["weight"])
hidden_matrices = [np.full((context["perceptrons"], context["perceptrons"]), context["weight"])
for x in range(context["hidden_layers"]-1)]
output_matrix = np.full((context["perceptrons"], context["outputs"]), context["weight"])
matrix_dict = {'input': input_matrix, 'hidden': np.array(hidden_matrices), 'output': output_matrix}
return matrix_dict
def get_bias_dict():
"""
Produces a dictionary that stores bias vectors for perceptrons in each layer of the network
The hidden layer in this dictionary is a list of lists for bias in each hidden layer: {'hidden': [[...], [...]]}
    :return: A dictionary, as an example: {'input': [0.5, 0.5], 'hidden': [[0.5, 0.5, 0.5], ...], 'output': [...] }
"""
# If there was a bias provided, use it; Else use random perceptron bias
bias = round(random.uniform(-1.0, 1.0), 2) if context["bias"] is None else context["bias"]
# Create vectors to represent perceptron bias in each layer
input_bias = [bias for x in range(0, context["inputs"])]
hidden_bias = [[bias for x in range(0, context["perceptrons"])] for x in range(0, context["hidden_layers"])]
output_bias = [bias for x in range(0, context["outputs"])]
bias_dict = {'input': input_bias, 'hidden': hidden_bias, 'output': output_bias}
return bias_dict
def threshold_fire(input_sum):
"""
Applies step function using fire_threshold set by CLI to determine if perceptron is firing or not
:param input_sum: The sum of inputs for this perceptron
:return: A list of outputs for each perceptron in the layer. If only the first fired: [1, 0, 0, 0]
"""
output = [1 if val > context["fire_threshold"] else 0 for val in input_sum.tolist()]
return output
def adjust_weight(matrix_dict, out_output, label):
"""
Back propagation for adjusting edge weights of nodes that produces incorrect output
    :param matrix_dict: A dictionary of matrices for the network produced by get_matrix_dict()
:param out_output: The actual output for this input sequence
:param label: The desired result for this input sequence
:return: A dictionary of transition matrices for the network with adjusted edge weights
"""
# Find erroneous indices
bad_nodes = error_nodes(out_output, label)
if len(bad_nodes) == 0:
return matrix_dict
# Adjust the edge weights leading to the error nodes; Don't adjust correct nodes
for layer, mat in reversed(matrix_dict.items()):
if layer == 'output':
for node in bad_nodes:
for row in range(len(mat)):
                    mod = context['learn_rate'] * (label[node] - out_output[node])  # delta rule; input term omitted
if context['verbose']:
print(f'Adjusting output weights at ({row}, {node}) with {mod}')
mat[row][node] += mod
# In a fully connected neural network, all edges are updated if any output node is wrong
# + Every node of every layer connects to every node in the next layer
# + Any wrong node updates all edges in previous layers
if layer == 'hidden':
# If there are any hidden layers that do not connect to input or output layers directly
if mat.size > 0:
# For each hidden layer matrix, update all edge weights
for i, hl_mat in enumerate(mat):
for row in range(len(hl_mat)):
                        mod = context['learn_rate']
                        for col in range(len(hl_mat[row])):
                            # print(f'Adjusting hidden weights at ({row}, {col}) with {mod}')
                            mat[i][row][col] += mod
if layer == 'input':
for row in range(len(mat)):
mod = context['learn_rate']
for col in range(len(mat[row])):
                # print(f'Adjusting input weights at ({row}, {col}) with {mod}')
mat[row][col] += mod
return matrix_dict
def error_rate(actual_output, label):
"""
Determines error rate for this input sequence
Error rate is later used to determine if edge weights should be adjusted
:param actual_output: The actual output for this input sequence
:param label: The desired output for this input sequence
:return: The error rate for the sequence
"""
error_sum = 0
for n, output in enumerate(actual_output):
err = label[n] - output
error_sum += math.pow(err, 2)
err = math.sqrt(error_sum)
return err
def error_nodes(out_output, label):
"""
Find which output nodes are incorrect
:param out_output: Actual output for this input sequence
:param label: The desired output for this input sequence
:return: A list of node indices that produced the wrong output for this sequence
"""
# Loop through each output, check if it matches the label; If it doesn't add index to returned list
return [i for i, output in enumerate(out_output) if output != label[i]]
def layer_pass(weight_matrix, input_vector, bias_vector):
"""
Passes input from layer A to layer B
:param weight_matrix: Transition matrix of edge weights where perceptrons from layer A are rows and B are columns
:param input_vector: An input vector that represents the output from A to B
:param bias_vector: The bias vector for perceptrons in layer B
:return: Final output from the layer, after step function is applied in threshold_fire()
"""
layer_edge_weights = np.array(weight_matrix).T
prev_output = np.atleast_2d(input_vector).T
this_layer_input = layer_edge_weights.dot(prev_output).T.flatten()
this_layer_input += np.array(bias_vector)
return threshold_fire(this_layer_input)
def train_network(seq_input, seq_label, bias_dict, matrix_dict):
"""
Performs forward pass through network, moving through the number of cycles requested by the CLI
:param seq_input: Sequence of inputs to feed into the network
:param seq_label: Sequence of labels to verify network output and indicate error
:param bias_dict: Dictionary of bias vectors for the perceptrons in each layer
:param matrix_dict: Dictionary of transition matrices for the edge weights between layers in the network
:return: Information gathered from training the network used to output final accuracy
"""
# Info dictionary used to track accuracy
info = {'correct': 0, 'wrong': 0, 'total': len(seq_input) * context["cycles"], 'first_acc': 0}
# A list of error rates for each cycle
# + These aren't used much for the program, but they hold nice data to explore while debugging
cycle_errors = []
cycle_outputs = [[] for x in range(context["cycles"])]
for cycle_index in range(1, context["cycles"] + 1):
# print(f'\nCycle number {cycle_index}')
for seq_index in range(0, len(seq_input)):
# One list for storing the outputs of each layer, and another to store inputs
seq_outputs = []
# Input layer -> Hidden layer
# Apply input perceptron bias vector to initial inputs of the input layer
in_input = np.array(np.array(seq_input[seq_index]) + np.array(bias_dict['input']))
# Find output of the input layer
in_output = threshold_fire(in_input)
seq_outputs.append(in_output)
# Find output for first hidden layer
hl_output = layer_pass(matrix_dict["input"], seq_outputs[-1], bias_dict['hidden'][0])
seq_outputs.append(hl_output)
# For each hidden layer find inputs and outputs, up until the last hidden layer
# + Start at 1 since we already have the output from first hidden layer
for layer_index in range(1, context["hidden_layers"]):
# Hidden layer -> Hidden layer
edges = matrix_dict['hidden'][layer_index - 1]
bias = bias_dict['hidden'][layer_index - 1]
# Find output for hidden layer N
hl_output = layer_pass(edges, seq_outputs[-1], bias)
seq_outputs.append(hl_output)
# Hidden layer -> Output layer
# Find output for output layer
out_output = layer_pass(matrix_dict['output'], seq_outputs[-1], bias_dict['output'])
seq_outputs.append(out_output)
# Forward pass through network finished
# Find error rate for this input sequence
err = error_rate(out_output, seq_label[seq_index])
if context['verbose'] and err > 0:
print(f'Error rate for sequence {seq_index} cycle {cycle_index}: {err}')
# If error rate for this sequence is above threshold, adjust weighted edges
if err > context["error_threshold"]:
matrix_dict = adjust_weight(matrix_dict, out_output, seq_label[seq_index])
# Track correctness of sequences and cycles
if err == 0:
info['correct'] += 1
else:
info['wrong'] += 1
# Append the result to the cycle_outputs list for this cycle; -1 for 0 index array offset
# cycle_outputs contains a list for each cycle. Each list contains N outputs for N input sequences
cycle_outputs[cycle_index - 1].append(out_output)
cycle_errors.append(err)
# Move to next learning cycle in for loop
info_total_temp = info['correct'] + info['wrong']
if cycle_index == 1:
info['first_acc'] = round(100.0 * float(info["correct"] / info_total_temp), 4)
print(
f'[Cycle {cycle_index}] \tAccuracy: {100.0 * float(info["correct"] / info_total_temp):.4f}% \t'
f'[{info["correct"]} / {info["wrong"]}]'
)
if context["verbose"]:
for layer in matrix_dict:
print(
f'Network {layer} layer: \n{matrix_dict[layer]}\n'
# Bias vector doesn't change, so it's not very interesting output per-cycle
# f'{layer} bias vector: {bias_dict[layer]}'
)
info['cycle_error'] = cycle_errors
return info
def draw_graph(net_plot, net_layers, draw_horizontal=None, spacing=None):
"""
This is the only function where viznet is used. Viznet is a module to visualize network graphs using matplotlib.
https://viznet.readthedocs.io/en/latest/core.html
https://viznet.readthedocs.io/en/latest/examples.html#examples
To draw the graph, we need to at least specify the following information for-each layer in the network -
1. The number of nodes in the layer
2. The type of nodes that make up each layer (https://viznet.readthedocs.io/en/latest/viznet.theme.html)
3. The distance between the center (origin) of each layer
With this we can use viznet helper functions to draw network
:param net_plot: A matplotlib subplot to draw the network on
:param net_layers: A dictionary of layers that make up the network nodes
:param draw_horizontal: True if graph should be drawn so direction flows left->right; False for bottom->top
:param spacing: The distance between the center of origin for each layer in the network
"""
# If no spacing was provided to the call, use spacing set by CLI
spacing = context["spacing"] if spacing is None else spacing
# If no draw mode was provided to the call, use mode set by CLI
draw_horizontal = context["horizontal"] if draw_horizontal is None else draw_horizontal
# 1. Number of nodes in each layer is provided by dictionary: len(net_layers['input'])
# 2. Define node type to draw for each layer in the network (default ['nn.input', 'nn.hidden', nn.output])
node_types = ['nn.input'] + ['nn.hidden'] * context["hidden_layers"] + ['nn.output']
# 3. Use spacing distance to create list of X positions with equal distance apart (default [0, 1.5, 3.0])
# 1.5 * 0 = 0; 1.5 * 1 = 1.5; 1.5 * 2 = 3.0; 1.5 * 3 = 4.5; etc
layer_pos = spacing * np.arange(context["hidden_layers"] + 2)
# Create a sequence of Node objects using viznet helper function node_sequence
# + Allows defining a NodeBrush for-each node, which is used by the library to style nodes
node_sequence = []
layer_index = 0
for layer in net_layers:
# If we are on the hidden layers, iterate through each
if layer == 'hidden':
for hl in net_layers[layer]:
brush = vn.NodeBrush(node_types[layer_index], net_plot)
ctr = (layer_pos[layer_index], 0) if draw_horizontal else (0, layer_pos[layer_index])
node_sequence.append(vn.node_sequence(
brush, len(hl),
center=ctr, space=(0, 1) if draw_horizontal else (1, 0))
)
layer_index += 1
else:
brush = vn.NodeBrush(node_types[layer_index], net_plot)
ctr = (layer_pos[layer_index], 0) if draw_horizontal else (0, layer_pos[layer_index])
node_sequence.append(vn.node_sequence(
brush, len(net_layers[layer]),
center=ctr, space=(0, 1) if draw_horizontal else (1, 0))
)
layer_index += 1
# Define an EdgeBrush that draws arrows between nodes using matplotlib axes
edge_brush = vn.EdgeBrush('-->', net_plot)
for start, end in zip(node_sequence[:-1], node_sequence[1:]):
# Connect each node in `start` layer to each node in `end` layer
for start_node in start:
for end_node in end:
# Apply the EdgeBrush using matplotlib axes and node edge tuple
edge_brush >> (start_node, end_node)
plt.show()
################################################################################
# Main
################################################################################
# ==============================================================================
def main(args: List[str]):
parser = init_parser()
global context
context = vars(parser.parse_args(args[1:]))
seq_input = None
seq_label = None
if context['file']:
seq_input, seq_label = parse_file()
if seq_input is None or seq_label is None:
# You cannot provide input or label sequences via the CLI
# If no file was provided with data, use iris dataset as example data
# Use sklearn.dataset to grab example data
iris = load_iris()
# iris_data = iris.data[:, (0, 1, 2, 3)]
iris_data = iris.data[:, (0, 2, 3)]
# iris_data = iris.data[:, (2, 3)]
iris_label = iris.target
# Or read a CSV manually using pandas
# iris = pd.read_csv('/home/kapper/Code/School/CS/AI/Assignment/two/IRIS.csv').to_dict()
# iris_data = [[x, y] for x, y in zip(iris['petal_length'].values(), iris['petal_width'].values())]
# iris_data = [[x, y, z] for x, y, z in zip(iris['petal_length'].values(),
# iris['petal_width'].values(),
# iris['sepal_length'].values())]
# iris_label = [x for x in iris['species'].values()]
# To change the number of output nodes, we need to adjust the number of labels for classification
# iris_data = iris.data[0:99, (0, 2, 3)]
# iris_label = [l for l in iris_label if l != 2]
# Convert labels to: 0-> [1, 0, 0]; 1-> [0, 1, 0]; 2->[0, 0, 1]
seq_input = iris_data
seq_label = []
for i, label in enumerate(set(iris_label)):
same = [s for s in iris_label if s == label]
for l in same:
new_label = np.zeros(len(set(iris_label))).tolist()
new_label[i] = 1
seq_label.append(new_label)
# Assert that the provided learning rate is valid
assert(0.0 < context['learn_rate'] <= 1.0)
# This check ensures that the number of inputs match the number of input nodes
# + And does the same for output nodes with possible classifications
# + But, this removes the ability to grow / shrink input / output layers through CLI
if context["inputs"] != len(seq_input[0]):
print(f'Warning: Input sequences each contain {len(seq_input[0])} entries '
f'but {context["inputs"]} input nodes were requested.\n'
f'\tUsing {len(seq_input[0])} input nodes instead of {context["inputs"]}'
)
context["inputs"] = len(seq_input[0])
if context["outputs"] != len(set(map(tuple, seq_label))):
print(f'Warning: Output labels contain {len(set(map(tuple, seq_label)))} possible classifications '
f'but {context["outputs"]} output were nodes requested.\n'
f'\tUsing {len(set(map(tuple, seq_label)))} output nodes instead of {context["outputs"]}'
)
context["outputs"] = len(set(map(tuple, seq_label)))
# Output the problem settings
    print(f'Creating a neural network: \n'
f'\tTotal input nodes: {context["inputs"]}\n'
f'\tNumber of perceptrons in each hidden layer: {context["perceptrons"]}\n'
f'\tTotal output nodes: {context["outputs"]}\n'
f'\tNumber of hidden layers: {context["hidden_layers"]}\n'
f'\tFire threshold: {context["fire_threshold"]}\n'
f'\tError threshold: {context["error_threshold"]}\n'
f'\tLearn rate: {context["learn_rate"]}\n'
f'\tInitial bias: {context["bias"] if context["bias"] is not None else "Random"}\n'
f'\tInitial edge weights: {context["weight"] if context["weight"] is not None else "Random"}\n'
f'Network visualization settings: \n'
f'\tGraph visualization is enabled: {not context["silent"]}\n'
f'\tGraph visualization is horizontal: {context["horizontal"]}\n'
f'\tGraph visualization is vertical: {not context["horizontal"]}\n'
f'\tGraph visualization layer spacing: {context["spacing"]}\n'
f'\tTest data input count: {len(seq_input)}'
)
# Initialize a dictionary of vectors for mapping to each layer node
# + layers['hidden'][0] = [3, 4, 5, 6] --> Hidden layer nodes are at index 3, 4, 5, 6
layers = network_layers()
# A dictionary where matrix_dict['input'] maps to edge weight matrix for input_layer->first_hidden_layer
# matrix_dict['hidden'] maps to a list of matrices; matrix_dict['hidden'][0] is edge weights for first_hl->second_hl
# matrix_dict['output'] maps to edge weight matrix for last_hl->output_layer
matrix_dict = get_matrix_dict()
# Randomly generate perceptron bias if none was provided through CLI
bias_dict = get_bias_dict()
info = train_network(seq_input, seq_label, bias_dict, matrix_dict)
# Final console output for overall results
info_total_temp = info['correct'] + info['wrong']
acc = 100.0 * float(info["correct"] / info_total_temp)
print(
f'\nCorrect: {info["correct"]} \t Wrong: {info["wrong"]} \t Total: {context["cycles"] * len(seq_input)}'
f'\nCycle 1 accuracy: {info["first_acc"]}% \tCycle {context["cycles"]} accuracy: {acc:.4f}%'
f'\n{round(acc - info["first_acc"], 4)}% change over {context["cycles"]} cycles '
f'\t{round((acc - info["first_acc"]) / context["cycles"], 4)}% average change per cycle'
)
# All cycles have finished; Draw the network for a visual example to go with output
if not context["silent"]:
draw_graph(plt.subplot(), layers)
if __name__ == "__main__":
sys.exit(main(sys.argv))

View File

@ -0,0 +1,5 @@
matplotlib==3.5.0
numpy==1.21.4
pandas==1.3.4
scikit_learn==1.0.2
viznet==0.3.0

Binary file not shown.
