diff --git a/1 - The_method_of_least_squares.ipynb b/1 - The_method_of_least_squares.ipynb new file mode 100644 index 0000000..420487d --- /dev/null +++ b/1 - The_method_of_least_squares.ipynb @@ -0,0 +1 @@ +{"cells":[{"metadata":{"trusted":true},"cell_type":"code","source":"#source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py\ndef compute_error_for_line_given_points(b, m, coordinates):\n    \"\"\"\n    For a given line (defined by the equation below)\n    and a given set of coordinates, comprising a list of two-element sublists,\n    produce the error: the mean of the squared discrepancies between the points and the line.\n    Equation of line: y = mx + b\n    where m is the slope, and b is the y-intercept\n    \"\"\"\n    totalError = 0 # Initialize the sum of squared discrepancies to zero.\n    for i in range(0, len(coordinates)): # Sweep over the pairs in the list of coordinates.\n        x = coordinates[i][0] # Select the first number in the ith coordinate pair in the list.\n        y = coordinates[i][1] # Select the second number in the ith coordinate pair in the list.\n        totalError += (y - (m * x + b)) ** 2 # Add the square of the discrepancy between the *predicted* y value (from the line equation) and the *actual* y value (from the coordinate).\n    result = totalError / float(len(coordinates)) # Divide the total sum of squared discrepancies by the number of pairs in the list to arrive at the average.\n    return result\n# example\nb = 1\nm = 2\ncoordinates = [[3,6],[6,9],[12,18]]\ncompute_error_for_line_given_points(b, m, coordinates)","execution_count":7,"outputs":[{"output_type":"execute_result","execution_count":7,"data":{"text/plain":"22.0"},"metadata":{}}]},{"metadata":{"trusted":true},"cell_type":"code","source":"import matplotlib.pyplot as plt\n# Plot the example coordinates and the line y = 2x + 1.\nxs, ys = zip(*coordinates)\nplt.scatter(xs, ys)\nplt.plot(xs, [m * x + b for x in xs])\nplt.show()","execution_count":null,"outputs":[]},{"metadata":{"trusted":true},"cell_type":"code","source":"help(plt.plot)","execution_count":null,"outputs":[]},{"metadata":{"trusted":true},"cell_type":"code","source":"","execution_count":null,"outputs":[]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.1"}},"nbformat":4,"nbformat_minor":1} \ No newline at end of file diff --git a/Gradient_descent.ipynb b/2 - Gradient_descent.ipynb similarity index 100% rename from Gradient_descent.ipynb rename to 2 - Gradient_descent.ipynb diff --git a/3 - Linear_regression.ipynb b/3 - Linear_regression.ipynb new file mode 100644 index 0000000..68ffa23 --- /dev/null +++ b/3 - Linear_regression.ipynb @@ -0,0 +1 @@ +{"cells":[{"metadata":{"trusted":true},"cell_type":"code","source":"#source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py\n\n# A list of sublists, each of which has two elements;\n# 
the first being the price of wheat/kg and\n# the second being the average price of bread.\nprice_wheat_bread = [[0.5,5],\n                     [0.6,5.5],\n                     [0.8,6],\n                     [1.1,6.8],\n                     [1.4,7]\n                    ]\n\ndef step_gradient(b_current,\n                  m_current,\n                  points,\n                  learningRate):\n    \"\"\"\n    For a given y-intercept b, slope m, set of points, and learning rate,\n    compute the gradient of the mean squared error and return the updated\n    coefficients [b, m] after one gradient-descent step.\n    \"\"\"\n    b_gradient = 0 # Initialize the gradient with respect to the y-intercept to naught, i.e. no change.\n    m_gradient = 0 # Initialize the gradient with respect to the slope to naught, i.e. no change.\n    N = float(len(points)) # The number of pairs of points in our list of coordinates.\n    for i in range(0, len(points)): # Sweep over the index of every pair of points in the list of coordinates.\n        x = points[i][0] # Select the first number in the ith pair.\n        y = points[i][1] # Select the second number in the ith pair.\n        # How quickly does the squared error change with respect to the y-intercept?\n        # The leading negative sign comes from the chain rule: the derivative of (y - (m*x + b)) with respect to b is -1.\n        b_gradient += -(2/N) * (y - ((m_current * x) + b_current)) # The 2 comes down from the power rule when differentiating the squared error.\n        # How quickly does the error change with respect to the slope?\n        m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current)) # The extra x comes out via the chain rule, since the derivative of (m*x + b) with respect to m is x.\n    # Scaled by the learning rate, the gradients are now used to adjust the old values.\n    new_b = b_current - (learningRate * b_gradient) # Subtracting the gradient moves b in the direction that decreases the error.\n    new_m = m_current - (learningRate * m_gradient) # The same update rule, applied to the slope.\n    return [new_b, new_m]\n\ndef gradient_descent_runner(points,\n                            starting_b,\n                            starting_m,\n                            learning_rate,\n                            num_iterations):\n    \"\"\"\n    Given a set of points, initial values of b and m, a learning rate, and a number of iterations,\n    produce the final coefficients [b, m] after that many gradient-descent steps.\n    \"\"\"\n    b = starting_b\n    m = starting_m\n    for i in range(num_iterations): # Each iteration takes one gradient step, nudging b and m so the predicting line has successively smaller error.\n        b, m = step_gradient(b, m, points, learning_rate)\n    return [b, m]\n\n# Let's produce the best b and m values, given initial choices of b as 12, m as 42,\n# and a learning rate of 0.01, over 10000 iterations.\n# The goal is to end up with ideal, or at least very accurate, coefficients.\ngradient_descent_runner(price_wheat_bread, 12, 42, 0.01, 10000)\n","execution_count":17,"outputs":[{"output_type":"execute_result","execution_count":17,"data":{"text/plain":"[4.107202463019789, 2.2190814997453208]"},"metadata":{}}]},{"metadata":{},"cell_type":"markdown","source":"With these settings, gradient descent arrives at a y-intercept b of roughly 4.11 and a slope m of roughly 2.22."},{"metadata":{"collapsed":true,"trusted":false},"cell_type":"code","source":"","execution_count":null,"outputs":[]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":2},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython2","version":"2.7.13"}},"nbformat":4,"nbformat_minor":1} \ No newline at end of file diff --git a/Perceptron.ipynb b/4 - Perceptron.ipynb similarity index 100% rename from Perceptron.ipynb rename to 4 - Perceptron.ipynb diff --git 
a/Simple_neural_network.ipynb b/5 - Simple_neural_network.ipynb similarity index 100% rename from Simple_neural_network.ipynb rename to 5 - Simple_neural_network.ipynb diff --git a/MNIST_deep_learning.ipynb b/6 - MNIST_deep_learning.ipynb similarity index 100% rename from MNIST_deep_learning.ipynb rename to 6 - MNIST_deep_learning.ipynb diff --git a/Linear_regression.ipynb b/Linear_regression.ipynb deleted file mode 100644 index f50df1c..0000000 --- a/Linear_regression.ipynb +++ /dev/null @@ -1,78 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[3.30446237250121, 2.966062687410304]" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py\n", - "#Price of wheat/kg and the average price of bread\n", - "wheat_and_bread = [[0.5,5],[0.6,5.5],[0.8,6],[1.1,6.8],[1.4,7]]\n", - "\n", - "def step_gradient(b_current, m_current, points, learningRate):\n", - " b_gradient = 0\n", - " m_gradient = 0\n", - " N = float(len(points))\n", - " for i in range(0, len(points)):\n", - " x = points[i][0]\n", - " y = points[i][1]\n", - " b_gradient += -(2/N) * (y - ((m_current * x) + b_current))\n", - " m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current))\n", - " new_b = b_current - (learningRate * b_gradient)\n", - " new_m = m_current - (learningRate * m_gradient)\n", - " return [new_b, new_m]\n", - "\n", - "def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):\n", - " b = starting_b\n", - " m = starting_m\n", - " for i in range(num_iterations):\n", - " b, m = step_gradient(b, m, points, learning_rate)\n", - " return [b, m]\n", - "\n", - "gradient_descent_runner(wheat_and_bread, 1, 1, 0.01, 100)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/README.md b/README.md index 8fa10c0..11b973d 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,12 @@ There are six snippets of code that made deep learning what it is today. [Coding the History of Deep Learning](https://medium.com/@emilwallner/the-history-of-deep-learning-explored-through-6-code-snippets-d0a0e8545202) covers the inventors and the background to their breakthroughs. In this repo, you can find all the code samples from the story. -- **The Method of Least Squares**: The first cost function -- **Gradient Descent**: Finding the minimum of the cost function -- **Linear Regression**: Automatically decrease the cost function -- **The Perceptron**: Using a linear regression type equations to mimic a neuron -- **Artificial Neural Networks**: Leveraging backpropagation to solve non-linear problems -- **Deep Neural Networks**: Neural networks with more than one hidden layer +1. **The Method of Least Squares**: The first cost function +1. **Gradient Descent**: Finding the minimum of the cost function +1. **Linear Regression**: Automatically decrease the cost function +1. 
**The Perceptron**: Using a linear-regression-style equation to mimic a neuron +1. **Artificial Neural Networks**: Leveraging backpropagation to solve non-linear problems +1. **Deep Neural Networks**: Neural networks with more than one hidden layer diff --git a/The_method_of_least_squares.ipynb b/The_method_of_least_squares.ipynb deleted file mode 100644 index e57a85d..0000000 --- a/The_method_of_least_squares.ipynb +++ /dev/null @@ -1,47 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "#source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py\n", - "# y = mx + b\n", - "# m is slope, b is y-intercept\n", - "def compute_error_for_line_given_points(b, m, coordinates):\n", - " totalError = 0\n", - " for i in range(0, len(coordinates)):\n", - " x = coordinates[i][0]\n", - " y = coordinates[i][1]\n", - " totalError += (y - (m * x + b)) ** 2\n", - " return totalError / float(len(coordinates))\n", - "# example \n", - "compute_error_for_line_given_points(1, 2, [[3,6],[6,9],[12,18]])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.1" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}
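As a quick check on the cost function added in `1 - The_method_of_least_squares.ipynb`, here is a minimal standalone sketch (not part of the patch above). The name `mean_squared_error` is my own shorthand rather than anything in the repo, and the expected value 22.0 is taken from the notebook's recorded output.

```python
# Sketch only: a compact restatement of compute_error_for_line_given_points.
def mean_squared_error(b, m, coordinates):
    """Average squared vertical distance between the points and the line y = m*x + b."""
    return sum((y - (m * x + b)) ** 2 for x, y in coordinates) / float(len(coordinates))

print(mean_squared_error(1, 2, [[3, 6], [6, 9], [12, 18]]))  # 22.0, matching the notebook output
```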
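Similarly, the gradient-descent loop in `3 - Linear_regression.ipynb` can be sanity-checked against the closed-form least-squares line for the same wheat/bread data. The sketch below is illustrative only: `closed_form_fit` is a hypothetical helper using the standard formulas m = cov(x, y) / var(x) and b = mean(y) - m * mean(x); the gradient-descent expectation comes from the notebook's recorded output, and the closed-form fit should come out essentially the same.

```python
# Sketch only: re-implements the update rule from the notebook and compares it
# with the closed-form ordinary-least-squares solution.
price_wheat_bread = [[0.5, 5], [0.6, 5.5], [0.8, 6], [1.1, 6.8], [1.4, 7]]

def step_gradient(b, m, points, learning_rate):
    """One gradient-descent step on the mean squared error of y = m*x + b."""
    n = float(len(points))
    b_grad = sum(-(2 / n) * (y - (m * x + b)) for x, y in points)
    m_grad = sum(-(2 / n) * x * (y - (m * x + b)) for x, y in points)
    return b - learning_rate * b_grad, m - learning_rate * m_grad

def gradient_descent(points, b, m, learning_rate, num_iterations):
    """Repeat the gradient step num_iterations times, starting from (b, m)."""
    for _ in range(num_iterations):
        b, m = step_gradient(b, m, points, learning_rate)
    return b, m

def closed_form_fit(points):
    """Ordinary least squares for one feature: m = cov(x, y) / var(x), b = mean(y) - m * mean(x)."""
    n = float(len(points))
    mean_x = sum(x for x, _ in points) / n
    mean_y = sum(y for _, y in points) / n
    m = (sum((x - mean_x) * (y - mean_y) for x, y in points)
         / sum((x - mean_x) ** 2 for x, _ in points))
    return mean_y - m * mean_x, m

print(gradient_descent(price_wheat_bread, 12, 42, 0.01, 10000))  # approx (4.107, 2.219)
print(closed_form_fit(price_wheat_bread))                        # approx (4.107, 2.219)
```

Both routes land on essentially the same line, which suggests the learning rate and iteration count used in the notebook are enough for convergence on this tiny dataset.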