# activation.py
import math
import numpy as np

def softmax(inputA):
    # Min-max normalize the inputs into [-1, 1] first so exp() cannot
    # overflow on large values (note: rescaling the inputs changes the
    # output distribution relative to softmax on the raw values).
    normalized_arr = normalize([float(x) for x in inputA])

    # Exponentiate each value once and reuse the results for the sum.
    exps = [math.exp(x) for x in normalized_arr]
    sigmaSum = sum(exps)

    # Each output is its exponential's share of the total.
    return [powA / sigmaSum for powA in exps]
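
# A sketch of a common alternative to min-max normalization (assumed here,
# not part of the original file): shifting by the maximum before
# exponentiating also prevents exp() overflow, and unlike rescaling it
# leaves the softmax output unchanged. The name softmax_stable is
# illustrative only.
def softmax_stable(inputs):
    values = [float(x) for x in inputs]
    shift = max(values)  # softmax is invariant to a constant shift
    exps = [math.exp(x - shift) for x in values]
    sigmaSum = sum(exps)
    return [powA / sigmaSum for powA in exps]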

def softmax2(input2):
    # Vectorized version of softmax() using numpy: normalize the inputs,
    # exponentiate once, and divide by the sum of the exponentials.
    normalized_arr = normalize([float(x) for x in input2])
    exps = np.exp(normalized_arr)
    return exps / exps.sum()

def normalize(arrs):
    # Min-max scale the values into [-1, 1]. An empty list passes through
    # unchanged; a constant list maps every value to -1 (the denominator
    # is clamped to 1 to avoid dividing by zero).
    normalized_arr = [float(x) for x in arrs]
    if normalized_arr:
        minimum = min(normalized_arr)
        maximum = max(normalized_arr)
        denom = maximum - minimum
        if denom == 0:
            denom = 1
        normalized_arr = [(x - minimum) / denom * 2 - 1 for x in normalized_arr]
    return normalized_arr
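
# Minimal usage sketch (an assumed entry point, not in the original file):
# each softmax variant should return values that sum to ~1.0.
if __name__ == "__main__":
    scores = [2.0, 1.0, 0.1]
    print(softmax(scores))         # list of probabilities summing to ~1.0
    print(list(softmax2(scores)))  # numpy version of the same computation
    print(normalize(scores))      # inputs rescaled into [-1, 1]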