11 changes: 11 additions & 0 deletions CMakeLists.txt
@@ -51,6 +51,9 @@ OPTION(BUILD_ATEN "Build ATen tensor library" ON)
OPTION(BUILD_ATENSPACE "Build ATenSpace (AtomSpace + Tensor embeddings)" ON)
OPTION(BUILD_TENSOR_LOGIC "Build Tensor Logic integration layer" ON)

# Neural Network Framework
OPTION(BUILD_NN "Build nn (Lua/Torch neural network library)" ON)

# External integration packages
OPTION(BUILD_GNUCASH "Build Gnucash cognitive accounting integration" OFF)
OPTION(BUILD_KOBOLDCPP "Build KoboldCpp story/world modeling integration" OFF)
@@ -215,6 +218,14 @@ IF(BUILD_COGSELF AND BUILD_COGGML)
add_subdirectory(cogself)
ENDIF()

# Neural Network Framework - Lua/Torch nn library
IF(BUILD_NN)
MESSAGE(STATUS "Building nn (Lua/Torch neural network framework)...")
MESSAGE(STATUS " Location: nn/")
MESSAGE(STATUS " Note: Lua library - no CMake build needed")
MESSAGE(STATUS " See nn/README.md for usage")
ENDIF()

# External integration packages
IF(BUILD_GNUCASH AND BUILD_ATOMSPACE)
MESSAGE(STATUS "Building Gnucash cognitive accounting integration...")
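Since nn ships as pure Lua and the CMake block above only prints status messages, the sole integration step is making the nn/ directory visible to the Lua runtime. A minimal smoke test, assuming a Torch7-compatible interpreter and that an nn/init.lua (not shown in this diff) registers these classes under the global nn table:

-- Hypothetical smoke test; 'nn' being on package.path is assumed.
require 'torch'
require 'nn'

local crit = nn.CrossEntropyCriterion()
print(torch.type(crit))  -- expected: 'nn.CrossEntropyCriterion'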
73 changes: 73 additions & 0 deletions nn/ClassNLLCriterion.lua
@@ -0,0 +1,73 @@
--[[
nn.ClassNLLCriterion - Negative Log-Likelihood loss

Expects log-probabilities as input (typically the output of nn.LogSoftMax)
and a class index (or a 1D tensor of indices for a batch) as target.

loss = -input[target]
]]--

local ClassNLLCriterion, parent = torch.class('nn.ClassNLLCriterion', 'nn.Criterion')

function ClassNLLCriterion:__init(weights)
parent.__init(self)
self.weights = weights
self.sizeAverage = true
end

function ClassNLLCriterion:updateOutput(input, target)
if input:dim() == 1 then
-- Single sample
self.output = -input[target]
if self.weights then
self.output = self.output * self.weights[target]
end
elseif input:dim() == 2 then
-- Batch of samples
local nFrame = input:size(1)
self.output = 0
for i = 1, nFrame do
local t = target[i]
local val = -input[i][t]
if self.weights then
val = val * self.weights[t]
end
self.output = self.output + val
end
if self.sizeAverage then
self.output = self.output / nFrame
end
else
error('input must be vector or matrix')
end

return self.output
end

function ClassNLLCriterion:updateGradInput(input, target)
self.gradInput:resizeAs(input):zero()

if input:dim() == 1 then
-- Single sample
self.gradInput[target] = -1
if self.weights then
self.gradInput[target] = self.gradInput[target] * self.weights[target]
end
elseif input:dim() == 2 then
-- Batch of samples
local nFrame = input:size(1)
for i = 1, nFrame do
local t = target[i]
self.gradInput[i][t] = -1
if self.weights then
self.gradInput[i][t] = self.gradInput[i][t] * self.weights[t]
end
end
if self.sizeAverage then
self.gradInput:div(nFrame)
end
end

return self.gradInput
end

return ClassNLLCriterion
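To make the contract concrete, here is a small worked example against this criterion; the numbers are illustrative only and assume the standard Torch7 tensor API:

-- Worked example: batch of 2 samples over 3 classes, inputs are log-probabilities.
local logProbs = torch.Tensor{{-0.2, -1.8, -2.5},
                              {-2.3, -0.3, -1.9}}
local targets = torch.Tensor{1, 2}

local crit = nn.ClassNLLCriterion()
local loss = crit:forward(logProbs, targets)  -- (0.2 + 0.3) / 2 = 0.25
local grad = crit:backward(logProbs, targets) -- -0.5 at each (i, target[i]), 0 elsewhere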
58 changes: 58 additions & 0 deletions nn/Concat.lua
@@ -0,0 +1,58 @@
--[[
nn.Concat - Container that concatenates outputs of child modules
]]--

local Concat, parent = torch.class('nn.Concat', 'nn.Container')

function Concat:__init(dimension)
parent.__init(self)
self.dimension = dimension or 1
self.size = torch.LongStorage()
end

function Concat:updateOutput(input)
local nModule = #self.modules

-- Collect outputs from all modules
local outputs = {}
for i = 1, nModule do
outputs[i] = self.modules[i]:updateOutput(input)
end

-- Concatenate along dimension
self.output = torch.cat(outputs, self.dimension)

return self.output
end

function Concat:updateGradInput(input, gradOutput)
local nModule = #self.modules

-- Split gradOutput for each module
local offset = 1
self.gradInput:resizeAs(input):zero()

for i = 1, nModule do
local outputSize = self.modules[i].output:size(self.dimension)
local narrowGradOutput = gradOutput:narrow(self.dimension, offset, outputSize)
local gradInput = self.modules[i]:updateGradInput(input, narrowGradOutput)
self.gradInput:add(gradInput)
offset = offset + outputSize
end

return self.gradInput
end

function Concat:accGradParameters(input, gradOutput, scale)
scale = scale or 1
local nModule = #self.modules
local offset = 1

for i = 1, nModule do
local outputSize = self.modules[i].output:size(self.dimension)
local narrowGradOutput = gradOutput:narrow(self.dimension, offset, outputSize)
self.modules[i]:accGradParameters(input, narrowGradOutput, scale)
offset = offset + outputSize
end
end

return Concat
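A usage sketch; nn.Linear is the standard Torch linear layer and is not part of this diff, so treat it as an assumed dependency:

-- Sketch: two branches see the same input; their outputs are concatenated.
local concat = nn.Concat(1)        -- concatenate along dimension 1
concat:add(nn.Linear(10, 4))       -- branch 1: 10 -> 4
concat:add(nn.Linear(10, 6))       -- branch 2: 10 -> 6
local out = concat:forward(torch.randn(10))
print(out:size(1))                 -- 10 (= 4 + 6)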
91 changes: 91 additions & 0 deletions nn/Container.lua
@@ -0,0 +1,91 @@
--[[
nn.Container - Base class for modules containing other modules
]]--

local Container, parent = torch.class('nn.Container', 'nn.Module')

function Container:__init(...)
parent.__init(self)
self.modules = {}
end

function Container:add(module)
table.insert(self.modules, module)
return self
end

function Container:get(index)
return self.modules[index]
end

function Container:size()
return #self.modules
end

function Container:parameters()
local params = {}
local gradParams = {}

for i = 1, #self.modules do
local p, gp = self.modules[i]:parameters()
if p then
for j = 1, #p do
table.insert(params, p[j])
table.insert(gradParams, gp[j])
end
end
end

return params, gradParams
end

function Container:training()
parent.training(self)
for i = 1, #self.modules do
self.modules[i]:training()
end
end

function Container:evaluate()
parent.evaluate(self)
for i = 1, #self.modules do
self.modules[i]:evaluate()
end
end

function Container:type(type, tensorCache)
parent.type(self, type, tensorCache)
for i = 1, #self.modules do
self.modules[i]:type(type, tensorCache)
end
return self
end

function Container:zeroGradParameters()
for i = 1, #self.modules do
self.modules[i]:zeroGradParameters()
end
end

function Container:updateParameters(learningRate)
for i = 1, #self.modules do
self.modules[i]:updateParameters(learningRate)
end
end

function Container:reset(stdv)
for i = 1, #self.modules do
self.modules[i]:reset(stdv)
end
end

function Container:__tostring__()
local str = torch.type(self) .. ' {\n'
for i = 1, #self.modules do
str = str .. ' (' .. i .. '): ' .. tostring(self.modules[i]):gsub('\n', '\n ') .. '\n'
end
str = str .. '}'
return str
end

return Container
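Container itself is abstract, but the bookkeeping is easy to see through a concrete subclass such as nn.Concat above (nn.Linear again assumed, as it is not in this diff):

-- Sketch: every Container method recurses into the children.
local c = nn.Concat(1)
c:add(nn.Linear(10, 4)):add(nn.Linear(10, 6))  -- add() returns self, so calls chain
local params, gradParams = c:parameters()      -- flattened lists over all children
c:zeroGradParameters()                         -- zeroes every child's gradients
c:evaluate()                                   -- switches every child out of training mode
print(c)                                       -- __tostring__ lists children by index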
53 changes: 53 additions & 0 deletions nn/Criterion.lua
@@ -0,0 +1,53 @@
--[[
nn.Criterion - Base class for loss functions
]]--

local Criterion = torch.class('nn.Criterion')

function Criterion:__init()
self.gradInput = torch.Tensor()
self.output = 0
end

function Criterion:updateOutput(input, target)
error('updateOutput method not implemented')
end

function Criterion:forward(input, target)
return self:updateOutput(input, target)
end

function Criterion:updateGradInput(input, target)
error('updateGradInput method not implemented')
end

function Criterion:backward(input, target)
return self:updateGradInput(input, target)
end

function Criterion:type(type)
assert(type, 'Criterion:type(type) expects a string')

-- Convert all tensors, and recurse into nested modules/criterions
-- (e.g. CrossEntropyCriterion holds a LogSoftMax and a ClassNLLCriterion)
for key, param in pairs(self) do
if torch.isTensor(param) then
self[key] = param:type(type)
elseif torch.type(param):find('^nn%.') and param.type then
param:type(type)
end
end

return self
end

function Criterion:float()
return self:type('torch.FloatTensor')
end

function Criterion:double()
return self:type('torch.DoubleTensor')
end

function Criterion:cuda()
return self:type('torch.CudaTensor')
end

return Criterion
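The two error stubs define the subclass contract: implement updateOutput and updateGradInput, and forward/backward come for free. A toy mean-squared-error criterion as a sketch (the class name and formula are ours for illustration, not part of this diff):

-- Hypothetical subclass illustrating the contract.
local ToyMSE, parent = torch.class('nn.ToyMSECriterion', 'nn.Criterion')

function ToyMSE:__init()
   parent.__init(self)
end

function ToyMSE:updateOutput(input, target)
   self.output = (input - target):pow(2):mean()
   return self.output
end

function ToyMSE:updateGradInput(input, target)
   -- d/dx mean((x - t)^2) = 2 (x - t) / N
   self.gradInput:resizeAs(input):copy(input):add(-1, target):mul(2 / input:nElement())
   return self.gradInput
end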
40 changes: 40 additions & 0 deletions nn/CrossEntropyCriterion.lua
@@ -0,0 +1,40 @@
--[[
nn.CrossEntropyCriterion - Cross Entropy loss

Combines LogSoftMax and ClassNLLCriterion in one step, so input
is expected to be raw (unnormalized) class scores

loss = -log(exp(input[target]) / sum(exp(input)))
]]--

local CrossEntropyCriterion, parent = torch.class('nn.CrossEntropyCriterion', 'nn.Criterion')

function CrossEntropyCriterion:__init(weights)
parent.__init(self)
self.weights = weights
self.sizeAverage = true
self.logSoftMax = nn.LogSoftMax()
self.nll = nn.ClassNLLCriterion(weights)
self.nll.sizeAverage = self.sizeAverage
end

function CrossEntropyCriterion:updateOutput(input, target)
-- Propagate sizeAverage in case it was changed after construction
self.nll.sizeAverage = self.sizeAverage

-- Apply LogSoftMax
local logProbs = self.logSoftMax:forward(input)

-- Apply NLL
self.output = self.nll:forward(logProbs, target)

return self.output
end

function CrossEntropyCriterion:updateGradInput(input, target)
-- Backprop through NLL
local gradLogProbs = self.nll:backward(self.logSoftMax.output, target)

-- Backprop through LogSoftMax
self.gradInput = self.logSoftMax:backward(input, gradLogProbs)

return self.gradInput
end

return CrossEntropyCriterion
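Since the class is just LogSoftMax composed with ClassNLLCriterion, the two paths should agree numerically; a quick consistency sketch, assuming nn.LogSoftMax from elsewhere in the package:

-- Sketch: composed criterion vs. the manual two-step pipeline.
local input = torch.randn(4, 3)          -- 4 samples, 3 classes, raw scores
local target = torch.Tensor{1, 3, 2, 1}

local ce = nn.CrossEntropyCriterion()
local lsm = nn.LogSoftMax()
local nll = nn.ClassNLLCriterion()

local a = ce:forward(input, target)
local b = nll:forward(lsm:forward(input), target)
-- a and b should match up to floating-point rounding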