Skip to content

Commit

Permalink
Merge branch 'master' of https://github.com/Microsoft/CNTK into qiwye…
Browse files Browse the repository at this point in the history
…/asgd-e2e-fix
  • Loading branch information
chivee committed Apr 19, 2017
2 parents b1a2058 + f666539 commit 7a70d12
Show file tree
Hide file tree
Showing 24 changed files with 103 additions and 67 deletions.
3 changes: 2 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,8 @@ MATH_SRC =\
$(SOURCEDIR)/Math/BatchNormalizationEngine.cpp \
$(SOURCEDIR)/Math/BlockHandlerSSE.cpp \
$(SOURCEDIR)/Math/CUDAPageLockedMemAllocator.cpp \
$(SOURCEDIR)/Math/CPUMatrix.cpp \
$(SOURCEDIR)/Math/CPUMatrixFloat.cpp \
$(SOURCEDIR)/Math/CPUMatrixDouble.cpp \
$(SOURCEDIR)/Math/CPURNGHandle.cpp \
$(SOURCEDIR)/Math/CPUSparseMatrix.cpp \
$(SOURCEDIR)/Math/ConvolutionEngine.cpp \
Expand Down
2 changes: 1 addition & 1 deletion Source/CNTKv2LibraryDll/CompositeFunction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ namespace CNTK
// Create state from scratch, so that function attributes contain all the required key-value pairs.
Dictionary state;
state[PrimitiveFunction::AttributeNameRngSeed] = Internal::GenerateRandomSeed();
state[PrimitiveFunction::AttributeNameRngOffset] = 0;
state[PrimitiveFunction::AttributeNameRngOffset] = size_t(0);
primitiveFunction->SetState(state);
}
}
Expand Down
12 changes: 12 additions & 0 deletions Source/Math/CPUMatrixDouble.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrixDouble.cpp : compiles only the double-precision instantiation of the
// CPUMatrix template, whose implementation lives in CPUMatrixImpl.h. Splitting
// float and double into separate translation units keeps each compile small.
//
#include "stdafx.h"
#include "CPUMatrixImpl.h"

namespace Microsoft { namespace MSR { namespace CNTK {

// explicit instantiations, due to CPUMatrix being too big and causing VS2015 cl crash.
template class MATH_API CPUMatrix<double>;
}}}
24 changes: 24 additions & 0 deletions Source/Math/CPUMatrixFloat.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrixFloat.cpp : compiles the single-precision instantiation of the
// CPUMatrix template (implementation in CPUMatrixImpl.h). Also hosts the
// out-of-line definitions for TracingGPUMemoryAllocator, which must live in
// exactly one translation unit now that the former CPUMatrix.cpp is a header.
//
#include "stdafx.h"
#include "CPUMatrixImpl.h"

namespace Microsoft { namespace MSR { namespace CNTK {

// Single definition of the static trace-level flag (0 = tracing disabled).
int MATH_API TracingGPUMemoryAllocator::m_traceLevel = 0;

// Sets the global GPU-memory-allocation trace verbosity.
void TracingGPUMemoryAllocator::SetTraceLevel(int traceLevel)
{
m_traceLevel = traceLevel;
}

// Returns true when any tracing has been requested (trace level above 0).
bool TracingGPUMemoryAllocator::IsTraceEnabled()
{
return (m_traceLevel > 0);
}

// explicit instantiations, due to CPUMatrix being too big and causing VS2015 cl crash.
template class MATH_API CPUMatrix<float>;
}}}
25 changes: 4 additions & 21 deletions Source/Math/CPUMatrix.cpp → Source/Math/CPUMatrixImpl.h
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.cpp : full implementation of all matrix functions on the CPU side
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//

#include "stdafx.h"
#pragma once

#include "Basics.h"
#include "File.h"

Expand Down Expand Up @@ -66,18 +67,6 @@
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
namespace Microsoft { namespace MSR { namespace CNTK {

int MATH_API TracingGPUMemoryAllocator::m_traceLevel = 0;

void TracingGPUMemoryAllocator::SetTraceLevel(int traceLevel)
{
m_traceLevel = traceLevel;
}

bool TracingGPUMemoryAllocator::IsTraceEnabled()
{
return (m_traceLevel > 0);
}

#pragma region Helpful Enum Definitions
enum class MatrixOrder
{
Expand Down Expand Up @@ -5923,7 +5912,7 @@ CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const
#pragma endregion Static BLAS Functions

// 'double' version of LogAdd
double LogAddD(double x, double y)
inline double LogAddD(double x, double y)
{
return LogAdd(x, y);
}
Expand Down Expand Up @@ -7184,12 +7173,6 @@ void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseO
}
}

// =======================================================================
// explicit instantiations
// =======================================================================
template class MATH_API CPUMatrix<float>;
template class MATH_API CPUMatrix<double>;

// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
Expand Down
6 changes: 4 additions & 2 deletions Source/Math/Math.vcxproj
Original file line number Diff line number Diff line change
Expand Up @@ -194,12 +194,15 @@
<ClInclude Include="QuantizedMatrix.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="targetver.h" />
<ClInclude Include="CPUMatrixImpl.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="BatchNormalizationEngine.cpp" />
<ClCompile Include="BlockHandlerAVX.cpp" />
<ClCompile Include="BlockHandlerSSE.cpp" />
<ClCompile Include="ConvolutionEngine.cpp" />
<ClCompile Include="CPUMatrixDouble.cpp" />
<ClCompile Include="CPUMatrixFloat.cpp" />
<ClCompile Include="CPURNGHandle.cpp" />
<ClCompile Include="CPUSparseMatrix.cpp" />
<ClCompile Include="CUDAPageLockedMemAllocator.cpp" />
Expand All @@ -209,7 +212,6 @@
<PrecompiledHeader>
</PrecompiledHeader>
</ClCompile>
<ClCompile Include="CPUMatrix.cpp" />
<ClCompile Include="MatrixQuantizerCPU.cpp" />
<ClCompile Include="MatrixQuantizerImpl.cpp" />
<ClCompile Include="NoGPU.cpp" />
Expand All @@ -231,4 +233,4 @@
<Error Condition="'$(MathLibrary)' == 'MKL' And !Exists('$(CNTKCustomMKLPath)')" Text="CNTK custom MKL version $(CNTKCustomMKLVersion) not found. See https://github.com/Microsoft/CNTK/wiki/Setup-CNTK-on-Windows#mkl for instructions." />
<Error Condition="!$(HasBoost)" Text="The Boost library is required to build. Please see https://github.com/Microsoft/CNTK/wiki/Setup-CNTK-on-Windows#boost for installation instructions." />
</Target>
</Project>
</Project>
12 changes: 9 additions & 3 deletions Source/Math/Math.vcxproj.filters
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,6 @@
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="Matrix.cpp" />
<ClCompile Include="CPUMatrix.cpp">
<Filter>CPU</Filter>
</ClCompile>
<ClCompile Include="CPUSparseMatrix.cpp">
<Filter>CPU</Filter>
</ClCompile>
Expand Down Expand Up @@ -49,6 +46,12 @@
<Filter>CPU</Filter>
</ClCompile>
<ClCompile Include="DataTransferer.cpp" />
<ClCompile Include="CPUMatrixDouble.cpp">
<Filter>CPU</Filter>
</ClCompile>
<ClCompile Include="CPUMatrixFloat.cpp">
<Filter>CPU</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="CommonMatrix.h" />
Expand Down Expand Up @@ -131,6 +134,9 @@
<ClInclude Include="QuantizedOperations.h" />
<ClInclude Include="BlockMultiplierMatrixUtil.h" />
<ClInclude Include="DataTransferer.h" />
<ClInclude Include="CPUMatrixImpl.h">
<Filter>CPU</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="GPUMatrix.h">
Expand Down
3 changes: 2 additions & 1 deletion Tests/EndToEndTests/CNTKv2Python/Sphinx/run-test
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ else
FIND=find
fi

sphinx-apidoc "$MODULE_DIR" -o . -f "$MODULE_DIR/cntk_py.py" $($FIND "$MODULE_DIR" -type d -name tests | $NORM_PATH)
sphinx-apidoc "$MODULE_DIR" -o . -f \
$( ( printf "$MODULE_DIR/%s\n" cntk_py.py conftest.py internal; $FIND "$MODULE_DIR" -type d -name tests ) | $NORM_PATH)

sphinx-build -b html -d _build/doctrees -W -j $(nproc) . _build/html

Expand Down
8 changes: 4 additions & 4 deletions bindings/python/cntk/debugging/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,16 @@
# for full license information.
# ==============================================================================

'''
Helper functions for debugging graphs.
'''

from __future__ import division
from __future__ import print_function

from .debug import *
from .profiler import *

'''
Helper functions for debugging graphs.
'''

def dump_signature(root, tag=None):
'''
Debug helper that prints the signature of a Function.
Expand Down
2 changes: 1 addition & 1 deletion bindings/python/cntk/eval/evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def test_minibatch(self, arguments, device=None):
Returns:
`float`: the average evaluation criterion value per sample for the
tested minibatch.
tested minibatch.
'''
if not device:
device = use_default_device()
Expand Down
4 changes: 3 additions & 1 deletion bindings/python/cntk/layers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@
# for full license information.
# ==============================================================================

# CNTK Layers library
'''
CNTK Layers library
'''

from .blocks import *
from .higher_order_layers import *
Expand Down
4 changes: 2 additions & 2 deletions bindings/python/cntk/layers/blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
# ==============================================================================

'''
blocks -- basic building blocks that are semantically not layers (not used in a layered fashion)
e.g. the LSTM
Basic building blocks that are semantically not layers (not used in a layered fashion),
e.g. the LSTM block.
'''

from __future__ import division
Expand Down
4 changes: 2 additions & 2 deletions bindings/python/cntk/layers/higher_order_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
# ==============================================================================

'''
higher_order_layers -- higher-order functions, like Sequential() and ResNetBlock().
Note that sequential higher-order functions like Recurrence() are in sequence.py.
Higher-order functions, like :func:`Sequential` and :func:`ResNetBlock`. Note that
sequential higher-order functions like :func:`~cntk.layers.sequence.Recurrence` are in :mod:`cntk.layers.sequence`.
'''

from types import FunctionType
Expand Down
6 changes: 4 additions & 2 deletions bindings/python/cntk/layers/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@
# for full license information.
# ==============================================================================

# layers -- blocks in the network that are used layer-like, i.e. layered on top of each other
# e.g. a fully connected layer with non-linearity
'''
Blocks in the network that are used layer-like, i.e. layered on top of each other
e.g. a fully connected layer with non-linearity.
'''

from __future__ import division
import numpy as np
Expand Down
5 changes: 3 additions & 2 deletions bindings/python/cntk/layers/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
# for full license information.
# ==============================================================================

# CNTK models collection
# Layers and building blocks that are not generic but represent more specific model types go here.
'''
Layers and building blocks that are not generic but represent more specific model types go here.
'''

from .attention import *
2 changes: 1 addition & 1 deletion bindings/python/cntk/layers/models/attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# ==============================================================================

'''
attention -- standard attention model
Standard attention model.
'''

from __future__ import division
Expand Down
12 changes: 7 additions & 5 deletions bindings/python/cntk/layers/sequence.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@
# for full license information.
# ==============================================================================

# sequence -- first/higher-order functions over sequences, like Recurrence()
'''
First / higher-order functions over sequences, like :func:`Recurrence`.
'''

from ..variables import Record
from ..ops import combine, splice, sequence
Expand Down Expand Up @@ -528,10 +530,10 @@ def UnfoldFrom(generator_function, until_predicate=None, length_increase=1, name
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function(initial_state, dynamic_axis_like)`:
A function that accepts two arguments (`initial state` and `dynamic_axis_like`), and performs the unfold operation on it.
The `initial state` argument is the initial state for the recurrence.
The `dynamic_axis_like` must be a sequence and provides a reference for the maximum length of the output sequence.
:class:`~cntk.ops.functions.Function`:
A function that accepts two arguments (`initial state` and `dynamic_axis_like`), and performs the unfold operation on it.
The `initial state` argument is the initial state for the recurrence.
The `dynamic_axis_like` must be a sequence and provides a reference for the maximum length of the output sequence.
'''

generator_function = _sanitize_function(generator_function)
Expand Down
2 changes: 1 addition & 1 deletion bindings/python/cntk/layers/typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# ==============================================================================

'''
The CNTK typing module contains basic CNTK type meta-classes for :func:`~cntk.functions.Function.update_signature` and type signatures for the CNTK :class:`~cntk.functions.Function` decorator.
The CNTK typing module contains basic CNTK type meta-classes for :func:`~cntk.functions.Function.update_signature` and type signatures for the CNTK :class:`~cntk.ops.functions.Function` decorator.
The type of a CNTK :class:`~cntk.variables.Variable` is defined by five properties: `shape`, `dynamic_axes`, `is_sparse`, `dtype`, and `needs_gradient`.
Some API functions accept these variables as independent arguments, e.g. :class:`~cntk.input`.
Expand Down
6 changes: 3 additions & 3 deletions bindings/python/cntk/ops/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1535,7 +1535,7 @@ def sqrt(x, name=''):
Note:
CNTK returns zero for sqrt of negative nubmers, this will be changed to
retrun NaN
return NaN
'''
from cntk.cntk_py import sqrt
x = sanitize_input(x)
Expand Down Expand Up @@ -2603,7 +2603,7 @@ def input_variable(shape, dtype=np.float32, needs_gradient=False, is_sparse=Fals
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.variables.Variable`
:class:`~cntk.variables.Variable`
'''
import warnings
warnings.warn('This will be removed in future versions. Please use '
Expand Down Expand Up @@ -2684,7 +2684,7 @@ def placeholder_variable(shape=None, dynamic_axes=None, name=''):
name (str, optional): the name of the placeholder variable in the network
Returns:
:class:`~cntk.ops.variables.Variable`
:class:`~cntk.variables.Variable`
'''
import warnings
warnings.warn('This will be removed in future versions. Please use '
Expand Down
14 changes: 7 additions & 7 deletions bindings/python/cntk/ops/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -766,17 +766,17 @@ def grad(self, at, wrt=None, outputs=None, device=None, as_numpy=True, grad_root
costly conversion but returns a somewhat opaque object. Also, the Value objects
are temporary and only guaranteed to be valid until the next forward/eval/backward/grad call.
You must explicitly clone the temporay Value objects if they need to be accessed later.
grad_root (variable, optional): specify the root of gradients calculation.
grad_root (:class:`~cntk.variables.Variable`, optional): specify the root of gradients calculation.
If not specified, the output of this function will be used as gradient root.
Returns:
dict or NumPy Array or a tuple of these: Dict with keys of ``wrt`` variables and gradient values of
``wrt`` variables. A single NumPy array if there is only one gradient value.
If ``outputs`` were specified (to fetch values for), this method returns a tuple where the 2nd element
of the tuple is the ``outputs`` values; a dict with keys of specified ``outputs`` variables and
values of computed ``outputs``, or a single NumPy array if there is only one output value.
Each element has the same shape as the ``wrt`` or ``outputs`` variables including dynamic axes
(such as the batch axis).
``wrt`` variables. A single NumPy array if there is only one gradient value.
If ``outputs`` were specified (to fetch values for), this method returns a tuple where the 2nd element
of the tuple is the ``outputs`` values; a dict with keys of specified ``outputs`` variables and
values of computed ``outputs``, or a single NumPy array if there is only one output value.
Each element has the same shape as the ``wrt`` or ``outputs`` variables including dynamic axes
(such as the batch axis).
'''
if device is None:
device = DeviceDescriptor.use_default_device()
Expand Down
2 changes: 1 addition & 1 deletion bindings/python/cntk/train/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ def test_minibatch(self, arguments, device=None):
Returns:
`float`: the average evaluation criterion value per sample for the
tested minibatch.
tested minibatch.
'''
if not device:
device = use_default_device()
Expand Down
4 changes: 3 additions & 1 deletion bindings/python/doc/build.bat
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,13 @@ set PATH=%CD%\..;%CD%\..\..\..\x64\Release;%PATH%

@REM TODO better align conf.py exclude with excluded paths here
sphinx-apidoc.exe ..\cntk -o . -f ^
..\cntk\blocks.py ^
..\cntk\cntk_py.py ^
..\cntk\conftest.py ^
..\cntk\tests ^
..\cntk\debugging\tests ^
..\cntk\eval\tests ^
..\cntk\internal\tests ^
..\cntk\internal ^
..\cntk\io\tests ^
..\cntk\layers\tests ^
..\cntk\learners\tests ^
Expand Down
Loading

0 comments on commit 7a70d12

Please sign in to comment.