diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e287408fb..398f7a463 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,6 +8,22 @@ default:
   image: ai.b-bug.org:5000/k230_sdk:latest
   interruptible: true
 
+# download dir publish with tag
+download_dir_publish:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_TAG =~ /^v\d+\.\d+.*$/
+  extends:
+    - .setup_env
+    - .sync_download_dir
+
+# sdk zip and image publish with tag
+release_file_publish:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_TAG =~ /^v\d+\.\d+.*$/
+  extends:
+    - .setup_env
+    - .sync_release_file
+
 # MR merged
 merge_sync:
   rules:
@@ -93,8 +109,7 @@ build-image:
     - git config --global --add safe.directory $CI_PROJECT_DIR
 
 .sync_github_gitee:
-  timeout: 90m
-  image: ai.b-bug.org:5000/k230_sdk:latest
+  timeout: 30m
   tags:
     - k230_sdk
   script:
@@ -121,6 +136,32 @@ build-image:
     - echo "---push to github---"
    - git push --atomic --tags -u -f github main || git push --atomic --tags -u -f github main || git push --atomic --tags -u -f github main
 
+.sync_download_dir:
+  timeout: 60m
+  tags:
+    - k230_sdk
+  script:
+    - cd $CI_PROJECT_DIR
+    - ls -alht
+    - wget -qc https://ai.b-bug.org/k510/downloads/github/sync_file.sh -O ./sync_file.sh
+    - chmod +x ./sync_file.sh
+    - time ./sync_file.sh || time ./sync_file.sh
+    - echo "all file synced"
+
+.sync_release_file:
+  timeout: 60m
+  tags:
+    - k230_sdk
+  script:
+    - cd $CI_PROJECT_DIR
+    - ls -alht
+    - wget -qc https://ai.b-bug.org/k510/downloads/github/sync_release.sh -O ./sync_release.sh
+    - chmod +x ./sync_release.sh
+    - echo $CI_COMMIT_TAG || exit 1
+    - echo $CONF || exit 1
+    - time ./sync_release.sh $CI_COMMIT_TAG $CONF || time ./sync_release.sh $CI_COMMIT_TAG $CONF
+    - echo "all release file synced"
+
 .generate_version: &generate_version
   - echo "----------get version from file----------"
   - pwd
diff --git a/Makefile b/Makefile
index ee41b59f1..78b570801 100755
--- a/Makefile
+++ b/Makefile
@@ -112,8 +112,8 @@ prepare_sourcecode:prepare_toolchain
 	#ai
 	@echo "download nncase sdk"
 	@rm -rf src/big/utils/; rm -rf src/big/ai;
-	@wget -q --show-progress $(DOWNLOAD_URL)/downloads/kmodel/kmodel_v2.3.0.tgz -O - | tar -xzC src/big/
-	@wget -q --show-progress $(DOWNLOAD_URL)/downloads/nncase/nncase_k230_v2.3.0.tgz -O - | tar -xzC src/big/
+	@wget -q --show-progress $(DOWNLOAD_URL)/downloads/kmodel/kmodel_v2.4.0.tgz -O - | tar -xzC src/big/
+	@wget -q --show-progress $(DOWNLOAD_URL)/downloads/nncase/nncase_k230_v2.4.0.tgz -O - | tar -xzC src/big/
 
 	#big utils
 	@echo "download big utils"
diff --git a/configs/k230_evb_doorlock_ov9286_defconfig b/configs/k230_evb_doorlock_ov9286_defconfig
index 5fb93e489..1ef5ebfe3 100755
--- a/configs/k230_evb_doorlock_ov9286_defconfig
+++ b/configs/k230_evb_doorlock_ov9286_defconfig
@@ -47,7 +47,7 @@ CONFIG_MEM_SPECKLE_BASE=0x7c40000
 CONFIG_MEM_SPECKLE_SIZE=0x00000
 CONFIG_MEM_MMZ_BASE=0x4ee0000
 CONFIG_MEM_MMZ_SIZE=0x30e0000
-CONFIG_MEM_RTAPP_BASE=0x4e00000
+CONFIG_MEM_RTAPP_BASE=0x4ee0000
 CONFIG_MEM_RTAPP_SIZE=0x1400000
 CONFIG_MEM_BOUNDARY_RESERVED_SIZE=0x00001000
diff --git a/configs/k230d_doorlock_ov9286_defconfig b/configs/k230d_doorlock_ov9286_defconfig
index ac7e47964..e4655cb5a 100755
--- a/configs/k230d_doorlock_ov9286_defconfig
+++ b/configs/k230d_doorlock_ov9286_defconfig
@@ -47,7 +47,7 @@ CONFIG_MEM_SPECKLE_BASE=0x7c40000
 CONFIG_MEM_SPECKLE_SIZE=0x00000
 CONFIG_MEM_MMZ_BASE=0x4ee0000
 CONFIG_MEM_MMZ_SIZE=0x30e0000
-CONFIG_MEM_RTAPP_BASE=0x4e00000
+CONFIG_MEM_RTAPP_BASE=0x4ee0000
 CONFIG_MEM_RTAPP_SIZE=0x1400000
 CONFIG_MEM_BOUNDARY_RESERVED_SIZE=0x00001000
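Reviewer note: both new publish jobs key off the same tag rule, `$CI_COMMIT_TAG =~ /^v\d+\.\d+.*$/`. A quick self-check of which tag names trigger a publish (a sketch using C++ `std::regex`; its ECMAScript dialect matches GitLab's RE2 closely enough for this simple pattern, though that equivalence is an assumption):

```cpp
#include <iostream>
#include <regex>

int main()
{
    // Same pattern as the new download_dir_publish/release_file_publish rules.
    const std::regex tag_rule{R"(^v\d+\.\d+.*$)"};
    for (const char *tag : {"v1.4", "v1.4.1-rc1", "1.4", "release-v1.4"})
        std::cout << tag << " -> "
                  << (std::regex_match(tag, tag_rule) ? "publish" : "skip") << '\n';
    return 0;
}
```

Only `v<major>.<minor>...` style tags (the first two here) start the sync jobs; unprefixed or otherwise-shaped tags are ignored.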
diff --git a/repo.mak b/repo.mak
index dd32751c5..211ebd9bf 100755
--- a/repo.mak
+++ b/repo.mak
@@ -1,11 +1,11 @@
-RT-SMART_VERSION = "0e9316ca84c3df5b556d1aaaf213bc929bdd103d"
-MPP_VERSION = "e224f078e9ec8f5185c2be6b190e55505354bf10"
+RT-SMART_VERSION = "0a2c55975d1181e66abf4ee10f8cf29f0966071d"
+MPP_VERSION = "0955ee6e157771ff8870bf49fd0c598126abe9f2"
 UNITTEST_VERSION = "a07caf5e19a6a215605271cfeedce18111790e9d"
 OPENSBI_VERSION = "9aa2f6d34e685bbbb0afaab4e308b93a9cd06ec7"
-BUILDROOT-EXT_VERSION = "b271acab71c5cb5efb00047a223ed21edec40660"
-LINUX_VERSION = "70d9f999f4081bd783c089b6e26c97792676be65"
+BUILDROOT-EXT_VERSION = "8eafd41f1dde4ff48b914d5978cecf7af627fba6"
+LINUX_VERSION = "04b6371ff02d49cba18adc81dfcdbbec0cbfed56"
 UBOOT_VERSION = "317894980fd498b863820b1343096fe188faae53"
-CDK_VERSION = "9ddc100e5be1df8af31e21e29ee1980bc0487a4a"
+CDK_VERSION = "fea98217f3a781508de19bb2ecfea390526414ba"
 
 RT-SMART_SRC_PATH = src/big/rt-smart
 MPP_SRC_PATH = src/big/mpp
diff --git a/src/big/mpp/include/comm/k_audio_comm.h b/src/big/mpp/include/comm/k_audio_comm.h
index 152eb0019..c2a48acf9 100644
--- a/src/big/mpp/include/comm/k_audio_comm.h
+++ b/src/big/mpp/include/comm/k_audio_comm.h
@@ -47,6 +47,12 @@ typedef enum
     KD_AUDIO_SOUND_MODE_STEREO = 2, /* stereo */
 } k_audio_snd_mode;
 
+typedef enum
+{
+    KD_I2S_IN_MONO_RIGHT_CHANNEL = 0, //mic input
+    KD_I2S_IN_MONO_LEFT_CHANNEL = 1,  //hp input
+} k_i2s_in_mono_channel;
+
 typedef enum
 {
     KD_AUDIO_INPUT_TYPE_I2S = 0,//i2s in
@@ -97,6 +103,7 @@ typedef struct
     k_u32 sample_rate;  /* sample rate */
     k_audio_bit_width bit_width;  /* bit_width */
     k_audio_snd_mode snd_mode;  /* momo or stereo */
+    k_i2s_in_mono_channel mono_channel;/* use mic input or headphone input */
     k_i2s_work_mode i2s_mode;  /*i2s work mode*/
     k_u32 frame_num;  /* frame num in buf[2,K_MAX_AUDIO_FRAME_NUM] */
     k_u32 point_num_per_frame;
diff --git a/src/big/mpp/include/comm/k_connector_comm.h b/src/big/mpp/include/comm/k_connector_comm.h
index 1a9d8a17c..b8958e4e6 100755
--- a/src/big/mpp/include/comm/k_connector_comm.h
+++ b/src/big/mpp/include/comm/k_connector_comm.h
@@ -51,7 +51,11 @@ extern "C" {
 typedef enum
 {
     HX8377_V2_MIPI_4LAN_1080X1920_30FPS,
+    LT9611_MIPI_4LAN_1920X1080_60FPS,
     LT9611_MIPI_4LAN_1920X1080_30FPS,
+    LT9611_MIPI_4LAN_1280X720_60FPS,
+    LT9611_MIPI_4LAN_1280X720_50FPS,
+    LT9611_MIPI_4LAN_640X480_60FPS,
 } k_connector_type;
 
 typedef struct
diff --git a/src/big/mpp/include/comm/k_vo_comm.h b/src/big/mpp/include/comm/k_vo_comm.h
index e3667f98c..ce487f654 100644
--- a/src/big/mpp/include/comm/k_vo_comm.h
+++ b/src/big/mpp/include/comm/k_vo_comm.h
@@ -237,6 +237,21 @@ typedef struct
 
 } k_vo_display_resolution;
 
+typedef struct
+{
+    k_u32 hsync_start;
+    k_u32 hsync_stop;
+    k_u32 hsync1_start;
+    k_u32 hsync1_stop;
+    k_u32 hsync2_start;
+    k_u32 hsync2_stop;
+    k_u32 vsync1_start;
+    k_u32 vsync1_stop;
+    k_u32 vsync2_start;
+    k_u32 vsync2_stop;
+} k_vo_sync_attr;
+
+
 typedef struct
 {
     k_u32 n;
diff --git a/src/big/mpp/kernel/connector/src/connector_comm.c b/src/big/mpp/kernel/connector/src/connector_comm.c
index 27345639d..74bccb4a5 100755
--- a/src/big/mpp/kernel/connector/src/connector_comm.c
+++ b/src/big/mpp/kernel/connector/src/connector_comm.c
@@ -180,6 +180,12 @@ k_u32 connector_set_vo_param(k_vo_pub_attr *attr)
 }
 
+k_u32 connector_set_vo_attr(k_vo_pub_attr *pub_attr, k_vo_sync_attr *sync_attr)
+{
+    return kd_vo_set_dev_attr(pub_attr, sync_attr);
+}
+
+
 void connector_set_vo_enable(void)
 {
     kd_vo_enable();
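Reviewer note: `connector_set_vo_attr()` is a thin wrapper over the new `kd_vo_set_dev_attr()`, passing the public attributes and the new sync-window struct together. A minimal caller sketch (timing values copied from the 1080p branch that lt9611.c programs below; `resolution` is a `k_vo_display_resolution` assumed to be filled elsewhere):

```c
k_vo_pub_attr pub_attr;
k_vo_sync_attr sync_attr;

memset(&pub_attr, 0, sizeof(pub_attr));
pub_attr.bg_color  = 0x000000;            /* illustrative background color */
pub_attr.intf_sync = K_VO_OUT_1080P30;
pub_attr.intf_type = K_VO_INTF_MIPI;
pub_attr.sync_info = &resolution;         /* set up elsewhere */

memset(&sync_attr, 0, sizeof(sync_attr));
sync_attr.hsync_start  = 2;               /* same windows as k230_vo_resolution_init() */
sync_attr.hsync_stop   = 2;
sync_attr.hsync1_start = 1;
sync_attr.hsync1_stop  = 2;
sync_attr.hsync2_start = 1;
sync_attr.hsync2_stop  = 2;
sync_attr.vsync1_start = 1;
sync_attr.vsync1_stop  = 1;
sync_attr.vsync2_start = 1;
sync_attr.vsync2_stop  = 1;

connector_set_vo_attr(&pub_attr, &sync_attr);
```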
diff --git a/src/big/mpp/kernel/connector/src/connector_dev.h b/src/big/mpp/kernel/connector/src/connector_dev.h
index 122822a9f..55bc3aba8 100755
--- a/src/big/mpp/kernel/connector/src/connector_dev.h
+++ b/src/big/mpp/kernel/connector/src/connector_dev.h
@@ -79,6 +79,7 @@ extern k_u32 dwc_dsi_enable(k_u32 enable);
 extern k_u32 dwc_dst_set_test_mode(void);
 extern k_u32 vo_init(void);
 extern k_s32 kd_vo_set_dev_param(k_vo_pub_attr *attr);
+extern k_s32 kd_vo_set_dev_attr(k_vo_pub_attr *pub_attr, k_vo_sync_attr *sync_attr);
 extern void kd_vo_enable(void);
 extern void kd_vo_set_vtth_intr(k_bool status, k_u32 vpos);
 
@@ -93,6 +94,7 @@ k_u32 connector_set_dsi_enable(k_u32 enable);
 k_u32 connector_set_dsi_test_mode(void);
 k_u32 connector_set_vo_init(void);
 k_u32 connector_set_vo_param(k_vo_pub_attr *attr);
+k_u32 connector_set_vo_attr(k_vo_pub_attr *pub_attr, k_vo_sync_attr *sync_attr);
 void connector_set_vo_enable(void);
 void connector_set_vtth_intr(k_bool status, k_u32 vpos);
 void connector_delay_us(uint64_t us);
diff --git a/src/big/mpp/kernel/connector/src/lt9611.c b/src/big/mpp/kernel/connector/src/lt9611.c
index 9e05e8279..570ecdb17 100755
--- a/src/big/mpp/kernel/connector/src/lt9611.c
+++ b/src/big/mpp/kernel/connector/src/lt9611.c
@@ -547,20 +547,39 @@ static k_s32 k230_dsi_resolution_init(k_connector_info *info)
     return 0;
 }
 
-static k_s32 k230_vo_resolution_init(k_vo_display_resolution *resolution, k_u32 bg_color, k_u32 intr_line)
+static k_s32 k230_vo_resolution_init(k_connector_type type, k_vo_display_resolution *resolution, k_u32 bg_color, k_u32 intr_line)
 {
     k_vo_display_resolution vo_resolution;
-    k_vo_pub_attr attr;
-
-    memset(&attr, 0, sizeof(k_vo_pub_attr));
-    attr.bg_color = bg_color;
-    attr.intf_sync = K_VO_OUT_1080P30;
-    attr.intf_type = K_VO_INTF_MIPI;
-    attr.sync_info = resolution;
+    k_vo_pub_attr pub_attr;
+    k_vo_sync_attr sync_attr;
+
+    memset(&pub_attr, 0, sizeof(k_vo_pub_attr));
+    pub_attr.bg_color = bg_color;
+    pub_attr.intf_sync = K_VO_OUT_1080P30;
+    pub_attr.intf_type = K_VO_INTF_MIPI;
+    pub_attr.sync_info = resolution;
+
+    memset(&sync_attr, 0, sizeof(k_vo_sync_attr));
+    if (type == LT9611_MIPI_4LAN_1920X1080_60FPS || type == LT9611_MIPI_4LAN_1920X1080_30FPS) {
+        sync_attr.hsync_start = 2;
+        sync_attr.hsync_stop = 2;
+
+        sync_attr.hsync1_start = 1;
+        sync_attr.hsync1_stop = 2;
+        sync_attr.hsync2_start = 1;
+        sync_attr.hsync2_stop = 2;
+
+        sync_attr.vsync1_start = 1;
+        sync_attr.vsync1_stop = 1;
+        sync_attr.vsync2_start = 1;
+        sync_attr.vsync2_stop = 1;
+    } else {
+        rt_kprintf("Unsupported connector type \n");
+    }
 
     connector_set_vo_init();
     connector_set_vtth_intr(1, intr_line);
-    connector_set_vo_param(&attr);
+    connector_set_vo_attr(&pub_attr, &sync_attr);
     connector_set_vo_enable();
 
     return 0;
@@ -588,7 +607,7 @@ k_s32 lt9611_init(void *ctx, k_connector_info *info)
     rt_thread_mdelay(50);
 
     ret |= lt9611_enable_hdmi_out(lt9611_dev);
-    ret |= k230_vo_resolution_init(&info->resolution, info->bg_color, info->intr_line);
+    ret |= k230_vo_resolution_init(info->type, &info->resolution, info->bg_color, info->intr_line);
     ret |= k230_set_phy_freq(&info->phy_attr);
     ret |= k230_dsi_resolution_init(info);
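Reviewer note: only the two 1080p LT9611 types receive sync windows in `k230_vo_resolution_init()`; the newly added 720p/480p enum values currently fall through to the `Unsupported connector type` branch with a zeroed `sync_attr`, and `pub_attr.intf_sync` stays hard-coded to `K_VO_OUT_1080P30`. Selecting one of the new modes from user space would look roughly like this sketch (the info-lookup and open call names are assumptions — only `kd_mpi_connector_init` appears in this patch, in sample_face_ae/main.cc below):

```c
k_connector_type type = LT9611_MIPI_4LAN_1280X720_60FPS;  /* new enum value */
k_connector_info info;

memset(&info, 0, sizeof(info));
kd_mpi_get_connector_info(type, &info);   /* assumed lookup helper */
k_s32 fd = kd_mpi_connector_open(info.connector_name); /* assumed open path */
kd_mpi_connector_init(fd, info);          /* ends up in lt9611_init() */
```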
diff --git a/src/big/mpp/kernel/lib/libaudio.a b/src/big/mpp/kernel/lib/libaudio.a
index 1500eca03..05e192936 100644
Binary files a/src/big/mpp/kernel/lib/libaudio.a and b/src/big/mpp/kernel/lib/libaudio.a differ
diff --git a/src/big/mpp/kernel/lib/libcmpi.a b/src/big/mpp/kernel/lib/libcmpi.a
index da9ead337..c27efc202 100644
Binary files a/src/big/mpp/kernel/lib/libcmpi.a and b/src/big/mpp/kernel/lib/libcmpi.a differ
diff --git a/src/big/mpp/kernel/lib/libdma.a b/src/big/mpp/kernel/lib/libdma.a
index bb80de41d..a5b042833 100644
Binary files a/src/big/mpp/kernel/lib/libdma.a and b/src/big/mpp/kernel/lib/libdma.a differ
diff --git a/src/big/mpp/kernel/lib/libdpu.a b/src/big/mpp/kernel/lib/libdpu.a
index c0e6bb579..cdd9d9193 100644
Binary files a/src/big/mpp/kernel/lib/libdpu.a and b/src/big/mpp/kernel/lib/libdpu.a differ
diff --git a/src/big/mpp/kernel/lib/liblog.a b/src/big/mpp/kernel/lib/liblog.a
index 2fb42c775..dbc53f8c4 100644
Binary files a/src/big/mpp/kernel/lib/liblog.a and b/src/big/mpp/kernel/lib/liblog.a differ
diff --git a/src/big/mpp/kernel/lib/libmediafreq.a b/src/big/mpp/kernel/lib/libmediafreq.a
index 406e21a4a..a7a89ede2 100644
Binary files a/src/big/mpp/kernel/lib/libmediafreq.a and b/src/big/mpp/kernel/lib/libmediafreq.a differ
diff --git a/src/big/mpp/kernel/lib/libmmz.a b/src/big/mpp/kernel/lib/libmmz.a
index f05c58011..7c0d7b1ef 100644
Binary files a/src/big/mpp/kernel/lib/libmmz.a and b/src/big/mpp/kernel/lib/libmmz.a differ
diff --git a/src/big/mpp/kernel/lib/libpm.a b/src/big/mpp/kernel/lib/libpm.a
index fc8c42b31..ad91b12a9 100644
Binary files a/src/big/mpp/kernel/lib/libpm.a and b/src/big/mpp/kernel/lib/libpm.a differ
diff --git a/src/big/mpp/kernel/lib/libsys.a b/src/big/mpp/kernel/lib/libsys.a
index 13e0432bc..32525c46a 100644
Binary files a/src/big/mpp/kernel/lib/libsys.a and b/src/big/mpp/kernel/lib/libsys.a differ
diff --git a/src/big/mpp/kernel/lib/libvb.a b/src/big/mpp/kernel/lib/libvb.a
index 41fc3a944..d460b7e42 100644
Binary files a/src/big/mpp/kernel/lib/libvb.a and b/src/big/mpp/kernel/lib/libvb.a differ
diff --git a/src/big/mpp/kernel/lib/libvdss.a b/src/big/mpp/kernel/lib/libvdss.a
index 83f3a1ffc..fde0e1214 100644
Binary files a/src/big/mpp/kernel/lib/libvdss.a and b/src/big/mpp/kernel/lib/libvdss.a differ
diff --git a/src/big/mpp/kernel/lib/libvicap.a b/src/big/mpp/kernel/lib/libvicap.a
index 23c8c7884..2bfbd5b2a 100644
Binary files a/src/big/mpp/kernel/lib/libvicap.a and b/src/big/mpp/kernel/lib/libvicap.a differ
diff --git a/src/big/mpp/kernel/lib/libvo.a b/src/big/mpp/kernel/lib/libvo.a
index 9b7188269..1d932ae36 100644
Binary files a/src/big/mpp/kernel/lib/libvo.a and b/src/big/mpp/kernel/lib/libvo.a differ
diff --git a/src/big/mpp/kernel/lib/libvpu.a b/src/big/mpp/kernel/lib/libvpu.a
index 10573d1e7..1aeea5d46 100644
Binary files a/src/big/mpp/kernel/lib/libvpu.a and b/src/big/mpp/kernel/lib/libvpu.a differ
diff --git a/src/big/mpp/kernel/lib/libvvi.a b/src/big/mpp/kernel/lib/libvvi.a
index 90958a84e..33c5cb4b8 100644
Binary files a/src/big/mpp/kernel/lib/libvvi.a and b/src/big/mpp/kernel/lib/libvvi.a differ
diff --git a/src/big/mpp/kernel/lib/libvvo.a b/src/big/mpp/kernel/lib/libvvo.a
index d772483fc..73242d745 100644
Binary files a/src/big/mpp/kernel/lib/libvvo.a and b/src/big/mpp/kernel/lib/libvvo.a differ
diff --git a/src/big/mpp/userapps/lib/lib3a.a b/src/big/mpp/userapps/lib/lib3a.a
index 2ddbdbe70..348109576 100644
Binary files a/src/big/mpp/userapps/lib/lib3a.a and b/src/big/mpp/userapps/lib/lib3a.a differ
diff --git a/src/big/mpp/userapps/lib/libadec.a b/src/big/mpp/userapps/lib/libadec.a
index 70a5d9c1c..948bae453 100644
Binary files a/src/big/mpp/userapps/lib/libadec.a and b/src/big/mpp/userapps/lib/libadec.a differ
diff --git a/src/big/mpp/userapps/lib/libaenc.a b/src/big/mpp/userapps/lib/libaenc.a
index 7c0b80d39..227c56427 100644
Binary files a/src/big/mpp/userapps/lib/libaenc.a and b/src/big/mpp/userapps/lib/libaenc.a differ
diff --git a/src/big/mpp/userapps/lib/libai.a b/src/big/mpp/userapps/lib/libai.a
index 3e74e2451..a6091fc4d 100644
Binary files a/src/big/mpp/userapps/lib/libai.a and b/src/big/mpp/userapps/lib/libai.a differ
diff --git a/src/big/mpp/userapps/lib/libao.a b/src/big/mpp/userapps/lib/libao.a
index 434d76d47..9295c349d 100644
Binary files a/src/big/mpp/userapps/lib/libao.a and b/src/big/mpp/userapps/lib/libao.a differ
diff --git a/src/big/mpp/userapps/lib/libauto_ctrol.a b/src/big/mpp/userapps/lib/libauto_ctrol.a
index 79418faf7..34d98c99b 100644
Binary files a/src/big/mpp/userapps/lib/libauto_ctrol.a and b/src/big/mpp/userapps/lib/libauto_ctrol.a differ
diff --git a/src/big/mpp/userapps/lib/libbinder.a b/src/big/mpp/userapps/lib/libbinder.a
index adf31cc5f..f7ca35667 100644
Binary files a/src/big/mpp/userapps/lib/libbinder.a and b/src/big/mpp/userapps/lib/libbinder.a differ
diff --git a/src/big/mpp/userapps/lib/libbuffer_management.a b/src/big/mpp/userapps/lib/libbuffer_management.a
index cdc39e635..b6d2ffcd9 100644
Binary files a/src/big/mpp/userapps/lib/libbuffer_management.a and b/src/big/mpp/userapps/lib/libbuffer_management.a differ
diff --git a/src/big/mpp/userapps/lib/libcam_caldb.a b/src/big/mpp/userapps/lib/libcam_caldb.a
index 85870b250..40ea88cd3 100644
Binary files a/src/big/mpp/userapps/lib/libcam_caldb.a and b/src/big/mpp/userapps/lib/libcam_caldb.a differ
diff --git a/src/big/mpp/userapps/lib/libcam_device.a b/src/big/mpp/userapps/lib/libcam_device.a
index addacee10..602083731 100644
Binary files a/src/big/mpp/userapps/lib/libcam_device.a and b/src/big/mpp/userapps/lib/libcam_device.a differ
diff --git a/src/big/mpp/userapps/lib/libcam_engine.a b/src/big/mpp/userapps/lib/libcam_engine.a
index fd10d1042..d83c1dee3 100644
Binary files a/src/big/mpp/userapps/lib/libcam_engine.a and b/src/big/mpp/userapps/lib/libcam_engine.a differ
diff --git a/src/big/mpp/userapps/lib/libcameric_drv.a b/src/big/mpp/userapps/lib/libcameric_drv.a
index 8cce98138..a7e475f7c 100644
Binary files a/src/big/mpp/userapps/lib/libcameric_drv.a and b/src/big/mpp/userapps/lib/libcameric_drv.a differ
diff --git a/src/big/mpp/userapps/lib/libcameric_reg_drv.a b/src/big/mpp/userapps/lib/libcameric_reg_drv.a
index 0a6ad8bad..b36e3855b 100644
Binary files a/src/big/mpp/userapps/lib/libcameric_reg_drv.a and b/src/big/mpp/userapps/lib/libcameric_reg_drv.a differ
diff --git a/src/big/mpp/userapps/lib/libcmd_buffer.a b/src/big/mpp/userapps/lib/libcmd_buffer.a
index 77c00c56f..8be75e888 100644
Binary files a/src/big/mpp/userapps/lib/libcmd_buffer.a and b/src/big/mpp/userapps/lib/libcmd_buffer.a differ
diff --git a/src/big/mpp/userapps/lib/libcommon.a b/src/big/mpp/userapps/lib/libcommon.a
index 0bede5f1e..98c7100b6 100644
Binary files a/src/big/mpp/userapps/lib/libcommon.a and b/src/big/mpp/userapps/lib/libcommon.a differ
diff --git a/src/big/mpp/userapps/lib/libdma.a b/src/big/mpp/userapps/lib/libdma.a
index 86d04e907..22c9df3aa 100644
Binary files a/src/big/mpp/userapps/lib/libdma.a and b/src/big/mpp/userapps/lib/libdma.a differ
diff --git a/src/big/mpp/userapps/lib/libdpu.a b/src/big/mpp/userapps/lib/libdpu.a
index cc55d102a..cf323b96c 100644
Binary files a/src/big/mpp/userapps/lib/libdpu.a and b/src/big/mpp/userapps/lib/libdpu.a differ
diff --git a/src/big/mpp/userapps/lib/libebase.a b/src/big/mpp/userapps/lib/libebase.a
index 6d07241b0..5781bf26b 100644
Binary files a/src/big/mpp/userapps/lib/libebase.a and b/src/big/mpp/userapps/lib/libebase.a differ
diff --git a/src/big/mpp/userapps/lib/libfpga.a b/src/big/mpp/userapps/lib/libfpga.a
index edb5d2a95..b9eecab57 100644
Binary files a/src/big/mpp/userapps/lib/libfpga.a and b/src/big/mpp/userapps/lib/libfpga.a differ
diff --git a/src/big/mpp/userapps/lib/libhal.a b/src/big/mpp/userapps/lib/libhal.a
index f6851e296..fa40415d7 100644
Binary files a/src/big/mpp/userapps/lib/libhal.a and b/src/big/mpp/userapps/lib/libhal.a differ
diff --git a/src/big/mpp/userapps/lib/libisi.a b/src/big/mpp/userapps/lib/libisi.a
index ce9ba2f91..39db5cbd5 100644
Binary files a/src/big/mpp/userapps/lib/libisi.a and b/src/big/mpp/userapps/lib/libisi.a differ
diff --git a/src/big/mpp/userapps/lib/libisp_drv.a b/src/big/mpp/userapps/lib/libisp_drv.a
index 2c1f5c378..8f6635a3c 100644
Binary files a/src/big/mpp/userapps/lib/libisp_drv.a and b/src/big/mpp/userapps/lib/libisp_drv.a differ
diff --git a/src/big/mpp/userapps/lib/liboslayer.a b/src/big/mpp/userapps/lib/liboslayer.a
index d8457d6d2..a75bcc07b 100644
Binary files a/src/big/mpp/userapps/lib/liboslayer.a and b/src/big/mpp/userapps/lib/liboslayer.a differ
diff --git a/src/big/mpp/userapps/lib/libpm.a b/src/big/mpp/userapps/lib/libpm.a
index 0b54802c1..8ef46ca2e 100644
Binary files a/src/big/mpp/userapps/lib/libpm.a and b/src/big/mpp/userapps/lib/libpm.a differ
diff --git a/src/big/mpp/userapps/lib/libsom_ctrl.a b/src/big/mpp/userapps/lib/libsom_ctrl.a
index 859cf5e31..7608c37df 100644
Binary files a/src/big/mpp/userapps/lib/libsom_ctrl.a and b/src/big/mpp/userapps/lib/libsom_ctrl.a differ
diff --git a/src/big/mpp/userapps/lib/libstart_engine.a b/src/big/mpp/userapps/lib/libstart_engine.a
index 91df8951e..ce104a87f 100644
Binary files a/src/big/mpp/userapps/lib/libstart_engine.a and b/src/big/mpp/userapps/lib/libstart_engine.a differ
diff --git a/src/big/mpp/userapps/lib/libswitch.a b/src/big/mpp/userapps/lib/libswitch.a
index 508d833c5..d36cf6203 100644
Binary files a/src/big/mpp/userapps/lib/libswitch.a and b/src/big/mpp/userapps/lib/libswitch.a differ
diff --git a/src/big/mpp/userapps/lib/libsys.a b/src/big/mpp/userapps/lib/libsys.a
index 3a36fc422..2cc755186 100644
Binary files a/src/big/mpp/userapps/lib/libsys.a and b/src/big/mpp/userapps/lib/libsys.a differ
diff --git a/src/big/mpp/userapps/lib/libt_common_c.a b/src/big/mpp/userapps/lib/libt_common_c.a
index dbab8fc5d..d60786542 100644
Binary files a/src/big/mpp/userapps/lib/libt_common_c.a and b/src/big/mpp/userapps/lib/libt_common_c.a differ
diff --git a/src/big/mpp/userapps/lib/libt_database_c.a b/src/big/mpp/userapps/lib/libt_database_c.a
index 7419890c9..4f6db51c2 100644
Binary files a/src/big/mpp/userapps/lib/libt_database_c.a and b/src/big/mpp/userapps/lib/libt_database_c.a differ
diff --git a/src/big/mpp/userapps/lib/libt_json_c.a b/src/big/mpp/userapps/lib/libt_json_c.a
index ff14e5783..4cb97c785 100644
Binary files a/src/big/mpp/userapps/lib/libt_json_c.a and b/src/big/mpp/userapps/lib/libt_json_c.a differ
diff --git a/src/big/mpp/userapps/lib/libt_mxml_c.a b/src/big/mpp/userapps/lib/libt_mxml_c.a
index 057da3640..a86b9e4af 100644
Binary files a/src/big/mpp/userapps/lib/libt_mxml_c.a and b/src/big/mpp/userapps/lib/libt_mxml_c.a differ
diff --git a/src/big/mpp/userapps/lib/libvb.a b/src/big/mpp/userapps/lib/libvb.a
index 16c493313..3453e5b77 100644
Binary files a/src/big/mpp/userapps/lib/libvb.a and b/src/big/mpp/userapps/lib/libvb.a differ
diff --git a/src/big/mpp/userapps/lib/libvdec.a b/src/big/mpp/userapps/lib/libvdec.a
index e3a7bfc9f..10c868f13 100644
Binary files a/src/big/mpp/userapps/lib/libvdec.a and b/src/big/mpp/userapps/lib/libvdec.a differ
diff --git a/src/big/mpp/userapps/lib/libvdss.a b/src/big/mpp/userapps/lib/libvdss.a
index 97a7cbb9f..1f6f688ea 100644
Binary files a/src/big/mpp/userapps/lib/libvdss.a and b/src/big/mpp/userapps/lib/libvdss.a differ
diff --git a/src/big/mpp/userapps/lib/libvenc.a b/src/big/mpp/userapps/lib/libvenc.a
index 1d1741af3..7b03473bb 100644
Binary files a/src/big/mpp/userapps/lib/libvenc.a and b/src/big/mpp/userapps/lib/libvenc.a differ
diff --git a/src/big/mpp/userapps/lib/libvicap.a b/src/big/mpp/userapps/lib/libvicap.a
index 8a01b0983..390fb9462 100644
Binary files a/src/big/mpp/userapps/lib/libvicap.a and b/src/big/mpp/userapps/lib/libvicap.a differ
diff --git a/src/big/mpp/userapps/lib/libvideo_in.a b/src/big/mpp/userapps/lib/libvideo_in.a
index 1628da155..635e30145 100644
Binary files a/src/big/mpp/userapps/lib/libvideo_in.a and b/src/big/mpp/userapps/lib/libvideo_in.a differ
diff --git a/src/big/mpp/userapps/lib/libvirtual_hal.a b/src/big/mpp/userapps/lib/libvirtual_hal.a
index c46ded325..e0bb678ba 100644
Binary files a/src/big/mpp/userapps/lib/libvirtual_hal.a and b/src/big/mpp/userapps/lib/libvirtual_hal.a differ
diff --git a/src/big/mpp/userapps/lib/libvo.a b/src/big/mpp/userapps/lib/libvo.a
index b39cfd591..353be6be7 100644
Binary files a/src/big/mpp/userapps/lib/libvo.a and b/src/big/mpp/userapps/lib/libvo.a differ
diff --git a/src/big/mpp/userapps/lib/libvvi.a b/src/big/mpp/userapps/lib/libvvi.a
index d8d615ef0..d3433cccb 100644
Binary files a/src/big/mpp/userapps/lib/libvvi.a and b/src/big/mpp/userapps/lib/libvvi.a differ
diff --git a/src/big/mpp/userapps/sample/fastboot_app/main.cc b/src/big/mpp/userapps/sample/fastboot_app/main.cc
index 8bed9b9e8..4404be533 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/main.cc
+++ b/src/big/mpp/userapps/sample/fastboot_app/main.cc
@@ -658,6 +658,7 @@ int main(int argc, char *argv[])
     int num = 0;
     int ret;
     PRINT_TIME_NOW();
+
     if (argc != 2)
     {
         std::cerr << "Usage: " << argv[0] << " " << std::endl;
@@ -676,7 +677,8 @@ int main(int argc, char *argv[])
     size_t size = CHANNEL * ISP_CHN1_HEIGHT * ISP_CHN1_WIDTH;;
     MobileRetinaface model(argv[1], CHANNEL, ISP_CHN1_HEIGHT, ISP_CHN1_WIDTH);
-    std::vector<float> boxes;
+    DetectResult box_result;
+    std::vector<face_coordinate> boxes;
 
     TEST_BOOT_TIME_INIT();
 
@@ -711,9 +713,9 @@ int main(int argc, char *argv[])
         model.run(reinterpret_cast(vbvaddr), reinterpret_cast(dump_info.v_frame.phys_addr[0]));
         kd_mpi_sys_munmap(vbvaddr, size);
-        // model.run();
 
         // get face boxes
-        boxes = model.get_result();
+        box_result = model.get_result();
+        boxes = box_result.boxes;
 #ifdef TEST_BOOT_TIME
         TEST_BOOT_TIME_TRIGER();
         if(boxes.size() > 0)
@@ -726,27 +728,29 @@ int main(int argc, char *argv[])
             printf("boxes %llu \n",(perf_get_smodecycles()));
         }
 #endif
-        if(boxes.size() / 4 < face_count)
+        if(boxes.size() < face_count)
         {
-            for (size_t i = boxes.size() / 4; i < face_count; i++)
+            for (size_t i = boxes.size(); i < face_count; i++)
             {
                 vo_frame.draw_en = 0;
                 vo_frame.frame_num = i + 1;
                 kd_mpi_vo_draw_frame(&vo_frame);
             }
         }
-        for (size_t i = 0, j = 0; i < boxes.size(); i += 4)
+
+        for (size_t i = 0, j = 0; i < boxes.size(); i += 1)
        {
             // std::cout << "[" << boxes[i] << ", " << boxes[i + 1] << ", " << boxes[i + 2] <<", " << boxes[i + 3] << "]" << std::endl;
             vo_frame.draw_en = 1;
-            vo_frame.line_x_start = ((uint32_t)boxes[0 + i]) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
-            vo_frame.line_y_start = ((uint32_t)boxes[1 + i]) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
-            vo_frame.line_x_end = ((uint32_t)boxes[2 + i]) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
-            vo_frame.line_y_end = ((uint32_t)boxes[3 + i]) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
+            vo_frame.line_x_start = ((uint32_t)boxes[i].x1) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
+            vo_frame.line_y_start = ((uint32_t)boxes[i].y1) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
+            vo_frame.line_x_end = ((uint32_t)boxes[i].x2) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
+            vo_frame.line_y_end = ((uint32_t)boxes[i].y2) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
             vo_frame.frame_num = ++j;
             kd_mpi_vo_draw_frame(&vo_frame);
         }
-        face_count = boxes.size() / 4;
+
+        face_count = boxes.size();
 
         ret = kd_mpi_vicap_dump_release(vicap_dev, VICAP_CHN_ID_1, &dump_info);
         if (ret) {
             printf("sample_vicap...kd_mpi_vicap_dump_release failed.\n");
@@ -755,7 +759,7 @@ int main(int argc, char *argv[])
 
 app_exit:
     pthread_join(exit_thread_handle, NULL);
-    for(size_t i = 0;i < boxes.size()/4;i++)
+    for(size_t i = 0;i < boxes.size();i++)
     {
         vo_frame.draw_en = 0;
         vo_frame.frame_num = i + 1;
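Reviewer note: callers now receive whole boxes instead of striding a flat float array by four, and `get_result()` also carries landmarks, which this sample ignores. Consuming both parts would look like the sketch below (field layout per util.h later in this patch; the five-point order — eyes, nose, mouth corners — is typical for RetinaFace but is an assumption here):

```cpp
DetectResult result = model.get_result();

for (const auto &box : result.boxes)
    printf("face: (%d, %d) - (%d, %d)\n", box.x1, box.y1, box.x2, box.y2);

for (const auto &lm : result.landmarks)
    for (int j = 0; j < 5; j++)  // points[] holds five (x, y) pairs
        printf("  point %d: (%.1f, %.1f)\n", j, lm.points[2 * j], lm.points[2 * j + 1]);
```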
diff --git a/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.cc b/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.cc
index da753ce7c..439b6dba6 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.cc
+++ b/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.cc
@@ -29,23 +29,38 @@ static float umeyama_args[] =
 MobileRetinaface::MobileRetinaface(const char *kmodel_file, size_t channel, size_t height, size_t width)
     : Model("MobileRetinaface", kmodel_file), ai2d_input_c_(channel), ai2d_input_h_(height), ai2d_input_w_(width)
 {
+#if ENABLE_DEBUG
+    std::cout << __FUNCTION__ << ": c = " << channel << ", h = " << height << ", w = " << width << std::endl;
+#endif
+
     // ai2d output tensor
     ai2d_out_tensor_ = input_tensor(0);
 
     // ai2d config
     dims_t in_shape { 1, ai2d_input_c_, ai2d_input_h_, ai2d_input_w_ };
-    auto out_span = ai2d_out_tensor_.shape();
-    dims_t out_shape { out_span.begin(), out_span.end() };
+    auto out_shape = input_shape(0);
     ai2d_datatype_t ai2d_dtype { ai2d_format::NCHW_FMT, ai2d_format::NCHW_FMT, typecode_t::dt_uint8, typecode_t::dt_uint8 };
     ai2d_crop_param_t crop_param { false, 0, 0, 0, 0 };
     ai2d_shift_param_t shift_param { false, 0 };
+    float h_ratio = static_cast<float>(height) / out_shape[2];
+    float w_ratio = static_cast<float>(width) / out_shape[3];
+    float ratio = h_ratio > w_ratio ? h_ratio : w_ratio;
+    int h_pad = out_shape[2] - height / ratio;
+    int h_pad_before = h_pad / 2;
+    int h_pad_after = h_pad - h_pad_before;
+    int w_pad = out_shape[3] - width / ratio;
+    int w_pad_before = w_pad / 2;
+    int w_pad_after = w_pad - w_pad_before;
+
 #if defined(CONFIG_BOARD_K230_CANMV)
     ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 70, 70 }, { 0, 0 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
 #else
-    ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 70, 70 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
+    ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 70, 70 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
 #endif
-    ai2d_resize_param_t resize_param { true, ai2d_interp_method::tf_bilinear, ai2d_interp_mode::half_pixel };
+
+    // ai2d_pad_param_t pad_param{true, {{ 0, 0 }, { 0, 0 }, { h_pad_before, h_pad_after }, { w_pad_before, w_pad_after }}, ai2d_pad_mode::constant, { 0, 0, 0 }};
+    ai2d_resize_param_t resize_param { true, ai2d_interp_method::tf_bilinear, ai2d_interp_mode::half_pixel };
     ai2d_affine_param_t affine_param { false };
     ai2d_builder_.reset(new ai2d_builder(in_shape, out_shape, ai2d_dtype, crop_param, shift_param, pad_param, resize_param, affine_param));
     ai2d_builder_->build_schedule();
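Reviewer note: the new ratio/pad arithmetic derives a centered letterbox from the camera and model shapes, though for now it only feeds the commented-out `pad_param`; the hard-coded `{70, 70}` padding is still what ships. A worked pass with illustrative shapes (720x1280 input, 624x1024 model input — not the sample's real numbers):

```cpp
float h_ratio = 720.0f / 624;                 // 1.154
float w_ratio = 1280.0f / 1024;               // 1.250
float ratio   = std::max(h_ratio, w_ratio);   // scale chosen so both sides fit

int h_pad = 624 - (int)(720 / ratio);         // 624 - 576 = 48 rows to pad
int w_pad = 1024 - (int)(1280 / ratio);       // 0 columns
int h_pad_before = h_pad / 2;                 // 24 above, 24 below: centered bars
```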
@@ -57,7 +72,7 @@ MobileRetinaface::~MobileRetinaface()
 
 void MobileRetinaface::preprocess(uintptr_t vaddr, uintptr_t paddr)
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
@@ -73,49 +88,88 @@ void MobileRetinaface::preprocess(uintptr_t vaddr, uintptr_t paddr)
 
 void MobileRetinaface::postprocess()
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
-    box_t pred_box[16];
-    landmarks_t pred_landmarks[16];
-    size_t box_num = 0;
-    size_t landmark_num = 0;
-    decode(pred_box, &box_num, pred_landmarks, &landmark_num);
+    std::vector<box_t> pred_box;
+    pred_box.reserve(16);
+    std::vector<landmarks_t> landmarks;
+
+    decode(pred_box, landmarks);
 
     int long_side = ai2d_input_h_ > ai2d_input_w_ ? ai2d_input_h_ : ai2d_input_w_;
     int short_side = ai2d_input_h_ < ai2d_input_w_ ? ai2d_input_h_ : ai2d_input_w_;
     int pad = (long_side - short_side) / 2;
     bool width_pad = long_side == ai2d_input_h_ ? true : false;
-    face_boxes_.clear();
-    face_boxes_.reserve(sizeof(pred_box) / sizeof(pred_box[0]) * 4);
-    // std::cout << "box_num = " << box_num << ", landmark_num = " << landmark_num << std::endl;
 
-    int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
-    for (size_t i = 0; i < box_num; i++)
+    // boxes
+    result_.boxes.clear();
+    result_.boxes.reserve(pred_box.size());
+    for (size_t i = 0; i < pred_box.size(); i++)
     {
+        face_coordinate box;
+
         if(width_pad)
         {
-            x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2) - pad;
-            y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2);
-            x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2) - pad;
-            y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2);
+            box.x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2) - pad;
+            box.y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2);
+            box.x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2) - pad;
+            box.y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2);
         }
         else
         {
-            x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2);
-            y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2) - pad;
-            x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2);
-            y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2) - pad;
+            box.x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2);
+            box.y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2) - pad;
+            box.x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2);
+            box.y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2) - pad;
         }
-        face_boxes_.push_back(x1);
-        face_boxes_.push_back(y1);
-        face_boxes_.push_back(x2);
-        face_boxes_.push_back(y2);
-        // std::cout << "face_location: " << x1 << ", " << y1 << ", " << x2 << ", " << y2 << std::endl;
+
+        box.x1 = box.x1 < 0 ? 1 : box.x1;
+        box.y1 = box.y1 < 0 ? 1 : box.y1;
+        box.x2 = box.x2 > ai2d_input_w_ ? ai2d_input_w_ : box.x2;
+        box.y2 = box.y2 > ai2d_input_h_ ? ai2d_input_h_ : box.y2;
+
+        result_.boxes.push_back(box);
+#if ENABLE_DEBUG
+        std::cout << "face_location: (" << box.x1 << ", " << box.y1 << "), (" << box.x2 << ", " << box.y2 << ")" << std::endl;
+#endif
+    }
+
+    // landmarks
+    result_.landmarks.clear();
+    for (size_t i = 0; i < landmarks.size(); i++)
+    {
+        auto landmark = landmarks[i];
+        int x = 0, y = 0;
+#if ENABLE_DEBUG
+        std::cout << "landmark: ";
+#endif
+        for (uint32_t j = 0; j < 5; j++)
+        {
+            if(width_pad)
+            {
+                x = (int)(landmark.points[2 * j + 0] * long_side) - pad;
+                y = (int)(landmark.points[2 * j + 1] * long_side);
+            }
+            else
+            {
+                x = (int)(landmark.points[2 * j + 0] * long_side);
+                y = (int)(landmark.points[2 * j + 1] * long_side) - pad;
+            }
+
+            landmark.points[2 * j + 0] = x;
+            landmark.points[2 * j + 1] = y;
+
+#if ENABLE_DEBUG
+            std::cout << "(" << x << ", " << y << "),";
+#endif
+        }
+#if ENABLE_DEBUG
+        std::cout << std::endl;
+#endif
+        result_.landmarks.push_back(landmark);
     }
-    // std::cout << "box_num = " << face_boxes_.size() << std::endl;
 }
 
 float MobileRetinaface::overlap(float x1, float w1, float x2, float w2)
@@ -247,15 +301,15 @@ landmarks_t MobileRetinaface::get_landmark(float* landmarks, int obj_index)
     return landmark;
 }
 
-void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num)
+void MobileRetinaface::decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks)
 {
     const size_t size = 9;
     float *out[size];
     for (size_t i = 0; i < size; i++)
     {
         auto tensor = output_tensor(i);
-        auto mapped_buf = std::move(hrt::map(tensor, hrt::map_read).unwrap());
-        out[i] = reinterpret_cast<float *>(mapped_buf.buffer().data());
+        auto buf = tensor.impl()->to_host().unwrap()->buffer().as_host().unwrap().map(map_access_::map_read).unwrap().buffer();
+        out[i] = reinterpret_cast<float *>(buf.data());
         // dump("kpu output " + std::to_string(i), out[i], 32);
     }
 
@@ -269,8 +323,6 @@
     float *landms1 = out[7];
     float *landms2 = out[8];
 
-    float RETINAFACE_OBJ_THRESH = 0.2;
-    float RETINAFACE_NMS_THESH = 0.5;
     float box[LOC_SIZE] = { 0.0 };
     float landms[LAND_SIZE] = { 0.0 };
     int objs_num = MIN_SIZE * (1 + 4 + 16);
@@ -299,7 +351,7 @@
     obj_cnt = 0;
     for (uint32_t oo = 0; oo < objs_num; oo++)
     {
-        if(s_probs[oo] >= RETINAFACE_OBJ_THRESH)
+        if(s_probs[oo] >= obj_threshold_)
         {
             s_probs_copy[obj_cnt] = s_probs[oo];
             s_copy[obj_cnt].index = s[oo].index;
@@ -319,26 +371,27 @@
     {
         obj_index = s[i].index;
         //cout << "s_probs[obj_index]: " << s_probs[obj_index] << endl;
-        //cout << "RETINAFACE_OBJ_THRESH: " << RETINAFACE_OBJ_THRESH << endl;
-        if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+        //cout << "obj_threshold_: " << obj_threshold_ << endl;
+        if (s_probs[obj_index] < obj_threshold_)
             continue;
 
         // s_probs[obj_index]: 0.99418
         //cout << "s_probs[obj_index]: " << s_probs[obj_index] << endl;
         //printf("pred conf: %f\n", s_probs[obj_index]);
         box_t a = get_box(boxes, obj_index);
+        pred_box.push_back(a);
+
         landmarks_t l = get_landmark(landmarks, obj_index);
-        pred_box[(*box_num)++] = a;
-        pred_landmarks[(*landmark_num)++] = l;
+        pred_landmarks.push_back(l);
 
         // box a: 0.543867, 0.470195, 0.090413, 0.116365
-        //cout << "box a: " << a.x << ", " << a.y << ", " << a.w << ", " << a.h << ";" << endl;
-        //printf("pred box: %f, %f, %f, %f\n", a.x, a.y, a.w, a.h);
+        // cout << "box a: " << a.x << ", " << a.y << ", " << a.w << ", " << a.h << ";" << endl;
+        // printf("pred box: %f, %f, %f, %f\n", a.x, a.y, a.w, a.h);
 
         for (j = i + 1; j < objs_num; ++j){
             obj_index = s[j].index;
-            if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+            if (s_probs[obj_index] < obj_threshold_)
                 continue;
 
             box_t b = get_box(boxes, obj_index);
             iou_cal_times += 1;
-            if (box_iou(a, b) >= RETINAFACE_NMS_THESH)
+            if (box_iou(a, b) >= nms_threshold_)
                 s_probs[obj_index] = 0;
         }
     }
@@ -352,7 +405,7 @@
 }
 
 #else
-void MobileRetinaface::deal_conf_opt(float* conf, float* s_probs, int* s, int size, int* obj_cnt, int* real_count, float RETINAFACE_OBJ_THRESH, float* tmp)
+void MobileRetinaface::deal_conf_opt(float* conf, float* s_probs, int* s, int size, int* obj_cnt, int* real_count, float* tmp)
 {
     softmax_2group_vec(size, conf, conf + size, tmp, tmp + size);
     // softmax_2group_vec(size, conf + 2 * size, conf + 3 * size, tmp + 2 * size, tmp + 3 * size);
@@ -362,7 +415,7 @@
     for(int i = 0; i < size; ++i)
     {
         register float soft_vlaue = tmp[size + i];
-        if(soft_vlaue >= RETINAFACE_OBJ_THRESH)
+        if(soft_vlaue >= obj_threshold_)
         {
             s[index_s] = cnt;
             s_probs[index_s] = soft_vlaue;
@@ -370,7 +423,7 @@
         }
         cnt += 1;
         soft_vlaue = tmp[size * 3 + i];
-        if(soft_vlaue >= RETINAFACE_OBJ_THRESH)
+        if(soft_vlaue >= obj_threshold_)
        {
             s[index_s] = cnt;
             s_probs[index_s] = soft_vlaue;
@@ -475,7 +528,7 @@ int nms_comparator2(void* pa, void* pb)
     return 0;
 }
 
-void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num)
+void MobileRetinaface::decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks)
 {
     const size_t size = 9;
     float *out[size];
@@ -496,13 +549,6 @@
     float *landms1 = out[7];
     float *landms2 = out[8];
 
-#if 0
-    float RETINAFACE_OBJ_THRESH = 0.6f;
-    float RETINAFACE_NMS_THESH = 0.5f;
-#else
-    float RETINAFACE_OBJ_THRESH = 0.3f;
-    float RETINAFACE_NMS_THESH = 0.2f;
-#endif
     int objs_num = MIN_SIZE * (1 + 4 + 16);
     int* s = (int*)malloc(objs_num * sizeof(int));
     float* s_probs = (float*)malloc(objs_num * sizeof(float));
@@ -512,9 +558,9 @@
     float*tmp = (float*)malloc(16 * MIN_SIZE / 2 * sizeof(float) * CONF_SIZE * 2);
 
-    deal_conf_opt(conf0, s_probs, s, 16 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
-    deal_conf_opt(conf1, s_probs, s, 4 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
-    deal_conf_opt(conf2, s_probs, s, 1 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
+    deal_conf_opt(conf0, s_probs, s, 16 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
+    deal_conf_opt(conf1, s_probs, s, 4 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
+    deal_conf_opt(conf2, s_probs, s, 1 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
 
     float* boxes = (float*)malloc(objs_num * LOC_SIZE * sizeof(float));
     obj_cnt = 0;
@@ -542,18 +588,20 @@
     for (int i = 0; i < objs_num; ++i)
     {
         int obj_index = s_int[i];
-        if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+        if (s_probs[obj_index] < obj_threshold_)
             continue;
 
         box_t a = get_box_opt(boxes, obj_index, s[obj_index]);
+        pred_box.push_back(a);
+
         landmarks_t l = get_landmark_opt(landmarks, obj_index, s[obj_index]);
-        pred_box[(*box_num)++] = a;
-        pred_landmarks[(*landmark_num)++] = l;
-        for (int j = i + 1; j < objs_num; ++j){
+        pred_landmarks.push_back(l);
+        for (int j = i + 1; j < objs_num; ++j)
+        {
             obj_index = s_int[j];
-            if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+            if (s_probs[obj_index] < obj_threshold_)
                 continue;
             box_t b = get_box_opt(boxes, obj_index, s[obj_index]);
-            if (box_iou(a, b) >= RETINAFACE_NMS_THESH)
+            if (box_iou(a, b) >= nms_threshold_)
                 s_probs[obj_index] = 0;
         }
     }
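Reviewer note: both decode paths now share the member thresholds declared in mobile_retinaface.h below (`obj_threshold_ = 0.6f`, `nms_threshold_ = 0.5f`). That is a behavior change, not just a cleanup: the deleted locals were 0.2/0.5 in the generic path and 0.3/0.2 in the RVV path, so detections are filtered noticeably harder now. If per-board tuning is ever needed, a setter along these lines could be added (hypothetical — not part of this patch; the members are currently private with fixed defaults):

```cpp
// Hypothetical addition to MobileRetinaface:
void set_thresholds(float obj, float nms)
{
    obj_threshold_ = obj;   // score cutoff before NMS
    nms_threshold_ = nms;   // IoU above which the lower-scored box is dropped
}
```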
diff --git a/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.h b/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.h
index 3317b3b7e..4f4bb4d01 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.h
+++ b/src/big/mpp/userapps/sample/fastboot_app/mobile_retinaface.h
@@ -3,20 +3,25 @@
 #include "model.h"
 #include "util.h"
 
-#define ENABLE_RVV 1
 #if ENABLE_RVV
 #include "k230_math.h"
 #include "rvv_math.h"
 #endif
 
+typedef struct
+{
+    std::vector<face_coordinate> boxes;
+    std::vector<landmarks_t> landmarks;
+} DetectResult;
+
 class MobileRetinaface : public Model
 {
 public:
     MobileRetinaface(const char *kmodel_file, size_t channel, size_t height, size_t width);
     ~MobileRetinaface();
 
-    std::vector<float> get_result() const
+    DetectResult get_result() const
     {
-        return face_boxes_;
+        return result_;
     }
 
 protected:
@@ -24,7 +29,7 @@ class MobileRetinaface : public Model
     void postprocess();
 
 private:
-    void decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num);
+    void decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks);
     float overlap(float x1, float w1, float x2, float w2);
     float box_intersection(box_t a, box_t b);
     float box_union(box_t a, box_t b);
@@ -36,7 +41,7 @@ class MobileRetinaface : public Model
     box_t get_box(float *boxes, int obj_index);
     landmarks_t get_landmark(float *landmarks, int obj_index);
 #else
-    void deal_conf_opt(float *conf, float *s_probs, int *s, int size, int *obj_cnt, int *real_count, float RETINAFACE_OBJ_THRESH, float *tmp);
+    void deal_conf_opt(float *conf, float *s_probs, int *s, int size, int *obj_cnt, int *real_count, float *tmp);
     void deal_loc_opt(float *loc, float *boxes, int size, int *obj_cnt, int *s, int *real_count);
     void deal_landms_opt(float *landms, float *landmarks, int size, int *obj_cnt, int *s, int *real_count);
     box_t get_box_opt(float *boxes, int obj_index, int index_anchors);
@@ -47,9 +52,9 @@ class MobileRetinaface : public Model
     size_t ai2d_input_c_;
     size_t ai2d_input_h_;
     size_t ai2d_input_w_;
-    uintptr_t vaddr_;
-    uintptr_t paddr_;
-    std::vector<float> face_boxes_;
+    float obj_threshold_ = 0.6f;
+    float nms_threshold_ = 0.5f;
+    DetectResult result_;
 };
 
 #endif
diff --git a/src/big/mpp/userapps/sample/fastboot_app/model.cc b/src/big/mpp/userapps/sample/fastboot_app/model.cc
index 9a8dd0d36..28eafe330 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/model.cc
+++ b/src/big/mpp/userapps/sample/fastboot_app/model.cc
@@ -4,24 +4,11 @@
 #include "model.h"
 #include "util.h"
 
-template <typename T>
-std::vector<T> read_binary_file(const char *file_name)
-{
-    std::ifstream ifs(file_name, std::ios::binary);
-    ifs.seekg(0, ifs.end);
-    size_t len = ifs.tellg();
-    std::vector<T> vec(len / sizeof(T), 0);
-    ifs.seekg(0, ifs.beg);
-    ifs.read(reinterpret_cast<char *>(vec.data()), len);
-    ifs.close();
-    return std::move(vec);
-}
-
 Model::Model(const char *model_name, const char *kmodel_file): model_name_(model_name)
 {
     // load kmodel
-    kmodel_ = read_binary_file(kmodel_file);
-    interp_.load_model({ (const gsl::byte *)kmodel_.data(), kmodel_.size() }).expect("cannot load kmodel.");
+    std::ifstream ifs(kmodel_file, std::ios::binary);
+    interp_.load_model(ifs).expect("load_model failed");
 
     // create kpu input tensors
     for (size_t i = 0; i < interp_.inputs_size(); i++)
@@ -31,15 +18,6 @@ Model::Model(const char *model_name, const char *kmodel_file): model_name_(model
         auto tensor = host_runtime_tensor::create(desc.datatype, shape, hrt::pool_shared).expect("cannot create input tensor");
         interp_.input_tensor(i, tensor).expect("cannot set input tensor");
     }
-
-    // create kpu output tensors
-    for (size_t i = 0; i < interp_.outputs_size(); i++)
-    {
-        auto desc = interp_.output_desc(i);
-        auto shape = interp_.output_shape(i);
-        auto tensor = host_runtime_tensor::create(desc.datatype, shape, hrt::pool_shared).expect("cannot create output tensor");
-        interp_.output_tensor(i, tensor).expect("cannot set output tensor");
-    }
 }
 
 Model::~Model()
@@ -61,7 +39,7 @@ std::string Model::model_name() const
 
 void Model::kpu_run()
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
@@ -81,4 +59,14 @@ void Model::input_tensor(size_t idx, runtime_tensor &tensor)
 runtime_tensor Model::output_tensor(size_t idx)
 {
     return interp_.output_tensor(idx).expect("cannot get output tensor");
-}
\ No newline at end of file
+}
+
+dims_t Model::input_shape(size_t idx)
+{
+    return interp_.input_shape(idx);
+}
+
+dims_t Model::output_shape(size_t idx)
+{
+    return interp_.output_shape(idx);
+}
diff --git a/src/big/mpp/userapps/sample/fastboot_app/model.h b/src/big/mpp/userapps/sample/fastboot_app/model.h
index 1621a24b3..139b6b6a4 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/model.h
+++ b/src/big/mpp/userapps/sample/fastboot_app/model.h
@@ -26,7 +26,8 @@ class Model
     runtime_tensor input_tensor(size_t idx);
     void input_tensor(size_t idx, runtime_tensor &tensor);
     runtime_tensor output_tensor(size_t idx);
-
+    dims_t input_shape(size_t idx);
+    dims_t output_shape(size_t idx);
 protected:
     std::unique_ptr<ai2d_builder> ai2d_builder_;
     runtime_tensor ai2d_in_tensor_;
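Reviewer note: the kmodel is now streamed into the interpreter instead of being buffered in `kmodel_`, and the new shape accessors let subclasses query the interpreter directly — which is what replaces the tensor-mapping dance in the MobileRetinaface constructor above. A sketch of the accessors from inside a `Model` subclass (Model itself is not meant to be instantiated directly):

```cpp
auto in  = input_shape(0);    // dims_t, e.g. {1, C, H, W} for this model
auto out = output_shape(0);
std::cout << "model input " << in[2] << "x" << in[3]
          << ", output rank " << out.size() << std::endl;
```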
diff --git a/src/big/mpp/userapps/sample/fastboot_app/util.cc b/src/big/mpp/userapps/sample/fastboot_app/util.cc
index 2bc4a92a1..23be42816 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/util.cc
+++ b/src/big/mpp/userapps/sample/fastboot_app/util.cc
@@ -3,12 +3,17 @@
 #include <fstream>
 #include "util.h"
 
-void read_binary_file(const char *file_name, char *buffer)
+void read_binary_file(const char *file_name, char *buffer, size_t size)
 {
     std::ifstream ifs(file_name, std::ios::binary);
     ifs.seekg(0, ifs.end);
-    size_t len = ifs.tellg();
-    ifs.seekg(0, ifs.beg);
+    size_t len = ifs.tellg();
+    if (size != 0)
+    {
+        len = size;
+    }
+
+    ifs.seekg(0, ifs.beg);
     ifs.read(buffer, len);
     ifs.close();
 }
@@ -54,56 +59,3 @@ void softmax(float* x, float* dx, uint32_t len)
         dx[i] = x[i] / sum_value;
     }
 }
-
-
-uint32_t l2normalize(const float* x, float* dx, int len)
-{
-    int f;
-    float sum = 0;
-    for (f = 0; f < len; ++f)
-    {
-        sum += x[f] * x[f];
-    }
-    sum = sqrtf(sum);
-    for (f = 0; f < len; ++f)
-    {
-        dx[f] = x[f] / sum;
-    }
-    return 0;
-}
-
-float calCosinDistance(float* faceFeature0P, float* faceFeature1P, int featureLen)
-{
-    float coorFeature = 0;
-
-    // calculate the sum square
-    for (int fIdx = 0; fIdx < featureLen; fIdx++)
-    {
-        float featureVal0 = *(faceFeature0P + fIdx);
-        float featureVal1 = *(faceFeature1P + fIdx);
-        coorFeature += featureVal0 * featureVal1;
-    }
-
-    // cosin distance
-    return (0.5 + 0.5 * coorFeature) * 100;
-}
-
-uint32_t calulate_score(const float* features, const float* saved_features, uint32_t saved_len, float* score)
-{
-    int i;
-    int v_id = -1;
-    float v_score;
-    float v_score_max = 0.0;
-    float basef[FL], testf[FL];
-
-    l2normalize(features, testf, FL);
-    for (i = 0; i < saved_len; i++)
-    {
-        l2normalize(saved_features + i * FL, basef, FL);
-        v_score = calCosinDistance(testf, basef, FL);
-        if (v_score > v_score_max)
-        {
-            v_score_max = v_score;
-            v_id = i;
-        }
-    }
-    *score = v_score_max;
-    return v_id;
-}
\ No newline at end of file
diff --git a/src/big/mpp/userapps/sample/fastboot_app/util.h b/src/big/mpp/userapps/sample/fastboot_app/util.h
index cc2de6fd2..8254a52c8 100644
--- a/src/big/mpp/userapps/sample/fastboot_app/util.h
+++ b/src/big/mpp/userapps/sample/fastboot_app/util.h
@@ -1,11 +1,13 @@
 #ifndef _UTIL_H
 #define _UTIL_H
 #include <stdint.h>
+#include <fstream>
 #include <string>
 
-#define PROFILING 0
+#define ENABLE_RVV 1
+#define ENABLE_DEBUG 0
+#define ENABLE_PROFILING 0
 
-#define FL 192
 typedef struct
 {
     int index;
@@ -25,12 +27,29 @@ typedef struct
     float points[10];
 } landmarks_t;
 
-void read_binary_file(const char *file_name, char *buffer);
+typedef struct
+{
+    int x1;
+    int y1;
+    int x2;
+    int y2;
+} face_coordinate;
+
+void read_binary_file(const char *file_name, char *buffer, size_t size = 0);
+template <typename T>
+std::vector<T> read_binary_file(const char *file_name)
+{
+    std::ifstream ifs(file_name, std::ios::binary);
+    ifs.seekg(0, ifs.end);
+    size_t len = ifs.tellg();
+    std::vector<T> vec(len / sizeof(T), 0);
+    ifs.seekg(0, ifs.beg);
+    ifs.read(reinterpret_cast<char *>(vec.data()), len);
+    ifs.close();
+    return std::move(vec);
+}
 void dump(const std::string &info, volatile float *p, size_t size);
 void softmax(float *x, float *dx, uint32_t len);
-uint32_t l2normalize(const float* x, float* dx, int len);
-float calCosinDistance(float* faceFeature0P, float* faceFeature1P, int featureLen);
-uint32_t calulate_score(const float* features, const float* saved_features, uint32_t saved_len, float* score);
 
 class ScopedTiming
 {
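Reviewer note: the `char*` overload of `read_binary_file` gains an optional `size` cap (0 keeps the old read-whole-file behavior), and the templated overload moves here from model.cc. Both entry points side by side (the path is illustrative):

```cpp
char header[64];
// hypothetical file; reads only the first 64 bytes now that size is honored
read_binary_file("/sdcard/face.bin", header, sizeof(header));

// templated overload: whole file, reinterpreted as float
auto weights = read_binary_file<float>("/sdcard/face.bin");
```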
diff --git a/src/big/mpp/userapps/sample/sample_audio/audio_sample.c b/src/big/mpp/userapps/sample/sample_audio/audio_sample.c
index 4f92da8a3..e09accc74 100644
--- a/src/big/mpp/userapps/sample/sample_audio/audio_sample.c
+++ b/src/big/mpp/userapps/sample/sample_audio/audio_sample.c
@@ -496,7 +496,7 @@ k_s32 audio_sample_enable_audio_codec(k_bool enable_audio_codec)
     g_enable_audio_codec = enable_audio_codec;
     return K_SUCCESS;
 }
-k_s32 audio_sample_get_ai_i2s_data(const char *filename, k_audio_bit_width bit_width, k_u32 sample_rate,k_u32 channel_count, k_i2s_work_mode i2s_work_mode, k_bool enable_audio3a)
+k_s32 audio_sample_get_ai_i2s_data(const char *filename, k_audio_bit_width bit_width, k_u32 sample_rate,k_u32 channel_count,k_i2s_in_mono_channel mono_channel, k_i2s_work_mode i2s_work_mode, k_bool enable_audio3a)
 {
     k_aio_dev_attr aio_dev_attr;
     aio_dev_attr.audio_type = KD_AUDIO_INPUT_TYPE_I2S;
@@ -504,6 +504,10 @@
     aio_dev_attr.kd_audio_attr.i2s_attr.bit_width = bit_width;
     aio_dev_attr.kd_audio_attr.i2s_attr.chn_cnt = 2;
     aio_dev_attr.kd_audio_attr.i2s_attr.snd_mode = (1 == channel_count) ? KD_AUDIO_SOUND_MODE_MONO : KD_AUDIO_SOUND_MODE_STEREO;
+    if(1 == channel_count)
+    {
+        aio_dev_attr.kd_audio_attr.i2s_attr.mono_channel = mono_channel;
+    }
     aio_dev_attr.kd_audio_attr.i2s_attr.i2s_mode = i2s_work_mode;
     aio_dev_attr.kd_audio_attr.i2s_attr.frame_num = AUDIO_PERSEC_DIV_NUM;
     aio_dev_attr.kd_audio_attr.i2s_attr.point_num_per_frame = sample_rate / aio_dev_attr.kd_audio_attr.i2s_attr.frame_num;
diff --git a/src/big/mpp/userapps/sample/sample_audio/audio_sample.h b/src/big/mpp/userapps/sample/sample_audio/audio_sample.h
index 3d3736bbf..a0e077f78 100644
--- a/src/big/mpp/userapps/sample/sample_audio/audio_sample.h
+++ b/src/big/mpp/userapps/sample/sample_audio/audio_sample.h
@@ -40,7 +40,7 @@ extern "C" {
 k_s32 audio_sample_vb_init(k_bool enable_cache, k_u32 sample_rate); // init vb
 k_s32 audio_sample_vb_destroy(); // destroy vb
 k_s32 audio_sample_enable_audio_codec(k_bool enable_audio_codec);
-k_s32 audio_sample_get_ai_i2s_data(const char*filename, k_audio_bit_width bit_width, k_u32 sample_rate,k_u32 channel_count,k_i2s_work_mode i2s_work_mode, k_bool enable_audio3a); // get I2S input data
+k_s32 audio_sample_get_ai_i2s_data(const char*filename, k_audio_bit_width bit_width, k_u32 sample_rate,k_u32 channel_count,k_i2s_in_mono_channel mono_channel,k_i2s_work_mode i2s_work_mode, k_bool enable_audio3a); // get I2S input data
 k_s32 audio_sample_get_ai_pdm_data(const char*filename, k_audio_bit_width bit_width, k_u32 sample_rate,k_u32 channel_count); // get PDM input data
 k_s32 audio_sample_send_ao_data(const char*filename,int nDev, int nChannel, int samplerate,k_audio_bit_width bit_width,k_i2s_work_mode i2s_work_mode); // send data to AO
 k_s32 audio_sample_api_ai_to_ao(int ai_dev_num, int ai_channel, int ao_dev_num, int ao_channel, k_u32 sample_rate, k_audio_bit_width bit_width,k_i2s_work_mode i2s_work_mode, k_bool enable_audio3a); // fetch AI data via the API and send it to AO
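Reviewer note: `mono_channel` only takes effect when `channel_count == 1`; for stereo capture the field is left untouched. A minimal mono-capture call (file name and rate are illustrative, and the exact `k_audio_bit_width` enumerator name is an assumption — it is not shown in this patch):

```c
/* capture one channel from the mic (right channel per the new enum) */
audio_sample_get_ai_i2s_data("/sdcard/mic.wav",
                             KD_AUDIO_BIT_WIDTH_16,        /* assumed enumerator */
                             44100,
                             1,                            /* channel_count: mono */
                             KD_I2S_IN_MONO_RIGHT_CHANNEL,
                             K_STANDARD_MODE,
                             K_FALSE);                     /* audio3a off */
```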
diff --git a/src/big/mpp/userapps/sample/sample_audio/sample_audio.c b/src/big/mpp/userapps/sample/sample_audio/sample_audio.c
index e19f5e7c6..d7557d9cc 100644
--- a/src/big/mpp/userapps/sample/sample_audio/sample_audio.c
+++ b/src/big/mpp/userapps/sample/sample_audio/sample_audio.c
@@ -57,6 +57,7 @@ static void _help()
     printf("-loglevel: show kernel log level[0,7]\n");
     printf("-bitwidth: set audio bit width(16,24,32)\n");
     printf("-channels: channel count\n");
+    printf("-monochannel:0:mic input 1:headphone input\n");
     printf("-filename: load or save file name\n");
     printf("-audio3a: enable audio3a(0,1)\n");
     //printf("-i2smode:set i2s mode(1:i2s 2:right justified 4:left justified)\n");
@@ -70,13 +71,14 @@
 static k_i2s_work_mode g_i2s_work_mode = K_STANDARD_MODE;
 static k_bool g_enable_audio3a = K_FALSE;
 static char g_wav_name[256];
 static k_u32 g_channel_count = 2;
+static k_i2s_in_mono_channel g_mono_channel = KD_I2S_IN_MONO_RIGHT_CHANNEL;//mono channel use mic input
 
 static void *sample_thread_fn(void *arg)
 {
     switch (g_type)
     {
     case 0:
         printf("sample ai i2s module\n");
-        audio_sample_get_ai_i2s_data(g_wav_name,g_bit_width, g_sample_rate,g_channel_count,g_i2s_work_mode, g_enable_audio3a); // when bit width is 32, do not enable both IO groups, since the data source cannot be distinguished
+        audio_sample_get_ai_i2s_data(g_wav_name,g_bit_width, g_sample_rate,g_channel_count,g_mono_channel,g_i2s_work_mode, g_enable_audio3a); // when bit width is 32, do not enable both IO groups, since the data source cannot be distinguished
         break;
     case 1:
         printf("sample ai pdm module\n");
@@ -196,6 +198,10 @@ int main(int argc, char *argv[])
         {
             g_channel_count = atoi(argv[i + 1]);
         }
+        else if (strcmp(argv[i], "-monochannel") == 0)
+        {
+            g_mono_channel = (k_i2s_in_mono_channel)atoi(argv[i + 1]);
+        }
         else if (strcmp(argv[i], "-loglevel") == 0)
         {
             nloglevel = atoi(argv[i + 1]);
@@ -232,7 +238,7 @@ int main(int argc, char *argv[])
     }
 
-    printf("audio type:%d,sample rate:%d,bit width:%d,channels:%d,enablecodec:%d\n", g_type, g_sample_rate,nbitwidth,g_channel_count,g_enable_audio_codec);
+    printf("audio type:%d,sample rate:%d,bit width:%d,channels:%d,enablecodec:%d,monochannel:%d\n", g_type, g_sample_rate,nbitwidth,g_channel_count,g_enable_audio_codec,g_mono_channel);
 
     _set_mod_log(K_ID_AO,nloglevel);//K_DBG_EMERG
     _set_mod_log(K_ID_AI,nloglevel);//K_DBG_EMERG
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/main.cc b/src/big/mpp/userapps/sample/sample_face_ae/main.cc
index 1035af347..79e47f479 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/main.cc
+++ b/src/big/mpp/userapps/sample/sample_face_ae/main.cc
@@ -278,10 +278,8 @@ int vo_creat_layer_test(k_vo_layer chn_id, layer_info *info)
     return 0;
 }
 
-static k_s32 vo_layer_vdss_bind_vo_config(void)
+k_s32 sample_connector_init(void)
 {
-    k_vo_pub_attr attr;
-    layer_info info;
     k_u32 ret = 0;
     k_s32 connector_fd;
 #if defined(CONFIG_BOARD_K230_CANMV)
@@ -289,11 +287,8 @@
 #else
     k_connector_type connector_type = HX8377_V2_MIPI_4LAN_1080X1920_30FPS;
 #endif
-    k_connector_info connector_info;
-    k_vo_layer chn_id = K_VO_LAYER1;
+    k_connector_info connector_info;
 
-    memset(&attr, 0, sizeof(attr));
-    memset(&info, 0, sizeof(info));
     memset(&connector_info, 0, sizeof(k_connector_info));
 
     //connector get sensor info
@@ -314,8 +309,18 @@
     // connector init
     kd_mpi_connector_init(connector_fd, connector_info);
 
-    // printf("%s>w %d, h %d\n", __func__, w, h);
-    // config lyaer
+    return 0;
+}
+
+static k_s32 vo_layer_vdss_bind_vo_config(void)
+{
+    layer_info info;
+    k_vo_layer chn_id = K_VO_LAYER1;
+
+    memset(&info, 0, sizeof(info));
+
+    sample_connector_init();
+
     info.act_size.width = ISP_CHN0_WIDTH;//1080;//640;//1080;
     info.act_size.height = ISP_CHN0_HEIGHT;//1920;//480;//1920;
     info.format = PIXEL_FORMAT_YVU_PLANAR_420;
@@ -333,6 +338,7 @@ static void sample_vo_fn(void *arg)
     usleep(10000);
     vo_layer_vdss_bind_vo_config();
     sample_sys_bind_init();
+    return;
 }
 
 static void *sample_vo_thread(void *arg)
@@ -658,19 +664,20 @@ k_u32 calc_sum(k_u32 data[], k_u8 size)
     return sum;
 }
 
-void face_location_convert_roi(std::vector<float> boxes, k_isp_ae_roi *ae_roi, k_u32 h_offset, k_u32 v_offset, k_u32 scale_h, k_u32 scale_v, k_u32 width, k_u32 height)
+void face_location_convert_roi(std::vector<face_coordinate> boxes, k_isp_ae_roi *ae_roi, k_u32 h_offset, k_u32 v_offset, k_u32 scale_h, k_u32 scale_v, k_u32 width, k_u32 height)
 {
 #define CHECK_BOUNDARY(s, o, e) s = (o + s) > e ? (e - o) : (s)
     // static k_u32 boxes_back[ISP_AE_ROI_WINDOWS_MAX * 4];
-    static k_u32 area[ISP_AE_ROI_WINDOWS_MAX] = {0};
+    static k_u32 area[8] = {0};
 
     if(ae_roi == nullptr)
     {
         std::cout << "face location is Nullptr" << std::endl;
         return;
     }
 
-    auto box_size = std::min(boxes.size(), ISP_AE_ROI_WINDOWS_MAX * 4);
+    // auto box_size = std::min(boxes.size(), ISP_AE_ROI_WINDOWS_MAX);
+    auto box_size = std::min(boxes.size(), 8);
 
     if(boxes.empty())
     {
         ae_roi->roiNum = 0;
         return;
     }
 
-    if(box_size % 4 != 0)
-    {
-        std::cout << __func__ << ", face box location num error!" << std::endl;
-        ae_roi->roiNum = 0;
-        return;
-    }
-
-    ae_roi->roiNum = box_size / 4;
-    for(auto i = 0; i < box_size; i += 4)
+    ae_roi->roiNum = box_size;
+    for(auto i = 0; i < box_size; i += 1)
     {
-        k_u32 index = i / 4;
-        boxes[i + 0] = boxes[i + 0] < 0 ? 0: boxes[i + 0];
-        boxes[i + 1] = boxes[i + 1] < 0 ? 0: boxes[i + 1];
+        // k_u32 index = i / 4;
+        boxes[i].x1 = boxes[i].x1 < 0 ? 0: boxes[i].x1;
+        boxes[i].y1 = boxes[i].y1 < 0 ? 0: boxes[i].y1;
 
         // ae_roi->roiWindow[index].weight = 100.0;
-        ae_roi->roiWindow[index].window.hOffset = (k_u32)(boxes[i + 0] * width / scale_h + h_offset);
-        ae_roi->roiWindow[index].window.vOffset = (k_u32)(boxes[i + 1] * height / scale_v + v_offset);
+        ae_roi->roiWindow[i].window.hOffset = (k_u32)(boxes[i].x1 * width / scale_h + h_offset);
+        ae_roi->roiWindow[i].window.vOffset = (k_u32)(boxes[i].y1 * height / scale_v + v_offset);
 
-        ae_roi->roiWindow[index].window.width = ((k_u32)(boxes[i + 2] - (k_u32)boxes[i + 0]) * width / scale_h);
-        ae_roi->roiWindow[index].window.height = ((k_u32)(boxes[i + 3] - (k_u32)boxes[i + 1]) * height / scale_v);
+        ae_roi->roiWindow[i].window.width = ((k_u32)(boxes[i].x2 - (k_u32)boxes[i].x1) * width / scale_h);
+        ae_roi->roiWindow[i].window.height = ((k_u32)(boxes[i].y2 - (k_u32)boxes[i].y1) * height / scale_v);
 
-        CHECK_BOUNDARY(ae_roi->roiWindow[index].window.width, ae_roi->roiWindow[index].window.hOffset, sensor_info.width);
-        CHECK_BOUNDARY(ae_roi->roiWindow[index].window.height, ae_roi->roiWindow[index].window.vOffset, sensor_info.height);
+        CHECK_BOUNDARY(ae_roi->roiWindow[i].window.width, ae_roi->roiWindow[i].window.hOffset, sensor_info.width);
+        CHECK_BOUNDARY(ae_roi->roiWindow[i].window.height, ae_roi->roiWindow[i].window.vOffset, sensor_info.height);
 
-        area[index] = ae_roi->roiWindow[index].window.width * ae_roi->roiWindow[index].window.height;
+        area[i] = ae_roi->roiWindow[i].window.width * ae_roi->roiWindow[i].window.height;
     }
 
     k_u32 sum = calc_sum(area, ae_roi->roiNum);
@@ -743,7 +743,8 @@ int main(int argc, char *argv[])
 
     MobileRetinaface model(argv[1], CHANNEL, ISP_CHN1_HEIGHT, ISP_CHN1_WIDTH);
-    std::vector<float> boxes;
+    DetectResult box_result;
+    std::vector<face_coordinate> boxes;
 
     TEST_BOOT_TIME_INIT();
 
@@ -788,7 +789,8 @@ int main(int argc, char *argv[])
         kd_mpi_sys_munmap(vbvaddr, size);
         // model.run();
 
         // get face boxes
-        boxes = model.get_result();
+        box_result = model.get_result();
+        boxes = box_result.boxes;
 #ifdef TEST_BOOT_TIME
         TEST_BOOT_TIME_TRIGER();
         if(boxes.size() > 0)
@@ -802,9 +804,9 @@
         }
 #endif
 
-        if(boxes.size() / 4 < face_count)
+        if(boxes.size() < face_count)
         {
-            for (size_t i = boxes.size() / 4; i < face_count; i++)
+            for (size_t i = boxes.size(); i < face_count; i++)
             {
                 vo_frame.draw_en = 0;
                 vo_frame.frame_num = i + 1;
                 kd_mpi_vo_draw_frame(&vo_frame);
             }
         }
 
-        for (size_t i = 0, j = 0; i < boxes.size(); i += 4)
+        for (size_t i = 0, j = 0; i < boxes.size(); i += 1)
         {
             // std::cout << "[" << boxes[i] << ", " << boxes[i + 1] << ", " << boxes[i + 2] <<", " << boxes[i + 3] << "]" << std::endl;
             vo_frame.draw_en = 1;
-            vo_frame.line_x_start = ((uint32_t)boxes[0 + i]) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
-            vo_frame.line_y_start = ((uint32_t)boxes[1 + i]) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
-            vo_frame.line_x_end = ((uint32_t)boxes[2 + i]) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
-            vo_frame.line_y_end = ((uint32_t)boxes[3 + i]) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
+            vo_frame.line_x_start = ((uint32_t)boxes[i].x1) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
+            vo_frame.line_y_start = ((uint32_t)boxes[i].y1) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
+            vo_frame.line_x_end = ((uint32_t)boxes[i].x2) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH;
+            vo_frame.line_y_end = ((uint32_t)boxes[i].y2) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT;
             vo_frame.frame_num = ++j;
             kd_mpi_vo_draw_frame(&vo_frame);
         }
-        face_count = boxes.size() / 4;
+        face_count = boxes.size();
 
         k_isp_ae_roi user_roi;
 #if defined(CONFIG_BOARD_K230_CANMV)
@@ -840,7 +842,7 @@
 
 app_exit:
     pthread_join(exit_thread_handle, NULL);
-    for(size_t i = 0;i < boxes.size()/4;i++)
+    for(size_t i = 0;i < boxes.size();i++)
     {
         vo_frame.draw_en = 0;
         vo_frame.frame_num = i + 1;
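Reviewer note: the ROI conversion now emits one window per `face_coordinate` instead of one per four floats, indexing `roiWindow[]` directly by `i`. A worked pass through the scaling math with illustrative numbers — one box (100,50)-(300,250) detected in a 640x480 frame, mapped onto a 1920x1080 sensor window with zero offsets:

```c
/* hOffset = x1 * width / scale_h + h_offset, etc. */
k_u32 scale_h = 640, scale_v = 480;          /* detection frame */
k_u32 width = 1920, height = 1080;           /* sensor window */

k_u32 hOff = 100 * width  / scale_h;         /* 300 */
k_u32 vOff = 50  * height / scale_v;         /* 112 (integer division) */
k_u32 w    = (300 - 100) * width  / scale_h; /* 600 */
k_u32 h    = (250 - 50)  * height / scale_v; /* 450 */
/* CHECK_BOUNDARY then clamps w/h so the window stays inside the sensor */
```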
((uint32_t)boxes[i].y1) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT; + vo_frame.line_x_end = ((uint32_t)boxes[i].x2) * ISP_CHN0_WIDTH / ISP_CHN1_WIDTH; + vo_frame.line_y_end = ((uint32_t)boxes[i].y2) * ISP_CHN0_HEIGHT / ISP_CHN1_HEIGHT; vo_frame.frame_num = ++j; kd_mpi_vo_draw_frame(&vo_frame); } - face_count = boxes.size() / 4; + face_count = boxes.size(); k_isp_ae_roi user_roi; #if defined(CONFIG_BOARD_K230_CANMV) @@ -840,7 +842,7 @@ int main(int argc, char *argv[]) app_exit: pthread_join(exit_thread_handle, NULL); - for(size_t i = 0;i < boxes.size()/4;i++) + for(size_t i = 0;i < boxes.size();i++) { vo_frame.draw_en = 0; vo_frame.frame_num = i + 1; diff --git a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc index f8084babd..439b6dba6 100644 --- a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc +++ b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc @@ -29,22 +29,37 @@ static float umeyama_args[] = MobileRetinaface::MobileRetinaface(const char *kmodel_file, size_t channel, size_t height, size_t width) : Model("MobileRetinaface", kmodel_file), ai2d_input_c_(channel), ai2d_input_h_(height), ai2d_input_w_(width) { +#if ENABLE_DEBUG + std::cout << __FUNCTION__ << ": c = " << channel << ", h = " << height << ", w = " << width << std::endl; +#endif + // ai2d output tensor ai2d_out_tensor_ = input_tensor(0); // ai2d config dims_t in_shape { 1, ai2d_input_c_, ai2d_input_h_, ai2d_input_w_ }; - auto out_span = ai2d_out_tensor_.shape(); - dims_t out_shape { out_span.begin(), out_span.end() }; + auto out_shape = input_shape(0); ai2d_datatype_t ai2d_dtype { ai2d_format::NCHW_FMT, ai2d_format::NCHW_FMT, typecode_t::dt_uint8, typecode_t::dt_uint8 }; ai2d_crop_param_t crop_param { false, 0, 0, 0, 0 }; ai2d_shift_param_t shift_param { false, 0 }; + float h_ratio = static_cast(height) / out_shape[2]; + float w_ratio = static_cast(width) / out_shape[3]; + float ratio = h_ratio > w_ratio ? 
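Editor's note: the main.cc hunks above replace the flat std::vector<float> (four floats per face) with std::vector<face_coordinate>, and rescale each box from the AI channel (ISP_CHN1_*) to the display channel (ISP_CHN0_*) before drawing. A minimal sketch of that rescaling as a standalone helper; the function name and explicit width/height parameters are illustrative, not part of the patch:

#include <stdint.h>
#include "util.h"   /* face_coordinate comes from this patch's util.h */

/* Hypothetical helper: map a detection box between two video channel resolutions. */
static face_coordinate scale_box(face_coordinate in,
                                 uint32_t src_w, uint32_t src_h,  /* e.g. ISP_CHN1_WIDTH/HEIGHT */
                                 uint32_t dst_w, uint32_t dst_h)  /* e.g. ISP_CHN0_WIDTH/HEIGHT */
{
    face_coordinate out;
    out.x1 = (int)((uint32_t)in.x1 * dst_w / src_w);  /* integer scaling, as in the sample */
    out.y1 = (int)((uint32_t)in.y1 * dst_h / src_h);
    out.x2 = (int)((uint32_t)in.x2 * dst_w / src_w);
    out.y2 = (int)((uint32_t)in.y2 * dst_h / src_h);
    return out;
}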
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc
index f8084babd..439b6dba6 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc
+++ b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.cc
@@ -29,22 +29,37 @@ static float umeyama_args[] =
 MobileRetinaface::MobileRetinaface(const char *kmodel_file, size_t channel, size_t height, size_t width)
     : Model("MobileRetinaface", kmodel_file), ai2d_input_c_(channel), ai2d_input_h_(height), ai2d_input_w_(width)
 {
+#if ENABLE_DEBUG
+    std::cout << __FUNCTION__ << ": c = " << channel << ", h = " << height << ", w = " << width << std::endl;
+#endif
+
     // ai2d output tensor
     ai2d_out_tensor_ = input_tensor(0);
 
     // ai2d config
     dims_t in_shape { 1, ai2d_input_c_, ai2d_input_h_, ai2d_input_w_ };
-    auto out_span = ai2d_out_tensor_.shape();
-    dims_t out_shape { out_span.begin(), out_span.end() };
+    auto out_shape = input_shape(0);
     ai2d_datatype_t ai2d_dtype { ai2d_format::NCHW_FMT, ai2d_format::NCHW_FMT, typecode_t::dt_uint8, typecode_t::dt_uint8 };
     ai2d_crop_param_t crop_param { false, 0, 0, 0, 0 };
     ai2d_shift_param_t shift_param { false, 0 };
+    float h_ratio = static_cast<float>(height) / out_shape[2];
+    float w_ratio = static_cast<float>(width) / out_shape[3];
+    float ratio = h_ratio > w_ratio ? h_ratio : w_ratio;
+    int h_pad = out_shape[2] - height / ratio;
+    int h_pad_before = h_pad / 2;
+    int h_pad_after = h_pad - h_pad_before;
+    int w_pad = out_shape[3] - width / ratio;
+    int w_pad_before = w_pad / 2;
+    int w_pad_after = w_pad - w_pad_before;
+
 #if defined(CONFIG_BOARD_K230_CANMV)
     ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 70, 70 }, { 0, 0 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
 #else
-    ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 70, 70 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
+    ai2d_pad_param_t pad_param { true, { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 70, 70 } }, ai2d_pad_mode::constant, { 0, 0, 0 } };
 #endif
+
+    // ai2d_pad_param_t pad_param{true, {{ 0, 0 }, { 0, 0 }, { h_pad_before, h_pad_after }, { w_pad_before, w_pad_after }}, ai2d_pad_mode::constant, { 0, 0, 0 }};
     ai2d_resize_param_t resize_param { true, ai2d_interp_method::tf_bilinear, ai2d_interp_mode::half_pixel };
     ai2d_affine_param_t affine_param { false };
     ai2d_builder_.reset(new ai2d_builder(in_shape, out_shape, ai2d_dtype, crop_param, shift_param, pad_param, resize_param, affine_param));
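Editor's note: the constructor hunk above computes letterbox padding from the input/output aspect ratios but, in this revision, still applies the fixed { 70, 70 } pad (the ratio-based pad_param stays commented out). The arithmetic, restated as a self-contained sketch with no SDK types; the resolutions are assumptions for illustration:

/* Letterbox math as used above: scale the source so it fits the model input,
 * then split the leftover rows/columns evenly before/after the image. */
#include <stdio.h>

int main(void)
{
    float in_w = 1280.0f, in_h = 720.0f;   /* camera frame (assumed) */
    float out_w = 640.0f, out_h = 640.0f;  /* model input (assumed)  */

    float h_ratio = in_h / out_h;
    float w_ratio = in_w / out_w;
    float ratio = h_ratio > w_ratio ? h_ratio : w_ratio; /* shrink by the tighter side */

    int h_pad = (int)(out_h - in_h / ratio);   /* rows left over after scaling */
    int h_pad_before = h_pad / 2;
    int h_pad_after  = h_pad - h_pad_before;   /* odd pads lose no pixels */
    int w_pad = (int)(out_w - in_w / ratio);
    int w_pad_before = w_pad / 2;
    int w_pad_after  = w_pad - w_pad_before;

    printf("pad top/bottom: %d/%d, left/right: %d/%d\n",
           h_pad_before, h_pad_after, w_pad_before, w_pad_after);
    return 0; /* 1280x720 -> 640x640 gives 140/140 rows and 0/0 columns */
}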
@@ -57,7 +72,7 @@ MobileRetinaface::~MobileRetinaface()
 
 void MobileRetinaface::preprocess(uintptr_t vaddr, uintptr_t paddr)
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
@@ -73,49 +88,88 @@ void MobileRetinaface::preprocess(uintptr_t vaddr, uintptr_t paddr)
 
 void MobileRetinaface::postprocess()
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
-    box_t pred_box[16];
-    landmarks_t pred_landmarks[16];
-    size_t box_num = 0;
-    size_t landmark_num = 0;
-    decode(pred_box, &box_num, pred_landmarks, &landmark_num);
+    std::vector<box_t> pred_box;
+    pred_box.reserve(16);
+    std::vector<landmarks_t> landmarks;
+
+    decode(pred_box, landmarks);
 
     int long_side = ai2d_input_h_ > ai2d_input_w_ ? ai2d_input_h_ : ai2d_input_w_;
     int short_side = ai2d_input_h_ < ai2d_input_w_ ? ai2d_input_h_ : ai2d_input_w_;
     int pad = (long_side - short_side) / 2;
-    bool width_pad = long_side == ai2d_input_h_ ? true : false;
-    face_boxes_.clear();
-    face_boxes_.reserve(sizeof(pred_box) / sizeof(pred_box[0]) * 4);
-    // std::cout << "box_num = " << box_num << ", landmark_num = " << landmark_num << std::endl;
-    int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
-    for (size_t i = 0; i < box_num; i++)
+    bool width_pad = long_side == ai2d_input_h_ ? true : false;
+
+    // boxes
+    result_.boxes.clear();
+    result_.boxes.reserve(pred_box.size());
+    for (size_t i = 0; i < pred_box.size(); i++)
     {
+        face_coordinate box;
+
         if(width_pad)
         {
-            x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2) - pad;
-            y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2);
-            x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2) - pad;
-            y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2);
+            box.x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2) - pad;
+            box.y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2);
+            box.x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2) - pad;
+            box.y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2);
         }
         else
        {
-            x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2);
-            y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2) - pad;
-            x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2);
-            y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2) - pad;
+            box.x1 = (int)(pred_box[i].x * long_side - pred_box[i].w * long_side / 2);
+            box.y1 = (int)(pred_box[i].y * long_side - pred_box[i].h * long_side / 2) - pad;
+            box.x2 = (int)(pred_box[i].x * long_side + pred_box[i].w * long_side / 2);
+            box.y2 = (int)(pred_box[i].y * long_side + pred_box[i].h * long_side / 2) - pad;
         }
-        face_boxes_.push_back(x1);
-        face_boxes_.push_back(y1);
-        face_boxes_.push_back(x2);
-        face_boxes_.push_back(y2);
-        // std::cout << "face_location: " << x1 << ", " << y1 << ", " << x2 << ", " << y2 << std::endl;
+
+        box.x1 = box.x1 < 0 ? 1 : box.x1;
+        box.y1 = box.y1 < 0 ? 1 : box.y1;
+        box.x2 = box.x2 > ai2d_input_w_ ? ai2d_input_w_ : box.x2;
+        box.y2 = box.y2 > ai2d_input_h_ ? ai2d_input_h_ : box.y2;
+
+        result_.boxes.push_back(box);
+#if ENABLE_DEBUG
+        std::cout << "face_location: (" << box.x1 << ", " << box.y1 << "), (" << box.x2 << ", " << box.y2 << ")" << std::endl;
+#endif
     }
+
+    // landmarks
+    result_.landmarks.clear();
+    for (size_t i = 0; i < landmarks.size(); i++)
+    {
+        auto landmark = landmarks[i];
+        int x = 0, y = 0;
+#if ENABLE_DEBUG
+        std::cout << "landmark: ";
+#endif
+        for (uint32_t j = 0; j < 5; j++)
+        {
+            if(width_pad)
+            {
+                x = (int)(landmark.points[2 * j + 0] * long_side) - pad;
+                y = (int)(landmark.points[2 * j + 1] * long_side);
+            }
+            else
+            {
+                x = (int)(landmark.points[2 * j + 0] * long_side);
+                y = (int)(landmark.points[2 * j + 1] * long_side) - pad;
+            }
+
+            landmark.points[2 * j + 0] = x;
+            landmark.points[2 * j + 1] = y;
+
+#if ENABLE_DEBUG
+            std::cout << "(" << x << ", " << y << "),";
+#endif
+        }
+#if ENABLE_DEBUG
+        std::cout << std::endl;
+#endif
+        result_.landmarks.push_back(landmark);
+    }
-    // std::cout << "box_num = " << face_boxes_.size() << std::endl;
 }
 
 float MobileRetinaface::overlap(float x1, float w1, float x2, float w2)
@@ -247,15 +301,15 @@ landmarks_t MobileRetinaface::get_landmark(float* landmarks, int obj_index)
     return landmark;
 }
 
-void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num)
+void MobileRetinaface::decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks)
 {
     const size_t size = 9;
     float *out[size];
     for (size_t i = 0; i < size; i++)
     {
         auto tensor = output_tensor(i);
-        auto mapped_buf = std::move(hrt::map(tensor, hrt::map_read).unwrap());
-        out[i] = reinterpret_cast<float *>(mapped_buf.buffer().data());
+        auto buf = tensor.impl()->to_host().unwrap()->buffer().as_host().unwrap().map(map_access_::map_read).unwrap().buffer();
+        out[i] = reinterpret_cast<float *>(buf.data());
         // dump("kpu output " + std::to_string(i), out[i], 32);
     }
 
@@ -269,8 +323,6 @@ void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pre
     float *landms1 = out[7];
     float *landms2 = out[8];
 
-    float RETINAFACE_OBJ_THRESH = 0.2;
-    float RETINAFACE_NMS_THESH = 0.5;
     float box[LOC_SIZE] = { 0.0 };
     float landms[LAND_SIZE] = { 0.0 };
     int objs_num = MIN_SIZE * (1 + 4 + 16);
@@ -299,7 +351,7 @@ void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pre
     obj_cnt = 0;
     for (uint32_t oo = 0; oo < objs_num; oo++)
     {
-        if(s_probs[oo] >= RETINAFACE_OBJ_THRESH)
+        if(s_probs[oo] >= obj_threshold_)
         {
             s_probs_copy[obj_cnt] = s_probs[oo];
             s_copy[obj_cnt].index = s[oo].index;
@@ -319,26 +371,27 @@ void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pre
     {
         obj_index = s[i].index;
         //cout << "s_probs[obj_index]: " << s_probs[obj_index] << endl;
-        //cout << "RETINAFACE_OBJ_THRESH: " << RETINAFACE_OBJ_THRESH << endl;
-        if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+        //cout << "obj_threshold_: " << obj_threshold_ << endl;
+        if (s_probs[obj_index] < obj_threshold_)
             continue;
 
         // s_probs[obj_index]: 0.99418
         //cout << "s_probs[obj_index]: " << s_probs[obj_index] << endl;
         //printf("pred conf: %f\n", s_probs[obj_index]);
         box_t a = get_box(boxes, obj_index);
+        pred_box.push_back(a);
+
         landmarks_t l = get_landmark(landmarks, obj_index);
-        pred_box[(*box_num)++] = a;
-        pred_landmarks[(*landmark_num)++] = l;
+        pred_landmarks.push_back(l);
 
         // box a: 0.543867, 0.470195, 0.090413, 0.116365
-        //cout << "box a: " << a.x << ", " << a.y << ", " << a.w << ", " << a.h << ";" << endl;
-        //printf("pred box: %f, %f, %f, %f\n", a.x, a.y, a.w, a.h);
+        // cout << "box a: " << a.x << ", " << a.y << ", " << a.w << ", " << a.h << ";" << endl;
+        // printf("pred box: %f, %f, %f, %f\n", a.x, a.y, a.w, a.h);
         for (j = i + 1; j < objs_num; ++j){
             obj_index = s[j].index;
-            if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+            if (s_probs[obj_index] < obj_threshold_)
                 continue;
             box_t b = get_box(boxes, obj_index);
             iou_cal_times += 1;
-            if (box_iou(a, b) >= RETINAFACE_NMS_THESH)
+            if (box_iou(a, b) >= nms_threshold_)
                 s_probs[obj_index] = 0;
         }
     }
@@ -352,7 +405,7 @@
 }
 
 #else
-void MobileRetinaface::deal_conf_opt(float* conf, float* s_probs, int* s, int size, int* obj_cnt, int* real_count, float RETINAFACE_OBJ_THRESH, float* tmp)
+void MobileRetinaface::deal_conf_opt(float* conf, float* s_probs, int* s, int size, int* obj_cnt, int* real_count, float* tmp)
 {
     softmax_2group_vec(size, conf, conf + size, tmp, tmp + size);
     // softmax_2group_vec(size, conf + 2 * size, conf + 3 * size, tmp + 2 * size, tmp + 3 * size);
@@ -362,7 +415,7 @@
     for(int i = 0; i < size; ++i)
     {
         register float soft_vlaue = tmp[size + i];
-        if(soft_vlaue >= RETINAFACE_OBJ_THRESH)
+        if(soft_vlaue >= obj_threshold_)
         {
             s[index_s] = cnt;
             s_probs[index_s] = soft_vlaue;
@@ -370,7 +423,7 @@
         }
         cnt += 1;
         soft_vlaue = tmp[size * 3 + i];
-        if(soft_vlaue >= RETINAFACE_OBJ_THRESH)
+        if(soft_vlaue >= obj_threshold_)
        {
             s[index_s] = cnt;
             s_probs[index_s] = soft_vlaue;
@@ -388,7 +441,7 @@ void MobileRetinaface::deal_loc_opt(float* loc, float* boxes, int size, int* obj
 {
     register int cnt = *obj_cnt;
     register int index_s = *real_count;
-    for (uint32_t ww = 0; ww < (uint32_t)size; ww++)
+    for (uint32_t ww = 0; ww < size; ww++)
     {
         for (uint32_t hh = 0; hh < 2; hh++)
         {
@@ -412,7 +465,7 @@ void MobileRetinaface::deal_landms_opt(float* landms, float* landmarks, int size
 {
     register int cnt = *obj_cnt;
     register int index_s = *real_count;
-    for (uint32_t ww = 0; ww < (uint32_t)size; ww++)
+    for (uint32_t ww = 0; ww < size; ww++)
     {
         for (uint32_t hh = 0; hh < 2; hh++)
         {
@@ -475,7 +528,7 @@ int nms_comparator2(void* pa, void* pb)
     return 0;
 }
 
-void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num)
+void MobileRetinaface::decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks)
 {
     const size_t size = 9;
     float *out[size];
@@ -496,13 +549,6 @@ void MobileRetinaface::decode(box_t *pred_box, size_t *box_num, landmarks_t *pre
     float *landms1 = out[7];
     float *landms2 = out[8];
 
-#if 0
-    float RETINAFACE_OBJ_THRESH = 0.6f;
-    float RETINAFACE_NMS_THESH = 0.5f;
-#else
-    float RETINAFACE_OBJ_THRESH = 0.3f;
-    float RETINAFACE_NMS_THESH = 0.2f;
-#endif
     int objs_num = MIN_SIZE * (1 + 4 + 16);
     int* s = (int*)malloc(objs_num * sizeof(int));
     float* s_probs = (float*)malloc(objs_num * sizeof(float));
@@ -512,9 +558,9 @@
 
     float*tmp = (float*)malloc(16 * MIN_SIZE / 2 * sizeof(float) * CONF_SIZE * 2);
 
-    deal_conf_opt(conf0, s_probs, s, 16 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
-    deal_conf_opt(conf1, s_probs, s, 4 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
-    deal_conf_opt(conf2, s_probs, s, 1 * MIN_SIZE / 2, &obj_cnt, &real_count, RETINAFACE_OBJ_THRESH, tmp);
+    deal_conf_opt(conf0, s_probs, s, 16 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
+    deal_conf_opt(conf1, s_probs, s, 4 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
+    deal_conf_opt(conf2, s_probs, s, 1 * MIN_SIZE / 2, &obj_cnt, &real_count, tmp);
 
     float* boxes = (float*)malloc(objs_num * LOC_SIZE * sizeof(float));
     obj_cnt = 0;
@@ -542,18 +588,20 @@
     for (int i = 0; i < objs_num; ++i)
     {
         int obj_index = s_int[i];
-        if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+        if (s_probs[obj_index] < obj_threshold_)
             continue;
 
         box_t a = get_box_opt(boxes, obj_index, s[obj_index]);
+        pred_box.push_back(a);
+
         landmarks_t l = get_landmark_opt(landmarks, obj_index, s[obj_index]);
-        pred_box[(*box_num)++] = a;
-        pred_landmarks[(*landmark_num)++] = l;
-        for (int j = i + 1; j < objs_num; ++j){
+        pred_landmarks.push_back(l);
+        for (int j = i + 1; j < objs_num; ++j)
+        {
             obj_index = s_int[j];
-            if (s_probs[obj_index] < RETINAFACE_OBJ_THRESH)
+            if (s_probs[obj_index] < obj_threshold_)
                 continue;
             box_t b = get_box_opt(boxes, obj_index, s[obj_index]);
-            if (box_iou(a, b) >= RETINAFACE_NMS_THESH)
+            if (box_iou(a, b) >= nms_threshold_)
                 s_probs[obj_index] = 0;
         }
     }
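Editor's note: in both decode() variants the hard-coded RETINAFACE_OBJ_THRESH / RETINAFACE_NMS_THESH locals (0.2 / 0.5 in the plain path, 0.3f / 0.2f in the RVV path) move into the class as the obj_threshold_ / nms_threshold_ members declared in mobile_retinaface.h below, now 0.6f / 0.5f. The suppression loop itself is plain greedy NMS; a compact standalone sketch of the same idea, with illustrative corner-format types rather than the SDK's center-format box_t:

/* Greedy NMS sketch: boxes come sorted by descending score (the sample sorts
 * s[] first); any box whose IoU with a kept box reaches nms_th is zeroed. */
struct SimpleBox { float x, y, w, h, score; }; /* x,y = top-left corner here */

static float iou(const SimpleBox &a, const SimpleBox &b)
{
    float lx = a.x > b.x ? a.x : b.x;
    float ty = a.y > b.y ? a.y : b.y;
    float rx = (a.x + a.w) < (b.x + b.w) ? (a.x + a.w) : (b.x + b.w);
    float by = (a.y + a.h) < (b.y + b.h) ? (a.y + a.h) : (b.y + b.h);
    float iw = rx - lx, ih = by - ty;
    if (iw <= 0 || ih <= 0)
        return 0.0f;                  /* no overlap */
    float inter = iw * ih;
    return inter / (a.w * a.h + b.w * b.h - inter);
}

static void greedy_nms(SimpleBox *boxes, int n, float obj_th, float nms_th)
{
    for (int i = 0; i < n; i++) {
        if (boxes[i].score < obj_th)
            continue;                 /* too weak or already suppressed */
        for (int j = i + 1; j < n; j++) {
            if (boxes[j].score < obj_th)
                continue;
            if (iou(boxes[i], boxes[j]) >= nms_th)
                boxes[j].score = 0;   /* suppress the overlapping box */
        }
    }
}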
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.h b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.h
index 3317b3b7e..4f4bb4d01 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.h
+++ b/src/big/mpp/userapps/sample/sample_face_ae/mobile_retinaface.h
@@ -3,20 +3,25 @@
 #include "model.h"
 #include "util.h"
 
-#define ENABLE_RVV 1
 #if ENABLE_RVV
 #include "k230_math.h"
 #include "rvv_math.h"
 #endif
 
+typedef struct
+{
+    std::vector<face_coordinate> boxes;
+    std::vector<landmarks_t> landmarks;
+} DetectResult;
+
 class MobileRetinaface : public Model
 {
 public:
     MobileRetinaface(const char *kmodel_file, size_t channel, size_t height, size_t width);
     ~MobileRetinaface();
 
-    std::vector<float> get_result() const
+    DetectResult get_result() const
     {
-        return face_boxes_;
+        return result_;
     }
 
 protected:
@@ -24,7 +29,7 @@ class MobileRetinaface : public Model
     void postprocess();
 
 private:
-    void decode(box_t *pred_box, size_t *box_num, landmarks_t *pred_landmarks, size_t *landmark_num);
+    void decode(std::vector<box_t> &pred_box, std::vector<landmarks_t> &pred_landmarks);
     float overlap(float x1, float w1, float x2, float w2);
     float box_intersection(box_t a, box_t b);
     float box_union(box_t a, box_t b);
@@ -36,7 +41,7 @@ class MobileRetinaface : public Model
     box_t get_box(float *boxes, int obj_index);
     landmarks_t get_landmark(float *landmarks, int obj_index);
 #else
-    void deal_conf_opt(float *conf, float *s_probs, int *s, int size, int *obj_cnt, int *real_count, float RETINAFACE_OBJ_THRESH, float *tmp);
+    void deal_conf_opt(float *conf, float *s_probs, int *s, int size, int *obj_cnt, int *real_count, float *tmp);
     void deal_loc_opt(float *loc, float *boxes, int size, int *obj_cnt, int *s, int *real_count);
     void deal_landms_opt(float *landms, float *landmarks, int size, int *obj_cnt, int *s, int *real_count);
     box_t get_box_opt(float *boxes, int obj_index, int index_anchors);
@@ -47,9 +52,9 @@ class MobileRetinaface : public Model
     size_t ai2d_input_c_;
     size_t ai2d_input_h_;
     size_t ai2d_input_w_;
-    uintptr_t vaddr_;
-    uintptr_t paddr_;
-    std::vector<float> face_boxes_;
+    float obj_threshold_ = 0.6f;
+    float nms_threshold_ = 0.5f;
+    DetectResult result_;
 };
 
 #endif
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/model.cc b/src/big/mpp/userapps/sample/sample_face_ae/model.cc
index 9a8dd0d36..28eafe330 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/model.cc
+++ b/src/big/mpp/userapps/sample/sample_face_ae/model.cc
@@ -4,24 +4,11 @@
 #include "model.h"
 #include "util.h"
 
-template <typename T>
-std::vector<T> read_binary_file(const char *file_name)
-{
-    std::ifstream ifs(file_name, std::ios::binary);
-    ifs.seekg(0, ifs.end);
-    size_t len = ifs.tellg();
-    std::vector<T> vec(len / sizeof(T), 0);
-    ifs.seekg(0, ifs.beg);
-    ifs.read(reinterpret_cast<char *>(vec.data()), len);
-    ifs.close();
-    return std::move(vec);
-}
-
 Model::Model(const char *model_name, const char *kmodel_file): model_name_(model_name)
 {
     // load kmodel
-    kmodel_ = read_binary_file<unsigned char>(kmodel_file);
-    interp_.load_model({ (const gsl::byte *)kmodel_.data(), kmodel_.size() }).expect("cannot load kmodel.");
+    std::ifstream ifs(kmodel_file, std::ios::binary);
+    interp_.load_model(ifs).expect("load_model failed");
 
     // create kpu input tensors
     for (size_t i = 0; i < interp_.inputs_size(); i++)
@@ -31,15 +18,6 @@ Model::Model(const char *model_name, const char *kmodel_file): model_name_(model
         auto tensor = host_runtime_tensor::create(desc.datatype, shape, hrt::pool_shared).expect("cannot create input tensor");
         interp_.input_tensor(i, tensor).expect("cannot set input tensor");
     }
-
-    // create kpu output tensors
-    for (size_t i = 0; i < interp_.outputs_size(); i++)
-    {
-        auto desc = interp_.output_desc(i);
-        auto shape = interp_.output_shape(i);
-        auto tensor = host_runtime_tensor::create(desc.datatype, shape, hrt::pool_shared).expect("cannot create output tensor");
-        interp_.output_tensor(i, tensor).expect("cannot set output tensor");
-    }
 }
 
 Model::~Model()
@@ -61,7 +39,7 @@ std::string Model::model_name() const
 
 void Model::kpu_run()
 {
-#if PROFILING
+#if ENABLE_PROFILING
     ScopedTiming st(model_name() + " " + __FUNCTION__);
 #endif
 
@@ -81,4 +59,14 @@ void Model::input_tensor(size_t idx, runtime_tensor &tensor)
 runtime_tensor Model::output_tensor(size_t idx)
 {
     return interp_.output_tensor(idx).expect("cannot get output tensor");
-}
\ No newline at end of file
+}
+
+dims_t Model::input_shape(size_t idx)
+{
+    return interp_.input_shape(idx);
+}
+
+dims_t Model::output_shape(size_t idx)
+{
+    return interp_.output_shape(idx);
+}
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/model.h b/src/big/mpp/userapps/sample/sample_face_ae/model.h
index 1621a24b3..139b6b6a4 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/model.h
+++ b/src/big/mpp/userapps/sample/sample_face_ae/model.h
@@ -26,7 +26,8 @@ class Model
     runtime_tensor input_tensor(size_t idx);
     void input_tensor(size_t idx, runtime_tensor &tensor);
     runtime_tensor output_tensor(size_t idx);
-
+    dims_t input_shape(size_t idx);
+    dims_t output_shape(size_t idx);
 protected:
     std::unique_ptr<ai2d_builder> ai2d_builder_;
     runtime_tensor ai2d_in_tensor_;
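Editor's note: model.cc now streams the kmodel straight into the interpreter via std::ifstream instead of buffering the whole file, drops the pre-created output tensors, and model.h exposes input_shape()/output_shape() pass-throughs. A hedged sketch of a caller using the new accessors; it assumes dims_t is an iterable list of extents (the sample builds NCHW shapes like { 1, C, H, W }):

#include <iostream>
#include "model.h"

/* Hypothetical caller of the accessors added above. */
void print_model_io(Model &model)
{
    dims_t in = model.input_shape(0);   /* e.g. { 1, C, H, W } */
    dims_t out = model.output_shape(0);

    std::cout << "input :";
    for (auto d : in) std::cout << " " << d;
    std::cout << "\noutput:";
    for (auto d : out) std::cout << " " << d;
    std::cout << std::endl;
}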
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/util.cc b/src/big/mpp/userapps/sample/sample_face_ae/util.cc
index 510e87bc7..23be42816 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/util.cc
+++ b/src/big/mpp/userapps/sample/sample_face_ae/util.cc
@@ -3,12 +3,17 @@
 #include <fstream>
 #include "util.h"
 
-void read_binary_file(const char *file_name, char *buffer)
+void read_binary_file(const char *file_name, char *buffer, size_t size)
 {
     std::ifstream ifs(file_name, std::ios::binary);
     ifs.seekg(0, ifs.end);
-    size_t len = ifs.tellg();
-    ifs.seekg(0, ifs.beg);
+    size_t len = ifs.tellg();
+    if (size != 0)
+    {
+        len = size;
+    }
+
+    ifs.seekg(0, ifs.beg);
     ifs.read(buffer, len);
     ifs.close();
 }
@@ -54,56 +59,3 @@ void softmax(float* x, float* dx, uint32_t len)
         dx[i] = x[i] / sum_value;
     }
 }
-
-
-uint32_t l2normalize(const float* x, float* dx, int len)
-{
-    int f;
-    float sum = 0;
-    for (f = 0; f < len; ++f)
-    {
-        sum += x[f] * x[f];
-    }
-    sum = sqrtf(sum);
-    for (f = 0; f < len; ++f)
-    {
-        dx[f] = x[f] / sum;
-    }
-    return 0;
-}
-
-float calCosinDistance(float* faceFeature0P, float* faceFeature1P, int featureLen)
-{
-    float coorFeature = 0;
-    // calculate the sum square
-    for (int fIdx = 0; fIdx < featureLen; fIdx++)
-    {
-        float featureVal0 = *(faceFeature0P + fIdx);
-        float featureVal1 = *(faceFeature1P + fIdx);
-        coorFeature += featureVal0 * featureVal1;
-    }
-    // cosin distance
-    return (0.5 + 0.5 * coorFeature) * 100;
-}
-
-uint32_t calulate_score(const float* features, const float* saved_features, uint32_t saved_len, float* score)
-{
-    unsigned int i;
-    int v_id = -1;
-    float v_score;
-    float v_score_max = 0.0;
-    float basef[FL], testf[FL];
-    l2normalize(features, testf, FL);
-    for (i = 0; i < saved_len; i++)
-    {
-        l2normalize(saved_features + i * FL, basef, FL);
-        v_score = calCosinDistance(testf, basef, FL);
-        if (v_score > v_score_max)
-        {
-            v_score_max = v_score;
-            v_id = i;
-        }
-    }
-    *score = v_score_max;
-    return v_id;
-}
\ No newline at end of file
diff --git a/src/big/mpp/userapps/sample/sample_face_ae/util.h b/src/big/mpp/userapps/sample/sample_face_ae/util.h
index cc2de6fd2..8254a52c8 100644
--- a/src/big/mpp/userapps/sample/sample_face_ae/util.h
+++ b/src/big/mpp/userapps/sample/sample_face_ae/util.h
@@ -1,11 +1,13 @@
 #ifndef _UTIL_H
 #define _UTIL_H
 
 #include <stdint.h>
+#include <fstream>
 #include <string>
 
-#define PROFILING 0
+#define ENABLE_RVV 1
+#define ENABLE_DEBUG 0
+#define ENABLE_PROFILING 0
 
-#define FL 192
 typedef struct
 {
     int index;
@@ -25,12 +27,29 @@ typedef struct
     float points[10];
 } landmarks_t;
 
-void read_binary_file(const char *file_name, char *buffer);
+typedef struct
+{
+    int x1;
+    int y1;
+    int x2;
+    int y2;
+} face_coordinate;
+
+void read_binary_file(const char *file_name, char *buffer, size_t size = 0);
+template <typename T>
+std::vector<T> read_binary_file(const char *file_name)
+{
+    std::ifstream ifs(file_name, std::ios::binary);
+    ifs.seekg(0, ifs.end);
+    size_t len = ifs.tellg();
+    std::vector<T> vec(len / sizeof(T), 0);
+    ifs.seekg(0, ifs.beg);
+    ifs.read(reinterpret_cast<char *>(vec.data()), len);
+    ifs.close();
+    return std::move(vec);
+}
 void dump(const std::string &info, volatile float *p, size_t size);
 void softmax(float *x, float *dx, uint32_t len);
-uint32_t l2normalize(const float* x, float* dx, int len);
-float calCosinDistance(float* faceFeature0P, float* faceFeature1P, int featureLen);
-uint32_t calulate_score(const float* features, const float* saved_features, uint32_t saved_len, float* score);
 
 class ScopedTiming
 {
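Editor's note: util.h now carries both overloads of read_binary_file: the raw-buffer variant, which gains an optional size cap, and the templated whole-file reader moved out of model.cc. Typical call sites might look like the sketch below; the file path is a placeholder:

#include "util.h"

/* Hypothetical call sites for the two readers declared in util.h. */
void load_examples(void)
{
    char header[64];
    read_binary_file("/sdcard/test.kmodel", header, sizeof(header)); /* size caps the read */

    auto whole = read_binary_file<unsigned char>("/sdcard/test.kmodel"); /* whole file */
    (void)whole;
}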
diff --git a/src/big/mpp/userapps/sample/sample_vicap/sample_vicap.c b/src/big/mpp/userapps/sample/sample_vicap/sample_vicap.c
index 2539eacfc..1a7aa0647 100755
--- a/src/big/mpp/userapps/sample/sample_vicap/sample_vicap.c
+++ b/src/big/mpp/userapps/sample/sample_vicap/sample_vicap.c
@@ -367,7 +367,7 @@ static void usage(void)
     printf("usage: ./sample_vicap -mode 0 -dev 0 -sensor 0 -chn 0 -chn 1 -ow 640 -oh 480 -preview 1 -rotation 1\n");
     printf("Options:\n");
     printf(" -mode: vicap work mode[0: online mode, 1: offline mode. only offline mode supports multiple sensor input]\tdefault 0\n");
-    printf(" -conn: vo connector device [0: hx8399, 1: lt9611.]\tdefault 0\n");
+    printf(" -conn: vo connector device [0: hx8399, 1: lt9611-1920x1080p60, 2: lt9611-1920x1080p30]\tdefault 0\n");
     printf(" -itype: vicap input type[0,1]\t0: sensor input, 1: user raw image input, default 0\n");
     printf(" -ifile: the input raw image file, only used for user raw image input\n");
@@ -485,6 +485,8 @@ int main(int argc, char *argv[])
         if (conn == 0) {
             connector_type = HX8377_V2_MIPI_4LAN_1080X1920_30FPS;
         } else if (conn == 1) {
+            connector_type = LT9611_MIPI_4LAN_1920X1080_60FPS;
+        } else if (conn == 2) {
             connector_type = LT9611_MIPI_4LAN_1920X1080_30FPS;
         } else {
             printf("unsupported connector device.\n");
diff --git a/src/big/mpp/userapps/sample/sample_vo/sample_vo.c b/src/big/mpp/userapps/sample/sample_vo/sample_vo.c
index d7b0080ad..ad0fb5f73 100755
--- a/src/big/mpp/userapps/sample/sample_vo/sample_vo.c
+++ b/src/big/mpp/userapps/sample/sample_vo/sample_vo.c
@@ -149,7 +149,7 @@ int main(int argc, char *argv[])
             break;
         case DISPALY_VO_LT9611_TEST :
             // sample_connector_init();
-            sample_connector_osd_install_frame(LT9611_MIPI_4LAN_1920X1080_30FPS);
+            sample_connector_osd_install_frame(LT9611_MIPI_4LAN_1920X1080_60FPS);
             break;
         default :
             break;
diff --git a/src/big/mpp/userapps/src/connector/mpi_connector.c b/src/big/mpp/userapps/src/connector/mpi_connector.c
index 63acd5a93..70e86e50d 100755
--- a/src/big/mpp/userapps/src/connector/mpi_connector.c
+++ b/src/big/mpp/userapps/src/connector/mpi_connector.c
@@ -38,9 +38,23 @@ const k_connector_info connector_info_list[] = {
         K_DSI_4LAN,
         K_BURST_MODE,
         K_VO_LP_MODE,
-        {15, 295, 0x17, 0x96},
-        //{74250, 445500, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 36, 4},
-        {74250, 445500, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 4, 36},
+        {8, 165, 0x09, 0x96},
+        {148500, 890666, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 4, 36},
+        LT9611_MIPI_4LAN_1920X1080_60FPS,
+    },
+    {
+        "lt9611",
+        0,
+        0,
+        BACKGROUND_BLACK_COLOR,
+        10,
+        K_DSI_4LAN,
+        K_BURST_MODE,
+        K_VO_LP_MODE,
+        {8, 165, 0x09, 0x96}, // use LT9611_MIPI_4LAN_1920X1080_60FPS's configuration
+        {148500, 890666, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 4, 36}, // use LT9611_MIPI_4LAN_1920X1080_60FPS's configuration
+        //{15, 295, 0x17, 0x96},
+        //{74250, 445500, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 4, 36},
         LT9611_MIPI_4LAN_1920X1080_30FPS,
     },
 };
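Editor's note: the new LT9611 entry above carries standard CEA-861 1080p60 timing. Reading the second initializer against that standard, with field names that are an editor's interpretation for illustration only, not taken from k_connector_comm.h:

/* A hedged reading of {148500, 890666, 2200, 1920, 44, 148, 88, 1125, 1080, 5, 4, 36}. */
static const unsigned int lt9611_1080p60_timing[] = {
    148500, /* pixel clock, kHz (148.5 MHz is the standard 1080p60 rate) */
    890666, /* MIPI lane/phy clock related rate, kHz (board-specific)    */
    2200,   /* h total  = 1920 + 88 + 44 + 148                           */
    1920,   /* h active                                                  */
    44,     /* h sync width                                              */
    148,    /* h back porch                                              */
    88,     /* h front porch                                             */
    1125,   /* v total  = 1080 + 4 + 5 + 36                              */
    1080,   /* v active                                                  */
    5,      /* v sync width                                              */
    4,      /* v front porch                                             */
    36,     /* v back porch                                              */
};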
diff --git a/src/big/rt-smart/kernel/bsp/maix3/testcase/kernel/spi_nandflash_w25n01gv.c b/src/big/rt-smart/kernel/bsp/maix3/testcase/kernel/spi_nandflash_w25n01gv.c
index 29d56c689..4f0eec03c 100755
--- a/src/big/rt-smart/kernel/bsp/maix3/testcase/kernel/spi_nandflash_w25n01gv.c
+++ b/src/big/rt-smart/kernel/bsp/maix3/testcase/kernel/spi_nandflash_w25n01gv.c
@@ -354,6 +354,7 @@ static void nandflash_less_page_read(uint32_t col_addr, uint32_t ra_addr, uint8_
     nandflash_wait_idle();
 
     cfg.mode = DWENUM_SPI_EEPROM;
+    spi_dev_w25n->config = cfg;
     spi_dev_w25n->bus->owner = RT_NULL;
 
     cmd[0] = READ_FROM_CACHE_FAST;
@@ -450,6 +451,8 @@ static void nandflash_init(void)
     int ret;
     uint32_t addr = 0;
 
+    rt_memcpy(current_flash_info, &default_nandflash_info, sizeof(flash_info_t));
+
     data_prepare();
 
     nandflash_reset();
diff --git a/src/big/rt-smart/kernel/rt-thread/components/lwp/lwp_user_mm.c b/src/big/rt-smart/kernel/rt-thread/components/lwp/lwp_user_mm.c
index 30fa806ba..a9932bf40 100644
--- a/src/big/rt-smart/kernel/rt-thread/components/lwp/lwp_user_mm.c
+++ b/src/big/rt-smart/kernel/rt-thread/components/lwp/lwp_user_mm.c
@@ -367,7 +367,7 @@ void* lwp_mmap2(void *addr, size_t length, int prot,
     if (fd == -1)
     {
         rt_mm_lock();
-        if (length == 0x1b0) /* TODO: workaround for tls bug, by aozima, need fix asap. */
+        if (length == 0x1b0 || length == 0xb70) /* TODO: workaround for tls bug, by aozima, need fix asap. */
         {
             ret = lwp_map_user(lwp_self(), addr, length + ARCH_PAGE_SIZE, 0);
             ret += ARCH_PAGE_SIZE;
diff --git a/src/common/cdk/user/component/datafifo/host/lib/libdatafifo.a b/src/common/cdk/user/component/datafifo/host/lib/libdatafifo.a
index f81c569ba..89b60299b 100644
Binary files a/src/common/cdk/user/component/datafifo/host/lib/libdatafifo.a and b/src/common/cdk/user/component/datafifo/host/lib/libdatafifo.a differ
diff --git a/src/common/cdk/user/component/datafifo/slave/lib/libdatafifo.a b/src/common/cdk/user/component/datafifo/slave/lib/libdatafifo.a
index c5db938bd..21ec993e5 100644
Binary files a/src/common/cdk/user/component/datafifo/slave/lib/libdatafifo.a and b/src/common/cdk/user/component/datafifo/slave/lib/libdatafifo.a differ
diff --git a/src/common/cdk/user/component/ipcmsg/host/lib/libipcmsg.a b/src/common/cdk/user/component/ipcmsg/host/lib/libipcmsg.a
index 207ff7e49..0785df172 100644
Binary files a/src/common/cdk/user/component/ipcmsg/host/lib/libipcmsg.a and b/src/common/cdk/user/component/ipcmsg/host/lib/libipcmsg.a differ
diff --git a/src/common/cdk/user/component/ipcmsg/slave/lib/libipcmsg.a b/src/common/cdk/user/component/ipcmsg/slave/lib/libipcmsg.a
index d538c0816..6d98a413d 100644
Binary files a/src/common/cdk/user/component/ipcmsg/slave/lib/libipcmsg.a and b/src/common/cdk/user/component/ipcmsg/slave/lib/libipcmsg.a differ
diff --git a/src/common/cdk/user/mapi/sample/sample_venc/sample_venc.c b/src/common/cdk/user/mapi/sample/sample_venc/sample_venc.c
index 26a8875fa..afa14ad38 100755
--- a/src/common/cdk/user/mapi/sample/sample_venc/sample_venc.c
+++ b/src/common/cdk/user/mapi/sample/sample_venc/sample_venc.c
@@ -104,35 +104,6 @@ struct option long_options[] = {
     {NULL, 0, NULL, 0},
 };
 
-static k_vicap_sensor_type get_sensor_type(k_u32 sensor_index)
-{
-    switch (sensor_index)
-    {
-    case 0:
-        return OV_OV9732_MIPI_1280X720_30FPS_10BIT_LINEAR;
-    case 1:
-        return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_IR;
-    case 2:
-        return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE;
-    case 3:
-        return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR;
-    case 4:
-        return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_LINEAR;
-    case 5:
-        return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_LINEAR;
-    case 6:
-        return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_MCLK_7425_LINEAR;
-    case 7:
-        return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR;
-    case 8:
-        return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR;
-    default:
-        printf("unsupport sensor type %d, use default IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR\n", sensor_index);
-        return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR;
-    }
-    return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR;
-}
-
 static k_payload_type get_venc_type(k_u32 vtype_index) {
     switch (vtype_index) {
     case 0:
@@ -154,7 +125,7 @@ k_u32 parse_option(int argc, char *argv[], SampleCtx *psample_ctx) {
     while ((c = getopt_long(argc, argv, "s:n:t:o:h", long_options, &option_index)) != -1) {
         switch (c) {
         case 's': {
-            psample_ctx->sensor_type = get_sensor_type(atoi(optarg));
+            psample_ctx->sensor_type = (k_vicap_sensor_type)atoi(optarg);
             printf("sensor type: %d.\n", psample_ctx->sensor_type);
             break;
         }
@@ -184,15 +155,7 @@ k_u32 parse_option(int argc, char *argv[], SampleCtx
*psample_ctx) { case 'h': { printf("Usage: %s -s 0 -n 2 -t 0\n", argv[0]); printf(" -s or --sensor_type [sensor_index],\n"); - printf(" 0: ov9732\n"); - printf(" 1: ov9286 ir\n"); - printf(" 2: ov9286 speckle\n"); - printf(" 3: imx335 2LANE 1920Wx1080H\n"); - printf(" 4: imx335 2LANE 2592Wx1944H\n"); - printf(" 5: imx335 4LANE 2592Wx1944H\n"); - printf(" 6: imx335 2LANE MCLK 7425 1920Wx1080H\n"); - printf(" 7: imx335 2LANE MCLK 7425 2592Wx1944H\n"); - printf(" 8: imx335 4LANE MCLK 7425 2592Wx1944H\n"); + printf(" see vicap doc\n"); printf(" -n or --chn_num [number], 1, 2, 3.\n"); printf(" -t or --type [type_index],\n"); printf(" 0: h264 type\n"); @@ -254,7 +217,7 @@ int main(int argc, char *argv[]) { k_vicap_dev_set_info dev_attr_info; memset(&dev_attr_info, 0, sizeof(dev_attr_info)); - if (sample_context.sensor_type <= OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE) + if (sample_context.sensor_type <= OV_OV9286_MIPI_1280X720_60FPS_10BIT_LINEAR_IR_SPECKLE || sample_context.sensor_type >= SC_SC035HGS_MIPI_1LANE_RAW10_640X480_120FPS_LINEAR) dev_attr_info.dw_en = K_FALSE; else dev_attr_info.dw_en = K_TRUE; diff --git a/src/common/cdk/user/middleware/kplayer/include/kplayer.h b/src/common/cdk/user/middleware/kplayer/include/kplayer.h index 02b7b5b95..388c155aa 100755 --- a/src/common/cdk/user/middleware/kplayer/include/kplayer.h +++ b/src/common/cdk/user/middleware/kplayer/include/kplayer.h @@ -49,6 +49,11 @@ typedef struct kPLAYER_PROGRESS_INFO_E k_u64 cur_time; }K_PLAYER_PROGRESS_INFO; +typedef enum { + K_PLAYER_HX8377_V2_MIPI_4LAN_1080X1920_30FPS, + K_PLAYER_LT9611_MIPI_4LAN_1920X1080_30FPS, +} K_PLAYER_CONNECTOR_TYPE; + typedef k_s32 (*K_PLAYER_EVENT_FN)(K_PLAYER_EVENT_E enEvent, void* pData); /** @@ -65,6 +70,13 @@ k_s32 kd_player_init(k_bool init_vo); */ k_s32 kd_player_deinit(k_bool deinit_vo); +/** +* @brief set connector type +* @param[in] connector_type : set vo connector type +* @retval N/A +*/ +void kd_player_set_connector_type(K_PLAYER_CONNECTOR_TYPE connector_type); + /** * @brief register call back fun * @param[in] pfnCallback : K_PLAYER_EVENT_E: call back fun diff --git a/src/common/cdk/user/middleware/kplayer/src/display_cfg.c b/src/common/cdk/user/middleware/kplayer/src/display_cfg.c index b5e501962..16497774a 100644 --- a/src/common/cdk/user/middleware/kplayer/src/display_cfg.c +++ b/src/common/cdk/user/middleware/kplayer/src/display_cfg.c @@ -48,6 +48,8 @@ #define PRIVATE_POLL_SZE (1920 * 1080 * 3 / 2) #define PRIVATE_POLL_NUM (4) +static k_connector_type g_connector_type = HX8377_V2_MIPI_4LAN_1080X1920_30FPS; + typedef struct { k_u64 osd_phy_addr; @@ -172,7 +174,8 @@ static k_s32 sample_connector_init(void) k_s32 ret = 0; char dev_name[64] = {0}; k_s32 connector_fd; - k_connector_type connector_type = HX8377_V2_MIPI_4LAN_1080X1920_30FPS; + k_connector_type connector_type = g_connector_type; + printf("connector_type:%d \n",g_connector_type); k_connector_info connector_info; memset(&connector_info, 0, sizeof(k_connector_info)); @@ -202,6 +205,10 @@ static k_s32 sample_connector_init(void) return 0; } +void display_set_connector_type(k_connector_type type) +{ + g_connector_type = type; +} void display_layer_init(k_u32 width,k_u32 height) { @@ -225,9 +232,18 @@ void display_layer_init(k_u32 width,k_u32 height) // config lyaer if (width > 1088) { - info.act_size.width = height; - info.act_size.height = width; - info.func = K_ROTATION_90; + if (g_connector_type == HX8377_V2_MIPI_4LAN_1080X1920_30FPS) + { + info.act_size.width = height; + info.act_size.height = width; + 
info.func = K_ROTATION_90; + } + else + { + info.act_size.width = width; + info.act_size.height = height; + info.func = 0; + } } else { diff --git a/src/common/cdk/user/middleware/kplayer/src/display_cfg.h b/src/common/cdk/user/middleware/kplayer/src/display_cfg.h index 7101a6607..75306559e 100644 --- a/src/common/cdk/user/middleware/kplayer/src/display_cfg.h +++ b/src/common/cdk/user/middleware/kplayer/src/display_cfg.h @@ -33,10 +33,12 @@ extern "C" { #endif #include "k_type.h" +#include "k_connector_comm.h" k_s32 vo_init(); k_s32 vo_deinit(); void display_layer_init(k_u32 width,k_u32 height); +void display_set_connector_type(k_connector_type type); void display_layer_deinit(); #ifdef __cplusplus diff --git a/src/common/cdk/user/middleware/kplayer/src/kplayer.c b/src/common/cdk/user/middleware/kplayer/src/kplayer.c index a89856550..b383865b5 100755 --- a/src/common/cdk/user/middleware/kplayer/src/kplayer.c +++ b/src/common/cdk/user/middleware/kplayer/src/kplayer.c @@ -45,6 +45,7 @@ static k_bool g_play_start = K_FALSE; static k_bool g_play_pause = K_FALSE; static pthread_t g_play_tid = 0; static K_PLAYER_PROGRESS_INFO g_progress_info; +static K_PLAYER_CONNECTOR_TYPE g_connector_type = K_PLAYER_HX8377_V2_MIPI_4LAN_1080X1920_30FPS; static k_payload_type _get_payload_type(k_mp4_codec_id_e codec_id) { @@ -158,6 +159,11 @@ k_s32 kd_player_deinit(k_bool deinit_vo) return K_SUCCESS; } +void kd_player_set_connector_type(K_PLAYER_CONNECTOR_TYPE connector_type) +{ + g_connector_type = connector_type; +} + k_s32 kd_player_setdatasource(const k_char *filePath) { k_s32 ret; @@ -175,7 +181,7 @@ k_s32 kd_player_setdatasource(const k_char *filePath) if (g_video_track != INVALID_STREAM_TRACK) { - ret = disp_open(g_player_video_info.type,g_player_video_info.width,g_player_video_info.height); + ret = disp_open(g_player_video_info.type,g_player_video_info.width,g_player_video_info.height,g_connector_type); if (ret != K_SUCCESS) { printf("disp_open failed\n"); @@ -347,7 +353,7 @@ static int g_pic_size = 0; k_s32 kd_picture_init() { k_s32 ret; - ret = disp_open(K_PT_JPEG,g_pic_width,g_pic_height); + ret = disp_open(K_PT_JPEG,g_pic_width,g_pic_height,g_connector_type); if (ret != K_SUCCESS) { printf("disp_open failed\n"); diff --git a/src/common/cdk/user/middleware/kplayer/src/player_res.c b/src/common/cdk/user/middleware/kplayer/src/player_res.c index 1daaebd58..8432fb99c 100755 --- a/src/common/cdk/user/middleware/kplayer/src/player_res.c +++ b/src/common/cdk/user/middleware/kplayer/src/player_res.c @@ -153,9 +153,10 @@ k_s32 sys_deinit(k_bool deinit_vo) return K_SUCCESS; } -static k_s32 _initvo(k_u32 width,k_u32 height,k_u32 vdec_chn) +static k_s32 _initvo(k_u32 width,k_u32 height,k_u32 vdec_chn,K_PLAYER_CONNECTOR_TYPE type) { //init vo + display_set_connector_type((k_connector_type)type); display_layer_init(width,height); if (K_SUCCESS != kd_mapi_vdec_bind_vo(vdec_chn, 0, BIND_VO_LAYER)) @@ -167,7 +168,7 @@ static k_s32 _initvo(k_u32 width,k_u32 height,k_u32 vdec_chn) return K_SUCCESS; } -k_s32 disp_open(k_payload_type video_dec_type,k_u32 width,k_u32 height) +k_s32 disp_open(k_payload_type video_dec_type,k_u32 width,k_u32 height,K_PLAYER_CONNECTOR_TYPE type) { if (_vdec_vb_create_pool(0) != K_SUCCESS) @@ -198,7 +199,7 @@ k_s32 disp_open(k_payload_type video_dec_type,k_u32 width,k_u32 height) return K_FAILED; } - _initvo(width,height,ch); + _initvo(width,height,ch,type); return K_SUCCESS; } @@ -261,7 +262,7 @@ k_s32 disp_play(k_u8*pdata,k_u32 len,k_u64 timestamp,k_bool end_stream) k_u8 *virt_addr; k_u32 
blk_size; - blk_size = STREAM_BUF_SIZE; + blk_size = len; while(1) { diff --git a/src/common/cdk/user/middleware/kplayer/src/player_res.h b/src/common/cdk/user/middleware/kplayer/src/player_res.h index c011d753a..9fae81d03 100755 --- a/src/common/cdk/user/middleware/kplayer/src/player_res.h +++ b/src/common/cdk/user/middleware/kplayer/src/player_res.h @@ -28,6 +28,7 @@ #include "k_type.h" #include "k_payload_comm.h" +#include "kplayer.h" #ifdef __cplusplus @@ -39,7 +40,7 @@ extern "C" { k_s32 sys_init(k_bool init_vo); k_s32 sys_deinit(k_bool deinit_vo); -k_s32 disp_open(k_payload_type video_dec_type,k_u32 width,k_u32 height);//K_PT_H264/K_PT_H265 +k_s32 disp_open(k_payload_type video_dec_type,k_u32 width,k_u32 height,K_PLAYER_CONNECTOR_TYPE type);//K_PT_H264/K_PT_H265 k_s32 disp_play(k_u8*pdata,k_u32 len,k_u64 timestamp,k_bool end_stream); k_s32 disp_close(); diff --git a/src/common/cdk/user/middleware/mp4_format/src/mp4_format.c b/src/common/cdk/user/middleware/mp4_format/src/mp4_format.c index 5aa80dfe6..aa40381ff 100644 --- a/src/common/cdk/user/middleware/mp4_format/src/mp4_format.c +++ b/src/common/cdk/user/middleware/mp4_format/src/mp4_format.c @@ -609,7 +609,7 @@ int kd_mp4_write_frame(KD_HANDLE mp4_handle, KD_HANDLE track_handle, k_mp4_frame // FIXME, code-refactor expected k_track_ctx *track_audio = get_audio_track(mp4_handle); - if (track_audio->add_to_mp4 < 0) { + if (track_audio && track_audio->add_to_mp4 < 0) { track_audio->add_to_mp4 = mp4_writer_add_audio(mp4_instance->muxer_instance.mov, MOV_OBJECT_G711u, 1, 16, 8000, NULL, 0); } mp4_writer_init_segment(mp4_instance->muxer_instance.mov); @@ -636,7 +636,7 @@ int kd_mp4_write_frame(KD_HANDLE mp4_handle, KD_HANDLE track_handle, k_mp4_frame // FIXME, code-refactor expected k_track_ctx *track_audio = get_audio_track(mp4_handle); - if (track_audio->add_to_mp4 < 0) { + if (track_audio && track_audio->add_to_mp4 < 0) { track_audio->add_to_mp4 = mp4_writer_add_audio(mp4_instance->muxer_instance.mov, MOV_OBJECT_G711u, 1, 16, 8000, NULL, 0); } mp4_writer_init_segment(mp4_instance->muxer_instance.mov); diff --git a/src/common/cdk/user/samples/backchannel_client/main.cpp b/src/common/cdk/user/samples/backchannel_client/main.cpp index adfb4c82c..2b09bca63 100644 --- a/src/common/cdk/user/samples/backchannel_client/main.cpp +++ b/src/common/cdk/user/samples/backchannel_client/main.cpp @@ -5,6 +5,7 @@ #include #include "rtsp_client.h" #include "media.h" +#include "vo_cfg.h" using namespace std::chrono_literals; @@ -140,10 +141,15 @@ int main(int argc, char *argv[]) { g_exit_flag.store(false); if (argc < 2) { - printf("Usage: ./backchannel_client \n"); + printf("Usage: ./backchannel_client \n"); + printf(" out_type: vo type, see vo doc, default 0.\n"); return 0; } std::string url = argv[1]; + if (argc > 2) { + k_connector_type connector_type = (k_connector_type)atoi(argv[2]); + set_connector_type(connector_type); + } MyRtspClient *client = new MyRtspClient(); if (!client || client->Init() < 0) { diff --git a/src/common/cdk/user/samples/backchannel_client/vo_cfg.cpp b/src/common/cdk/user/samples/backchannel_client/vo_cfg.cpp index 3a4ee5086..05b8a04d0 100644 --- a/src/common/cdk/user/samples/backchannel_client/vo_cfg.cpp +++ b/src/common/cdk/user/samples/backchannel_client/vo_cfg.cpp @@ -33,12 +33,11 @@ #include "mapi_vo_api.h" #include "mapi_sys_api.h" #include "k_video_comm.h" -#include "k_vo_comm.h" -#include "k_connector_comm.h" -#include "mpi_connector_api.h" #define ENABLE_VO_LAYER 1 +static k_connector_type g_connector_type = 
HX8377_V2_MIPI_4LAN_1080X1920_30FPS; + typedef struct { k_u64 osd_phy_addr; @@ -162,7 +161,7 @@ static k_s32 sample_connector_init(void) k_s32 ret = 0; char dev_name[64] = {0}; k_s32 connector_fd; - k_connector_type connector_type = HX8377_V2_MIPI_4LAN_1080X1920_30FPS; + k_connector_type connector_type = g_connector_type; k_connector_info connector_info; memset(&connector_info, 0, sizeof(k_connector_info)); @@ -202,6 +201,10 @@ void vo_enable() kd_mapi_vo_enable(); } +void set_connector_type(k_connector_type &connector_type) { + g_connector_type = connector_type; +} + void vo_deinit() { kd_mapi_vo_disable(); @@ -214,7 +217,7 @@ void vo_layer_init(k_u32 width,k_u32 height) memset(&info, 0, sizeof(info)); // config layer - if (width > 1080) + if (width > 1080 && g_connector_type == HX8377_V2_MIPI_4LAN_1080X1920_30FPS) { info.act_size.width = height; info.act_size.height = width; diff --git a/src/common/cdk/user/samples/backchannel_client/vo_cfg.h b/src/common/cdk/user/samples/backchannel_client/vo_cfg.h index 57eb09a62..bd9c3e672 100644 --- a/src/common/cdk/user/samples/backchannel_client/vo_cfg.h +++ b/src/common/cdk/user/samples/backchannel_client/vo_cfg.h @@ -27,8 +27,12 @@ #define __VO_CFG_H__ #include "k_type.h" +#include "k_vo_comm.h" +#include "k_connector_comm.h" +#include "mpi_connector_api.h" void vo_init(); +void set_connector_type(k_connector_type &connector_type); void vo_enable(); void vo_deinit(); diff --git a/src/common/cdk/user/samples/mp4_muxer/sample_muxer.c b/src/common/cdk/user/samples/mp4_muxer/sample_muxer.c index c6cf942e2..9b9bc3b6e 100644 --- a/src/common/cdk/user/samples/mp4_muxer/sample_muxer.c +++ b/src/common/cdk/user/samples/mp4_muxer/sample_muxer.c @@ -399,6 +399,19 @@ int main(int argc, char *argv[]) { .audio_sample_rate = 44100 }; + if (argc > 1) { + for (int i = 1; i < argc; i += 2) { + if (strcmp(argv[i], "-s") == 0) { + sample_context.sensor_type = (k_vicap_sensor_type)atoi(argv[i+1]); + } else if (strcmp(argv[i], "-h") == 0) { + printf("Usage: ./sample_muxer -s 7\n"); + printf("-s: the sensor type: default 7\n"); + printf(" see camera sensor doc.\n"); + return 0; + } + } + } + printf("mp4 muxer...\n"); k_u32 ret = kd_mapi_sys_init(); diff --git a/src/common/cdk/user/samples/rtsp_demo/main.cpp b/src/common/cdk/user/samples/rtsp_demo/main.cpp index f5e522a71..3f01f0652 100644 --- a/src/common/cdk/user/samples/rtsp_demo/main.cpp +++ b/src/common/cdk/user/samples/rtsp_demo/main.cpp @@ -12,52 +12,15 @@ static void sigHandler(int sig_no) { } static void help() { - std::cout << "Usage: ./rtsp_demo -s 0 -n 2 -t h265 -w 1280 -h 720." << std::endl; - std::cout << "-s: the sensor type:" << std::endl; - std::cout << " 0: ov9732." << std::endl; - std::cout << " 1: ov9286 ir." << std::endl; - std::cout << " 2: ov9286 speckle." << std::endl; - std::cout << " 3: imx335 2LANE 1920Wx1080H." << std::endl; - std::cout << " 4: imx335 2LANE 2592Wx1944H." << std::endl; - std::cout << " 5: imx335 4LANE 2592Wx1944H." << std::endl; - std::cout << " 6: imx335 2LANE MCLK 7425 1920Wx1080H." << std::endl; - std::cout << " 7: imx335 2LANE MCLK 7425 2592Wx1944H." << std::endl; - std::cout << " 8: imx335 4LANE MCLK 7425 2592Wx1944H." << std::endl; + std::cout << "Usage: ./rtsp_demo -s 7 -n 2 -t h265 -w 1280 -h 720." << std::endl; + std::cout << "-s: the sensor type: default 7" << std::endl; + std::cout << " see camera sensor doc." << std::endl; std::cout << "-n: the session number, range: [1, 3]." << std::endl; std::cout << "-t: the video encoder type: h264/h265/mjpeg." 
<< std::endl; std::cout << "-w: the video encoder width." << std::endl; std::cout << "-h: the video encoder height." << std::endl; } -static k_vicap_sensor_type get_sensor_type(k_u32 sensor_index) -{ - switch (sensor_index) - { - case 0: - return OV_OV9732_MIPI_1280X720_30FPS_10BIT_LINEAR; - case 1: - return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_IR; - case 2: - return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE; - case 3: - return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR; - case 4: - return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_LINEAR; - case 5: - return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_LINEAR; - case 6: - return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_MCLK_7425_LINEAR; - case 7: - return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR; - case 8: - return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR; - default: - printf("unsupport sensor type %d, use default IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR\n", sensor_index); - return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR; - } - return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR; -} - int main(int argc, char *argv[]) { signal(SIGINT, sigHandler); signal(SIGPIPE, SIG_IGN); @@ -96,7 +59,7 @@ int main(int argc, char *argv[]) { } else if (strcmp(argv[i], "-h") == 0) { video_height = atoi(argv[i+1]); } else if (strcmp(argv[i], "-s") == 0) { - sensor_type = get_sensor_type(atoi(argv[i+1])); + sensor_type = (k_vicap_sensor_type)atoi(argv[i+1]); } else if (strcmp(argv[i], "--help") == 0) { help(); @@ -111,9 +74,8 @@ int main(int argc, char *argv[]) { return -1; } - std::string session_name = "session"; - for (int i = 0; i < session_num; i++) { + std::string session_name = "session"; SessionAttr session_attr; memset(&session_attr, 0, sizeof(session_attr)); session_attr.session_idx = i; diff --git a/src/common/cdk/user/samples/rtsp_demo/src/streaming_player.cpp b/src/common/cdk/user/samples/rtsp_demo/src/streaming_player.cpp index 5a4957897..cbe45e1aa 100644 --- a/src/common/cdk/user/samples/rtsp_demo/src/streaming_player.cpp +++ b/src/common/cdk/user/samples/rtsp_demo/src/streaming_player.cpp @@ -51,7 +51,7 @@ int StreamingPlayer::StreamingPlayerInit() { } memset(&dev_attr_info_, 0, sizeof(dev_attr_info_)); - if (sensor_type_ <= OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE) + if (sensor_type_ <= OV_OV9286_MIPI_1280X720_60FPS_10BIT_LINEAR_IR_SPECKLE || sensor_type_ >= SC_SC035HGS_MIPI_1LANE_RAW10_640X480_120FPS_LINEAR) dev_attr_info_.dw_en = K_FALSE; else dev_attr_info_.dw_en = K_TRUE; diff --git a/src/common/cdk/user/samples/rtsp_server/main.cpp b/src/common/cdk/user/samples/rtsp_server/main.cpp index 8399b6be0..427e3cbb9 100644 --- a/src/common/cdk/user/samples/rtsp_server/main.cpp +++ b/src/common/cdk/user/samples/rtsp_server/main.cpp @@ -19,16 +19,8 @@ static void sigHandler(int sig_no) { static void Usage() { std::cout << "Usage: ./rtsp_sever [-v] [-s ] [-t ] [-w ] [-h ] [-b ] [-a ]" << std::endl; std::cout << "-v: enable video session" << std::endl; - std::cout << "-s: the sensor type, default 3 :" << std::endl; - std::cout << " 0: ov9732." << std::endl; - std::cout << " 1: ov9286 ir." << std::endl; - std::cout << " 2: ov9286 speckle." << std::endl; - std::cout << " 3: imx335 2LANE 1920Wx1080H." << std::endl; - std::cout << " 4: imx335 2LANE 2592Wx1944H." << std::endl; - std::cout << " 5: imx335 4LANE 2592Wx1944H." << std::endl; - std::cout << " 6: imx335 2LANE MCLK 7425 1920Wx1080H." << std::endl; - std::cout << " 7: imx335 2LANE MCLK 7425 2592Wx1944H." 
<< std::endl; - std::cout << " 8: imx335 4LANE MCLK 7425 2592Wx1944H." << std::endl; + std::cout << "-s: the sensor type, default 7 :" << std::endl; + std::cout << " see camera sensor doc." << std::endl; std::cout << "-t: the video encoder type: h264/h265, default h265" << std::endl; std::cout << "-w: the video encoder width, default 1280" << std::endl; std::cout << "-h: the video encoder height, default 720" << std::endl; @@ -37,23 +29,6 @@ static void Usage() { exit(-1); } -static k_vicap_sensor_type get_sensor_type(k_u32 sensor_index) { - switch (sensor_index) { - case 0: return OV_OV9732_MIPI_1280X720_30FPS_10BIT_LINEAR; - case 1: return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_IR; - case 2: return OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE; - case 3: return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR; - case 4: return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_LINEAR; - case 5: return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_LINEAR; - case 6: return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_MCLK_7425_LINEAR; - case 7: return IMX335_MIPI_2LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR; - case 8: return IMX335_MIPI_4LANE_RAW12_2592X1944_30FPS_MCLK_7425_LINEAR; - default: - printf("unsupport sensor type %d, use default IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR\n", sensor_index); - return IMX335_MIPI_2LANE_RAW12_1920X1080_30FPS_LINEAR; - } -} - int parse_config(int argc, char *argv[], KdMediaInputConfig &config) { int result; opterr = 0; @@ -68,8 +43,8 @@ int parse_config(int argc, char *argv[], KdMediaInputConfig &config) { } case 's' : { int n = atoi(optarg); - if (n < 0 || n > 8) Usage(); - config.sensor_type = get_sensor_type(n); + if (n < 0 || n > 27) Usage(); + config.sensor_type = (k_vicap_sensor_type)n; config.video_valid = true; break; } diff --git a/src/common/cdk/user/samples/rtsp_server/media.cpp b/src/common/cdk/user/samples/rtsp_server/media.cpp index d96915c3c..9cede5185 100644 --- a/src/common/cdk/user/samples/rtsp_server/media.cpp +++ b/src/common/cdk/user/samples/rtsp_server/media.cpp @@ -74,7 +74,7 @@ int KdMedia::Init(const KdMediaInputConfig &config) { media_attr.media_config.vb_config.comm_pool[3].mode = VB_REMAP_MODE_NOCACHE; memset(&vcap_dev_info_, 0, sizeof(vcap_dev_info_)); - if (config.sensor_type <= OV_OV9286_MIPI_1280X720_30FPS_10BIT_LINEAR_SPECKLE) + if (config.sensor_type <= OV_OV9286_MIPI_1280X720_60FPS_10BIT_LINEAR_IR_SPECKLE || config.sensor_type >= SC_SC035HGS_MIPI_1LANE_RAW10_640X480_120FPS_LINEAR) vcap_dev_info_.dw_en = K_FALSE; else vcap_dev_info_.dw_en = K_TRUE; diff --git a/src/little/buildroot-ext/package/crypto_test/src/crypto_test.c b/src/little/buildroot-ext/package/crypto_test/src/crypto_test.c index 8560d51b8..b5c52611c 100755 --- a/src/little/buildroot-ext/package/crypto_test/src/crypto_test.c +++ b/src/little/buildroot-ext/package/crypto_test/src/crypto_test.c @@ -247,7 +247,7 @@ int socket_rsa(rsa_type_t rsatype, const uint8_t *d, uint8_t *in, uint8_t *out, - uint8_t *outlen) + uint32_t *outlen) { int len; int opfd; @@ -530,6 +530,9 @@ int do_rsa(void) if(memcmp(out, rsa_tp[i].s, outlen) == 0) { printf("RSA sign Success!\n"); + for(j=0; j configs/c.1/strings/0x409/configuration ln -s functions/hid.usb0/ configs/c.1/ ln -s functions/hid.usb1/ configs/c.1/ -echo 91540000.usb-otg > UDC +echo 91500000.usb-otg > UDC diff --git a/src/little/buildroot-ext/package/usb_test/src/gadget-storage-mem.sh b/src/little/buildroot-ext/package/usb_test/src/gadget-storage-mem.sh index a1caad6d4..67ae2071f 100755 --- 
a/src/little/buildroot-ext/package/usb_test/src/gadget-storage-mem.sh +++ b/src/little/buildroot-ext/package/usb_test/src/gadget-storage-mem.sh @@ -32,4 +32,4 @@ echo /dev/loop0 >functions/mass_storage.usb0/lun.0/file ln -s functions/mass_storage.usb0 configs/c.1 -echo 91540000.usb-otg > UDC \ No newline at end of file +echo 91500000.usb-otg > UDC \ No newline at end of file diff --git a/src/little/buildroot-ext/package/usb_test/src/gadget-storage.sh b/src/little/buildroot-ext/package/usb_test/src/gadget-storage.sh index a56b7d367..dd84711d5 100755 --- a/src/little/buildroot-ext/package/usb_test/src/gadget-storage.sh +++ b/src/little/buildroot-ext/package/usb_test/src/gadget-storage.sh @@ -32,4 +32,4 @@ fi ln -s functions/mass_storage.usb0 configs/c.1 -echo 91540000.usb-otg > UDC \ No newline at end of file +echo 91500000.usb-otg > UDC \ No newline at end of file diff --git a/src/little/buildroot-ext/package/usb_test/src/gadget-usbtest.sh b/src/little/buildroot-ext/package/usb_test/src/gadget-usbtest.sh index 9e2dbb711..c0bf3799f 100755 --- a/src/little/buildroot-ext/package/usb_test/src/gadget-usbtest.sh +++ b/src/little/buildroot-ext/package/usb_test/src/gadget-usbtest.sh @@ -38,4 +38,4 @@ echo 120 > configs/c.1/MaxPower ln -s functions/Loopback.0 configs/c.2 ln -s functions/SourceSink.0 configs/c.1 -echo 91540000.usb-otg > UDC \ No newline at end of file +echo 91500000.usb-otg > UDC \ No newline at end of file diff --git a/src/little/buildroot-ext/package/ws2812/src/ws2812.c b/src/little/buildroot-ext/package/ws2812/src/ws2812.c index 757c09e26..19a87889f 100755 --- a/src/little/buildroot-ext/package/ws2812/src/ws2812.c +++ b/src/little/buildroot-ext/package/ws2812/src/ws2812.c @@ -1,356 +1,216 @@ -/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include - -static void sleep_ms(unsigned int ms) -{ - struct timeval tval; - - tval.tv_sec = ms / 1000; - tval.tv_usec = (ms * 1000) % 1000000; - select(0, NULL, NULL, NULL, &tval); -} - - -/* ws2812 24bit GRB -------------------------------------- -| 8bit G | 8bit R | 8bit B | -------------------------------------- - -*/ - -/* 测试使用灯条共33个ws2812灯珠 */ -// green -static unsigned char color1[] = { 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0, \ - 0xff, 0x0, 0x0 -}; - -// blue -static unsigned char color2[] = { 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff, \ - 0x0, 0x0, 0xff -}; - -// red -static unsigned char color3[] = { 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0, \ - 0x0, 0xff, 0x0 -}; - -static unsigned char grb_color[] = { - 0x00, 0xff, 0x00, - 0xff, 0x00, 0x00, - 0x00, 0x00, 0xff, - 0x00, 0xff, 0x00, - 0xff, 0x00, 0x00, - 0x00, 0x00, 0xff, - 0x00, 0x8b, 0x00, - 0x00, 0xcd, 0x00, - 0x00, 0xef, 0x00, - 0x00, 0xff, 0x00, - 0x45, 0x8b, 0x00, - 0x66, 0xcd, 0x00, - 0x76, 0xee, 0x00, - 0x7f, 0xff, 0x00, - 0x7e, 0x8b, 0x66, - 0xba, 0xcd, 0x96, - 0xd8, 0xee, 0xae, - 0xe7, 0xff, 0xba, - 0x47, 0x8b, 0x26, - 0x68, 0xcd, 0x39, - 0x79, 0xee, 0x42, - 0x82, 0xff, 0x47, - 0x95, 0xcd, 0x0c, - 0xad, 0xee, 0x0e, - 0xb9, 0xff, 0x0f, - 0xad, 0xcd, 0x00, - 0xc9, 0xee, 0x00, - 0xd7, 0xff, 0x00, - 0xcd, 0xcd, 0x00, - 0xee, 0xee, 0x00, - 0xff, 0xff, 0x00, - 0xee, 0x76, 0x00, - 0xff, 0x7f, 0x00, -}; - -/* 33个灯,每个灯24bit */ -static unsigned char light[99] = { 0x0 }; - -int main(int argc, char *argv[]) -{ - int fd; - int i = 0; - int m = 33; - int n; - - fd = open("/dev/ws2812", O_RDWR); - if(fd<=0) - { - printf("open ws2812 device error!\n"); - return 0; - } - -#if 0 - /* 多彩色 */ - write(fd, grb_color, 
-
-    /* blink */
-    write(fd, color1, sizeof(color1));
-    sleep(1);
-    write(fd, color2, sizeof(color2));
-    sleep(1);
-    write(fd, color3, sizeof(color3));
-    sleep(1);
-    write(fd, color1, sizeof(color1));
-    sleep_ms(50);
-    write(fd, color2, sizeof(color2));
-    sleep_ms(50);
-    write(fd, color3, sizeof(color3));
-    sleep_ms(50);
-    write(fd, color1, sizeof(color1));
-    sleep_ms(50);
-    write(fd, color2, sizeof(color2));
-    sleep_ms(50);
-    write(fd, color3, sizeof(color3));
-    sleep_ms(50);
-    write(fd, color1, sizeof(color1));
-    sleep_ms(50);
-    write(fd, color2, sizeof(color2));
-    sleep_ms(50);
-    write(fd, color3, sizeof(color3));
-    sleep_ms(50);
-    write(fd, color1, sizeof(color1));
-    sleep_ms(50);
-    write(fd, color2, sizeof(color2));
-    sleep_ms(50);
-    write(fd, color3, sizeof(color3));
-    sleep_ms(50);
-    write(fd, color1, sizeof(color1));
-    sleep_ms(50);
-    write(fd, color2, sizeof(color2));
-    sleep_ms(50);
-    write(fd, color3, sizeof(color3));
-    sleep_ms(50);
-#endif
-
-#if 0
-    /* running light */
-    while (1)
-    {
-        for (i = 0; i < m; i++)
-        {
-            write(fd, light, sizeof(light));
-
-            light[i*3 + 2] = 0xff;
-            if (i > 0)
-                light[(i-1)*3 +2] = 0x0;
-            sleep_ms(50);
-        }
-        m --;
-        if (m < 0)
-        {
-            light[2] = 0xff;
-            write(fd, light, sizeof(light));
-            break;
-        }
-    }
-
-    memset(light, 0, sizeof(light));
-    write(fd, light, sizeof(light));
-#endif
-
-#if 1
-    /* breathing light */
-    while(1)
-    {
-        for (n = 0; n < 0xff; n++)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3] = n;    //g
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-
-        for (n = 0xff; n >= 0; n--)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3] = n;    //g
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-
-        for (n = 0; n < 0xff; n++)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3 + 1] = n;    //r
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-
-        for (n = 0xff; n >= 0; n--)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3 + 1] = n;    //r
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-
-        for (n = 0; n < 0xff; n++)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3 + 2] = n;    //b
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-
-        for (n = 0xff; n >= 0; n--)
-        {
-            for (i = 0; i < m; i++)
-            {
-                light[i*3 + 2] = n;    //b
-            }
-            write(fd, light, sizeof(light));
-            sleep_ms(5);
-        }
-    }
-#endif
-
-    return 0;
-}
-
+/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/select.h>
+#include <sys/ioctl.h>
+
+#define WSIOC_RESET _IOW('W', 0x10, int)
+#define LED_NUM 24 /* adjust to the number of LEDs actually used */
+
+static void sleep_ms(unsigned int ms)
+{
+    struct timeval tval;
+
+    tval.tv_sec = ms / 1000;
+    tval.tv_usec = (ms * 1000) % 1000000;
+    select(0, NULL, NULL, NULL, &tval);
+}
+
+/* ws2812 24bit GRB
+-------------------------------------
+| 8bit G | 8bit R | 8bit B |
+-------------------------------------
+1 --> 1110
+0 --> 1000
+
+*/
+
+/* the test strip has 24 ws2812 LEDs in total */
+
+static void w2812_reset_clear(int fd, int num)
+{
+    unsigned char *clear;
+    sleep(1);
+
+    clear = (unsigned char*)malloc(num * 3);
+    memset(clear, 0, num * 3);
+
+    write(fd, clear, num * 3);
+    write(fd, clear, num * 3);
+    sleep(1);
+    free(clear);
+}
+
+static void waterfall_light(int fd, int num)
+{
+    int i;
+    int cnt = 10;
+    int len = num * 3;
+    unsigned char *data;
+    data = (unsigned char*)malloc(len);
+
+    w2812_reset_clear(fd, LED_NUM);
+    printf("waterfall light....\n");
+
+    memset(data, 0, len);
+
+    while (cnt--)
+    {
+        for(i = 0; i < num; i++)
+        {
+            data[i*3 + 0] = 0xa9;
+            data[i*3 + 1] = 0xa9;
+            data[i*3 + 2] = 0xa9;
+            write(fd, data, len);
+            sleep_ms(20);
+        }
+
+        memset(data, 0, len);
+        write(fd, data, len);
+    }
+    free(data);
+}
+
+static void breath_light(int fd, int num)
+{
+    int len = num * 3;
+    unsigned char *data;
+    int i, n;
+    data = (unsigned char*)malloc(len);
+
+    w2812_reset_clear(fd, LED_NUM);
+    printf("breathing light.....\n");
+
+    memset(data, 0, len);
+
+    while(1)
+    {
+        for (i = 0; i < 0xff; i++)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = 0x0;
+                data[n * 3 + 1] = i;
+                data[n * 3 + 2] = 0x0;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+
+        for (i = 0xff; i >= 0; i--)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = 0x0;
+                data[n * 3 + 1] = i;
+                data[n * 3 + 2] = 0x0;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+
+        for (i = 0; i < 0xff; i++)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = i;
+                data[n * 3 + 1] = 0x0;
+                data[n * 3 + 2] = 0x0;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+
+        for (i = 0xff; i >= 0; i--)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = i;
+                data[n * 3 + 1] = 0x0;
+                data[n * 3 + 2] = 0x0;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+
+        for (i = 0; i < 0xff; i++)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = 0x0;
+                data[n * 3 + 1] = 0x0;
+                data[n * 3 + 2] = i;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+
+        for (i = 0xff; i >= 0; i--)
+        {
+            for (n = 0; n < num; n++)
+            {
+                data[n * 3] = 0x0;
+                data[n * 3 + 1] = 0x0;
+                data[n * 3 + 2] = i;
+            }
+            write(fd, data, len);
+            sleep_ms(1);
+        }
+    }
+
+    free(data);
+}
+
+
+int main(int argc, char *argv[])
+{
+    int fd;
+    int n = LED_NUM;
+
+    fd = open("/dev/ws2812", O_RDWR);
+    if(fd<=0)
+    {
+        printf("open ws2812 device error!\n");
+        return 0;
+    }
+
+    if (argv[1])
+    {
+        n = atoi(argv[1]);
+    }
+
+    // ioctl(fd, WSIOC_RESET, NULL); /* generates the reset timing; use as needed */
+
+    waterfall_light(fd, n);
+    breath_light(fd, n);
+
+    close(fd);
+    return 0;
+}
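The comment block in the rewritten test program documents the wire format: each WS2812 data bit is stretched to four I2S bits, `1 -> 1110` and `0 -> 1000`, so one 8-bit colour byte becomes exactly one 32-bit word on the I2S bus. The kernel driver below does this in `rgb_to_ws2812()`, whose body is not part of this patch; the sketch here is an assumed equivalent for illustration:

```cpp
// Sketch of the 4x bit expansion described in the comment above:
// each WS2812 bit becomes one I2S nibble, 1 -> 0xE (1110), 0 -> 0x8 (1000).
#include <cstdint>
#include <cstdio>

static uint32_t expand_ws2812_byte(uint8_t byte) {
    uint32_t word = 0;
    for (int bit = 7; bit >= 0; --bit) {     // MSB first, as WS2812 expects
        word <<= 4;
        word |= (byte & (1u << bit)) ? 0xEu : 0x8u;
    }
    return word;
}

int main() {
    printf("0xff -> 0x%08x\n", (unsigned)expand_ws2812_byte(0xff)); // 0xeeeeeeee
    printf("0x00 -> 0x%08x\n", (unsigned)expand_ws2812_byte(0x00)); // 0x88888888
    return 0;
}
```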
diff --git a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb.dts b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb.dts
index be5f23600..25e8e1ca1 100755
--- a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb.dts
+++ b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb.dts
@@ -216,5 +216,6 @@
 };
 
 &usbotg1 {
+	dr_mode = "host";
 	status = "okay";
 };
\ No newline at end of file
diff --git a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_doorlock.dts b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_doorlock.dts
index d936a544d..dfceef925 100755
--- a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_doorlock.dts
+++ b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_doorlock.dts
@@ -208,5 +208,6 @@
 };
 
 &usbotg1 {
+	dr_mode = "host";
 	status = "okay";
 };
\ No newline at end of file
diff --git a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_nand.dts b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_nand.dts
index 5792d46dc..8391af4b3 100755
--- a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_nand.dts
+++ b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_evb_nand.dts
@@ -164,5 +164,6 @@
 };
 
 &usbotg1 {
+	dr_mode = "host";
 	status = "okay";
 };
\ No newline at end of file
diff --git a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_peephole_device.dts b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_peephole_device.dts
index 726212bbe..05a2944a7 100755
--- a/src/little/linux/arch/riscv/boot/dts/kendryte/k230_peephole_device.dts
+++ b/src/little/linux/arch/riscv/boot/dts/kendryte/k230_peephole_device.dts
@@ -224,5 +224,6 @@
 };
 
 &usbotg1 {
+	dr_mode = "host";
 	status = "okay";
 };
diff --git a/src/little/linux/drivers/canaan-ws2812/canaan-i2s-ws2812.c b/src/little/linux/drivers/canaan-ws2812/canaan-i2s-ws2812.c
index 22291d06e..75bb2013a 100755
--- a/src/little/linux/drivers/canaan-ws2812/canaan-i2s-ws2812.c
+++ b/src/little/linux/drivers/canaan-ws2812/canaan-i2s-ws2812.c
@@ -32,6 +32,7 @@
 #include "canaan-i2s-ws2812.h"
 
 #define CANAAN_DRIVER_NAME "canaan-ws2812"
+#define WSIOC_RESET _IOW('W', 0x10, int)
 
 #define I2S_CHANNEL_0 0
 #define I2S_CHANNEL_1 1
@@ -292,50 +293,26 @@ static void k230_i2s_init(const void *reg, int i2s_tx_num, int word_len)
 static void i2s_transmit_data(const void *reg, int channel_num, void *buf, int len)
 {
     int i = 0;
-    int remanent = len % 4;
     int *data = buf;
-    char *re = buf;
-    int value = 0;
     i2s_t *i2s = (i2s_t*)reg;
 
-    while (i < len/4)
+    while (i < len/8)
     {
         if (i2s->channel[channel_num].isr & 0x10)
         {
-            i2s->channel[channel_num].left_rxtx = data[i];
-            if ((i + 1) >= (len / 4))
-                break;
-            i2s->channel[channel_num].right_rxtx = data[i+1];
-            i += 2;
+            i2s->channel[channel_num].left_rxtx = data[i*2];
+            i2s->channel[channel_num].right_rxtx = data[i*2+1];
+            i++;
         }
     }
-
-    if (remanent)
-    {
-        for (i = 0; i < remanent; i++)
-        {
-            value |= re[len - 1 - i] << 8*(3-i);
-        }
-        if (i == 3)
-        {
-            value |= 0xff;
-        } else
-        if (i == 2)
-        {
-            value |= 0xffff;
-        } else
-        if (i == 1)
-        {
-            value |= 0xffffff;
-        }
-        i2s->channel[channel_num].right_rxtx = value;
-    }
 }
 
 static int ws2812_open(struct inode *inode, struct file *file)
 {
     ws2812_t *ws2812 = container_of(file->private_data, ws2812_t, mdev);
+
+    k230_i2s_init(ws2812->base, I2S_CHANNEL_0, 32);
 
     /* enable i2s channel */
     i2s_tx_channel_enable(ws2812->base, I2S_CHANNEL_0, true); //channel0 output enable
@@ -357,7 +334,13 @@ static ssize_t ws2812_write(struct file *file, const char __user *buf, size_t si
         return -EFAULT;
     }
 
-    ws2812_data = kmalloc(size * 4 + 4, GFP_KERNEL);
+    if (size % 2)
+    {
+        ws2812_data = kmalloc(size * 4 + 4, GFP_KERNEL);
+    } else {
+        ws2812_data = kmalloc(size * 4, GFP_KERNEL);
+    }
+
     if (!ws2812_data)
         return -ENOMEM;
@@ -365,16 +348,45 @@ static ssize_t ws2812_write(struct file *file, const char __user *buf, size_t si
     {
         ws2812_data[i] = rgb_to_ws2812(data[i]);
     }
-    ws2812_data[i] = 0xffffffff;
-    i2s_transmit_data(ws2812->base, I2S_CHANNEL_0, (void*)ws2812_data, size * 4 + 4);
+    if (size % 2)
+    {
+        ws2812_data[i] = 0x0;
+        i2s_transmit_data(ws2812->base, I2S_CHANNEL_0, (void*)ws2812_data, size * 4 + 4);
+    } else {
+        i2s_transmit_data(ws2812->base, I2S_CHANNEL_0, (void*)ws2812_data, size * 4);
+    }
+
+    kfree(data);
+    kfree(ws2812_data);
 
     return size;
 }
 
+static long ws2812_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+    unsigned int *data;
+    int i;
+    ws2812_t *ws2812 = container_of(file->private_data, ws2812_t, mdev);
+
+    data = kmalloc(400, GFP_KERNEL);
+    if (!data)
+        return -ENOMEM;
+
+    for (i = 0; i < 50; i++)
+        data[i] = 0xffffffff;
+    for (i = 50; i < 100; i++)
+        data[i] = 0x0;
+
+    i2s_transmit_data(ws2812->base, I2S_CHANNEL_0, (void*)data, 400);
+    kfree(data);
+    return 0;
+}
+
 static const struct file_operations ws2812_fops = {
     .owner = THIS_MODULE,
     .open = ws2812_open,
     .write = ws2812_write,
+    .unlocked_ioctl = ws2812_ioctl,
 };
 
@@ -415,8 +427,6 @@ static int k230_ws2812_probe(struct platform_device *pdev)
     misc->fops = &ws2812_fops;
     misc->parent = dev;
 
-    k230_i2s_init(ws2812->base, I2S_CHANNEL_0, 32);
-
     ret = misc_register(misc);
     if(ret < 0){
         printk("ws2812 register cdev failed!\n");
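Why the driver change pads odd buffers: the reworked `i2s_transmit_data()` now services the FIFO in left/right pairs, consuming two 32-bit words (8 bytes) per iteration, so the transfer length must be an even number of words. `ws2812_write()` expands each colour byte into one word and, when the byte count is odd, appends a single zero word so the final pair is complete (the old code instead always appended `0xffffffff`). A small constexpr sketch of that sizing rule:

```cpp
// Minimal sketch of the padding rule in ws2812_write above: one 32-bit word
// per colour byte, rounded up to an even word count for paired I2S output.
#include <cstddef>

constexpr std::size_t padded_words(std::size_t color_bytes) {
    return (color_bytes % 2) ? color_bytes + 1 : color_bytes;
}

static_assert(padded_words(99) == 100, "33 LEDs x 3 bytes pads to 100 words");
static_assert(padded_words(72) == 72,  "24 LEDs x 3 bytes needs no padding");
```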
diff --git a/src/reference/ai_poc/CMakeLists.txt b/src/reference/ai_poc/CMakeLists.txt
index 7c06b196a..4fcb28317 100644
--- a/src/reference/ai_poc/CMakeLists.txt
+++ b/src/reference/ai_poc/CMakeLists.txt
@@ -19,8 +19,6 @@
 include_directories(${nncase_sdk_root}/riscv64/nncase/include)
 include_directories(${nncase_sdk_root}/riscv64/nncase/include/nncase/runtime)
 link_directories(${nncase_sdk_root}/riscv64/nncase/lib/)
-add_definitions(-DCONFIG_BOARD_K230_CANMV)
-
 add_subdirectory(face_detection)
 add_subdirectory(face_landmark)
 add_subdirectory(face_glasses)
@@ -52,3 +50,10 @@ add_subdirectory(smoke_detect)
 add_subdirectory(nanotracker)
 add_subdirectory(traffic_light_detect)
 add_subdirectory(space_resize)
+add_subdirectory(eye_gaze)
+add_subdirectory(helmet_detect)
+add_subdirectory(puzzle_game)
+add_subdirectory(dynamic_gesture)
+add_subdirectory(ocr)
+add_subdirectory(yolop_lane_seg)
+add_subdirectory(anomaly_det)
\ No newline at end of file
diff --git a/src/reference/ai_poc/README.md b/src/reference/ai_poc/README.md
deleted file mode 100644
index 0cd2e8d0a..000000000
--- a/src/reference/ai_poc/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## K230 AI Demo build notes
-
-The K230 AI Demo integrates face, human-body, hand, license-plate, word-completion and other modules, covering classification, detection, segmentation, recognition and tracking, and serves as a reference for developing AI applications on the K230.
-
-The K230 AI Demo can be built for two boards, CANMV-K230 (right) and EVB-K230 (left); the build targets CANMV-K230 by default.
-
-CANMV-K230 vs EVB-K230 comparison image
-
-```cmake
-#CMakeLists.txt
-.....
-include_directories(${nncase_sdk_root}/riscv64/nncase/include/nncase/runtime)
-link_directories(${nncase_sdk_root}/riscv64/nncase/lib/)
-#To target the EVB-K230 board, remove the add_definitions(-DCONFIG_BOARD_K230_CANMV) line from the CMakeLists.txt in this directory
-add_definitions(-DCONFIG_BOARD_K230_CANMV)
-
-add_subdirectory(face_detection)
-.....
-```
-
-
-
-
-
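Context for the two hunks above: the deleted README told EVB-K230 users to strip `add_definitions(-DCONFIG_BOARD_K230_CANMV)` by hand; this patch removes the global definition outright, so board selection no longer happens through a tree-wide macro. A hedged illustration of the kind of compile-time gate such a definition controls; the values are placeholders, not taken from any ai_poc source:

```cpp
// Hypothetical example of what a global -DCONFIG_BOARD_K230_CANMV used to
// switch: with the add_definitions() line removed, code like this takes the
// EVB branch unless an individual target defines the macro itself.
#ifdef CONFIG_BOARD_K230_CANMV
static const int kDisplayConnector = 1; // placeholder CANMV wiring
#else
static const int kDisplayConnector = 0; // placeholder EVB wiring
#endif
```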
diff --git a/src/reference/ai_poc/anomaly_det/CMakeLists.txt b/src/reference/ai_poc/anomaly_det/CMakeLists.txt
new file mode 100644
index 000000000..b795aa01e
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/CMakeLists.txt
@@ -0,0 +1,22 @@
+set(src main.cc utils.cc ai_base.cc anomaly_det.cc)
+set(bin anomaly_det.elf)
+find_package(OpenMP REQUIRED)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Ofast")
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(.)
+include_directories(${nncase_sdk_root}/riscv64/rvvlib/include)
+include_directories(${k230_sdk}/src/big/mpp/userapps/api/)
+include_directories(${k230_sdk}/src/big/mpp/include)
+include_directories(${k230_sdk}/src/big/mpp/include/comm)
+include_directories(${k230_sdk}/src/big/mpp/userapps/sample/sample_vo)
+link_directories(${nncase_sdk_root}/riscv64/rvvlib/)
+add_executable(${bin} ${src})
+target_compile_options(${bin} PRIVATE ${OpenMP_CXX_FLAGS})
+
+target_link_libraries(${bin} -Wl,--start-group rvv Nncase.Runtime.Native nncase.rt_modules.k230 functional_k230 sys vicap vb cam_device cam_engine
+ hal oslayer ebase fpga isp_drv binder auto_ctrol common cam_caldb isi 3a buffer_management cameric_drv video_in virtual_hal start_engine cmd_buffer
+ switch cameric_reg_drv t_database_c t_mxml_c t_json_c t_common_c vo connector sensor atomic dma -Wl,--end-group)
+
+target_link_libraries(${bin} opencv_imgcodecs opencv_imgproc opencv_core zlib libjpeg-turbo libopenjp2 libpng libtiff libwebp csi_cv OpenMP::OpenMP_CXX)
+install(TARGETS ${bin} DESTINATION bin)
+
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Cholesky b/src/reference/ai_poc/anomaly_det/Eigen/Cholesky
new file mode 100644
index 000000000..a318ceb79
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/Cholesky
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLESKY_MODULE_H
+#define EIGEN_CHOLESKY_MODULE_H
+
+#include "Core"
+#include "Jacobi"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup Cholesky_Module Cholesky module
+ *
+ *
+ *
+ * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
+ * Those decompositions are also accessible via the following methods:
+ *  - MatrixBase::llt()
+ *  - MatrixBase::ldlt()
+ *  - SelfAdjointView::llt()
+ *  - SelfAdjointView::ldlt()
+ *
+ * \code
+ * #include <Eigen/Cholesky>
+ * \endcode
+ */
+
+#include "src/Cholesky/LLT.h"
+#include "src/Cholesky/LDLT.h"
+#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
+#include "src/misc/lapacke.h"
+#endif
+#include "src/Cholesky/LLT_LAPACKE.h"
+#endif
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLESKY_MODULE_H
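The remainder of this patch vendors the Eigen headers into `anomaly_det/`. The diff does not show how `anomaly_det.cc` uses them, but a common pattern for this kind of demo is scoring feature vectors by Mahalanobis distance to a training distribution, which is exactly what the Cholesky module added above is good for. A speculative, self-contained sketch; nothing here is taken from the demo's sources:

```cpp
// Speculative example of anomaly scoring with the vendored Eigen headers:
// Mahalanobis distance d^T * cov^-1 * d computed via an LLT factorization,
// avoiding an explicit matrix inverse.
#include <Eigen/Dense>

float anomaly_score(const Eigen::MatrixXf& train,   // rows = normal samples
                    const Eigen::VectorXf& sample) {
    const Eigen::VectorXf mean = train.colwise().mean();
    const Eigen::MatrixXf centered = train.rowwise() - mean.transpose();
    Eigen::MatrixXf cov = centered.adjoint() * centered
                        / float(train.rows() - 1);
    cov.diagonal().array() += 1e-5f;                 // regularize
    const Eigen::VectorXf d = sample - mean;
    return d.dot(Eigen::LLT<Eigen::MatrixXf>(cov).solve(d));
}
```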
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/CholmodSupport b/src/reference/ai_poc/anomaly_det/Eigen/CholmodSupport
new file mode 100644
index 000000000..bed8924d3
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/CholmodSupport
@@ -0,0 +1,48 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
+#define EIGEN_CHOLMODSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+  #include <cholmod.h>
+}
+
+/** \ingroup Support_modules
+ * \defgroup CholmodSupport_Module CholmodSupport module
+ *
+ * This module provides an interface to the Cholmod library which is part of the suitesparse package.
+ * It provides the two following main factorization classes:
+ * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
+ * - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
+ *
+ * For the sake of completeness, this module also proposes the two following classes:
+ * - class CholmodSimplicialLLT
+ * - class CholmodSimplicialLDLT
+ * Note that these classes do not bring any particular advantage compared to the built-in
+ * SimplicialLLT and SimplicialLDLT factorization classes.
+ *
+ * \code
+ * #include <Eigen/CholmodSupport>
+ * \endcode
+ *
+ * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies.
+ * The dependencies depend on how cholmod has been compiled.
+ * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task.
+ *
+ */
+
+#include "src/CholmodSupport/CholmodSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLMODSUPPORT_MODULE_H
+
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Core b/src/reference/ai_poc/anomaly_det/Eigen/Core
new file mode 100644
index 000000000..5921e15f9
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/Core
@@ -0,0 +1,384 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CORE_H
+#define EIGEN_CORE_H
+
+// first thing Eigen does: stop the compiler from reporting useless warnings.
+#include "src/Core/util/DisableStupidWarnings.h"
+
+// then include this file where all our macros are defined. It's really important to do it first because
+// it's where we do all the compiler/OS/arch detections and define most defaults.
+#include "src/Core/util/Macros.h"
+
+// This detects SSE/AVX/NEON/etc. and configure alignment settings
+#include "src/Core/util/ConfigureVectorization.h"
+
+// We need cuda_runtime.h/hip_runtime.h to ensure that
+// the EIGEN_USING_STD macro works properly on the device side
+#if defined(EIGEN_CUDACC)
+  #include <cuda_runtime.h>
+#elif defined(EIGEN_HIPCC)
+  #include <hip/hip_runtime.h>
+#endif
+
+
+#ifdef EIGEN_EXCEPTIONS
+  #include <new>
+#endif
+
+// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
+// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
+#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) && EIGEN_GNUC_AT_MOST(5,5)
+  #pragma GCC optimize ("-fno-ipa-cp-clone")
+#endif
+
+// Prevent ICC from specializing std::complex operators that silently fail
+// on device. This allows us to use our own device-compatible specializations
+// instead.
+#if defined(EIGEN_COMP_ICC) && defined(EIGEN_GPU_COMPILE_PHASE) \
+  && !defined(_OVERRIDE_COMPLEX_SPECIALIZATION_)
+#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1
+#endif
+#include <complex>
+
+// this include file manages BLAS and MKL related macros
+// and inclusion of their respective header files
+#include "src/Core/util/MKL_support.h"
+
+
+#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
+  #define EIGEN_HAS_GPU_FP16
+#endif
+
+#if defined(EIGEN_HAS_CUDA_BF16) || defined(EIGEN_HAS_HIP_BF16)
+  #define EIGEN_HAS_GPU_BF16
+#endif
+
+#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
+  #define EIGEN_HAS_OPENMP
+#endif
+
+#ifdef EIGEN_HAS_OPENMP
+#include <omp.h>
+#endif
+
+// MSVC for windows mobile does not have the errno.h file
+#if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM
+#define EIGEN_HAS_ERRNO
+#endif
+
+#ifdef EIGEN_HAS_ERRNO
+#include <cerrno>
+#endif
+#include <cstddef>
+#include <cstdlib>
+#include <cmath>
+#include <cassert>
+#include <functional>
+#include <sstream>
+#ifndef EIGEN_NO_IO
+  #include <iosfwd>
+#endif
+#include <cstring>
+#include <string>
+#include <limits>
+#include <climits> // for CHAR_BIT
+// for min/max:
+#include <algorithm>
+
+#if EIGEN_HAS_CXX11
+#include <array>
+#endif
+
+// for std::is_nothrow_move_assignable
+#ifdef EIGEN_INCLUDE_TYPE_TRAITS
+#include <type_traits>
+#endif
+
+// for outputting debug info
+#ifdef EIGEN_DEBUG_ASSIGN
+#include <iostream>
+#endif
+
+// required for __cpuid, needs to be included after cmath
+#if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE
+  #include <intrin.h>
+#endif
+
+#if defined(EIGEN_USE_SYCL)
+  #undef min
+  #undef max
+  #undef isnan
+  #undef isinf
+  #undef isfinite
+  #include <CL/sycl.hpp>
+  #include <map>
+  #include <memory>
+  #include <utility>
+  #include <thread>
+  #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM0
+  #define EIGEN_SYCL_LOCAL_THREAD_DIM0 16
+  #endif
+  #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM1
+  #define EIGEN_SYCL_LOCAL_THREAD_DIM1 16
+  #endif
+#endif
+
+
+#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT
+// This will generate an error message:
+#error Eigen2-support is only available up to version 3.2. Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information
+#endif
+
+namespace Eigen {
+
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
+// ensure QNX/QCC support
+using std::size_t;
+// gcc 4.6.0 wants std:: for ptrdiff_t
+using std::ptrdiff_t;
+
+}
+
+/** \defgroup Core_Module Core module
+ * This is the main module of Eigen providing dense matrix and vector support
+ * (both fixed and dynamic size) with all the features corresponding to a BLAS library
+ * and much more...
+ * + * \code + * #include + * \endcode + */ + +#include "src/Core/util/Constants.h" +#include "src/Core/util/Meta.h" +#include "src/Core/util/ForwardDeclarations.h" +#include "src/Core/util/StaticAssert.h" +#include "src/Core/util/XprHelper.h" +#include "src/Core/util/Memory.h" +#include "src/Core/util/IntegralConstant.h" +#include "src/Core/util/SymbolicIndex.h" + +#include "src/Core/NumTraits.h" +#include "src/Core/MathFunctions.h" +#include "src/Core/GenericPacketMath.h" +#include "src/Core/MathFunctionsImpl.h" +#include "src/Core/arch/Default/ConjHelper.h" +// Generic half float support +#include "src/Core/arch/Default/Half.h" +#include "src/Core/arch/Default/BFloat16.h" +#include "src/Core/arch/Default/TypeCasting.h" +#include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h" + +#if defined EIGEN_VECTORIZE_AVX512 + #include "src/Core/arch/SSE/PacketMath.h" + #include "src/Core/arch/SSE/TypeCasting.h" + #include "src/Core/arch/SSE/Complex.h" + #include "src/Core/arch/AVX/PacketMath.h" + #include "src/Core/arch/AVX/TypeCasting.h" + #include "src/Core/arch/AVX/Complex.h" + #include "src/Core/arch/AVX512/PacketMath.h" + #include "src/Core/arch/AVX512/TypeCasting.h" + #include "src/Core/arch/AVX512/Complex.h" + #include "src/Core/arch/SSE/MathFunctions.h" + #include "src/Core/arch/AVX/MathFunctions.h" + #include "src/Core/arch/AVX512/MathFunctions.h" +#elif defined EIGEN_VECTORIZE_AVX + // Use AVX for floats and doubles, SSE for integers + #include "src/Core/arch/SSE/PacketMath.h" + #include "src/Core/arch/SSE/TypeCasting.h" + #include "src/Core/arch/SSE/Complex.h" + #include "src/Core/arch/AVX/PacketMath.h" + #include "src/Core/arch/AVX/TypeCasting.h" + #include "src/Core/arch/AVX/Complex.h" + #include "src/Core/arch/SSE/MathFunctions.h" + #include "src/Core/arch/AVX/MathFunctions.h" +#elif defined EIGEN_VECTORIZE_SSE + #include "src/Core/arch/SSE/PacketMath.h" + #include "src/Core/arch/SSE/TypeCasting.h" + #include "src/Core/arch/SSE/MathFunctions.h" + #include "src/Core/arch/SSE/Complex.h" +#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) + #include "src/Core/arch/AltiVec/PacketMath.h" + #include "src/Core/arch/AltiVec/MathFunctions.h" + #include "src/Core/arch/AltiVec/Complex.h" +#elif defined EIGEN_VECTORIZE_NEON + #include "src/Core/arch/NEON/PacketMath.h" + #include "src/Core/arch/NEON/TypeCasting.h" + #include "src/Core/arch/NEON/MathFunctions.h" + #include "src/Core/arch/NEON/Complex.h" +#elif defined EIGEN_VECTORIZE_SVE + #include "src/Core/arch/SVE/PacketMath.h" + #include "src/Core/arch/SVE/TypeCasting.h" + #include "src/Core/arch/SVE/MathFunctions.h" +#elif defined EIGEN_VECTORIZE_ZVECTOR + #include "src/Core/arch/ZVector/PacketMath.h" + #include "src/Core/arch/ZVector/MathFunctions.h" + #include "src/Core/arch/ZVector/Complex.h" +#elif defined EIGEN_VECTORIZE_MSA + #include "src/Core/arch/MSA/PacketMath.h" + #include "src/Core/arch/MSA/MathFunctions.h" + #include "src/Core/arch/MSA/Complex.h" +#endif + +#if defined EIGEN_VECTORIZE_GPU + #include "src/Core/arch/GPU/PacketMath.h" + #include "src/Core/arch/GPU/MathFunctions.h" + #include "src/Core/arch/GPU/TypeCasting.h" +#endif + +#if defined(EIGEN_USE_SYCL) + #include "src/Core/arch/SYCL/SyclMemoryModel.h" + #include "src/Core/arch/SYCL/InteropHeaders.h" +#if !defined(EIGEN_DONT_VECTORIZE_SYCL) + #include "src/Core/arch/SYCL/PacketMath.h" + #include "src/Core/arch/SYCL/MathFunctions.h" + #include "src/Core/arch/SYCL/TypeCasting.h" +#endif +#endif + +#include "src/Core/arch/Default/Settings.h" +// 
This file provides generic implementations valid for scalar as well +#include "src/Core/arch/Default/GenericPacketMathFunctions.h" + +#include "src/Core/functors/TernaryFunctors.h" +#include "src/Core/functors/BinaryFunctors.h" +#include "src/Core/functors/UnaryFunctors.h" +#include "src/Core/functors/NullaryFunctors.h" +#include "src/Core/functors/StlFunctors.h" +#include "src/Core/functors/AssignmentFunctors.h" + +// Specialized functors to enable the processing of complex numbers +// on CUDA devices +#ifdef EIGEN_CUDACC +#include "src/Core/arch/CUDA/Complex.h" +#endif + +#include "src/Core/util/IndexedViewHelper.h" +#include "src/Core/util/ReshapedHelper.h" +#include "src/Core/ArithmeticSequence.h" +#ifndef EIGEN_NO_IO + #include "src/Core/IO.h" +#endif +#include "src/Core/DenseCoeffsBase.h" +#include "src/Core/DenseBase.h" +#include "src/Core/MatrixBase.h" +#include "src/Core/EigenBase.h" + +#include "src/Core/Product.h" +#include "src/Core/CoreEvaluators.h" +#include "src/Core/AssignEvaluator.h" + +#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 + // at least confirmed with Doxygen 1.5.5 and 1.5.6 + #include "src/Core/Assign.h" +#endif + +#include "src/Core/ArrayBase.h" +#include "src/Core/util/BlasUtil.h" +#include "src/Core/DenseStorage.h" +#include "src/Core/NestByValue.h" + +// #include "src/Core/ForceAlignedAccess.h" + +#include "src/Core/ReturnByValue.h" +#include "src/Core/NoAlias.h" +#include "src/Core/PlainObjectBase.h" +#include "src/Core/Matrix.h" +#include "src/Core/Array.h" +#include "src/Core/CwiseTernaryOp.h" +#include "src/Core/CwiseBinaryOp.h" +#include "src/Core/CwiseUnaryOp.h" +#include "src/Core/CwiseNullaryOp.h" +#include "src/Core/CwiseUnaryView.h" +#include "src/Core/SelfCwiseBinaryOp.h" +#include "src/Core/Dot.h" +#include "src/Core/StableNorm.h" +#include "src/Core/Stride.h" +#include "src/Core/MapBase.h" +#include "src/Core/Map.h" +#include "src/Core/Ref.h" +#include "src/Core/Block.h" +#include "src/Core/VectorBlock.h" +#include "src/Core/IndexedView.h" +#include "src/Core/Reshaped.h" +#include "src/Core/Transpose.h" +#include "src/Core/DiagonalMatrix.h" +#include "src/Core/Diagonal.h" +#include "src/Core/DiagonalProduct.h" +#include "src/Core/Redux.h" +#include "src/Core/Visitor.h" +#include "src/Core/Fuzzy.h" +#include "src/Core/Swap.h" +#include "src/Core/CommaInitializer.h" +#include "src/Core/GeneralProduct.h" +#include "src/Core/Solve.h" +#include "src/Core/Inverse.h" +#include "src/Core/SolverBase.h" +#include "src/Core/PermutationMatrix.h" +#include "src/Core/Transpositions.h" +#include "src/Core/TriangularMatrix.h" +#include "src/Core/SelfAdjointView.h" +#include "src/Core/products/GeneralBlockPanelKernel.h" +#include "src/Core/products/Parallelizer.h" +#include "src/Core/ProductEvaluators.h" +#include "src/Core/products/GeneralMatrixVector.h" +#include "src/Core/products/GeneralMatrixMatrix.h" +#include "src/Core/SolveTriangular.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular.h" +#include "src/Core/products/SelfadjointMatrixVector.h" +#include "src/Core/products/SelfadjointMatrixMatrix.h" +#include "src/Core/products/SelfadjointProduct.h" +#include "src/Core/products/SelfadjointRank2Update.h" +#include "src/Core/products/TriangularMatrixVector.h" +#include "src/Core/products/TriangularMatrixMatrix.h" +#include "src/Core/products/TriangularSolverMatrix.h" +#include "src/Core/products/TriangularSolverVector.h" +#include "src/Core/BandMatrix.h" +#include "src/Core/CoreIterators.h" +#include 
"src/Core/ConditionEstimator.h" + +#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) + #include "src/Core/arch/AltiVec/MatrixProduct.h" +#elif defined EIGEN_VECTORIZE_NEON + #include "src/Core/arch/NEON/GeneralBlockPanelKernel.h" +#endif + +#include "src/Core/BooleanRedux.h" +#include "src/Core/Select.h" +#include "src/Core/VectorwiseOp.h" +#include "src/Core/PartialReduxEvaluator.h" +#include "src/Core/Random.h" +#include "src/Core/Replicate.h" +#include "src/Core/Reverse.h" +#include "src/Core/ArrayWrapper.h" +#include "src/Core/StlIterators.h" + +#ifdef EIGEN_USE_BLAS +#include "src/Core/products/GeneralMatrixMatrix_BLAS.h" +#include "src/Core/products/GeneralMatrixVector_BLAS.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h" +#include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h" +#include "src/Core/products/SelfadjointMatrixVector_BLAS.h" +#include "src/Core/products/TriangularMatrixMatrix_BLAS.h" +#include "src/Core/products/TriangularMatrixVector_BLAS.h" +#include "src/Core/products/TriangularSolverMatrix_BLAS.h" +#endif // EIGEN_USE_BLAS + +#ifdef EIGEN_USE_MKL_VML +#include "src/Core/Assign_MKL.h" +#endif + +#include "src/Core/GlobalFunctions.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CORE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Dense b/src/reference/ai_poc/anomaly_det/Eigen/Dense new file mode 100644 index 000000000..5768910bd --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Dense @@ -0,0 +1,7 @@ +#include "Core" +#include "LU" +#include "Cholesky" +#include "QR" +#include "SVD" +#include "Geometry" +#include "Eigenvalues" diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Eigen b/src/reference/ai_poc/anomaly_det/Eigen/Eigen new file mode 100644 index 000000000..654c8dc63 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Eigen @@ -0,0 +1,2 @@ +#include "Dense" +#include "Sparse" diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Eigenvalues b/src/reference/ai_poc/anomaly_det/Eigen/Eigenvalues new file mode 100644 index 000000000..5467a2e7b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Eigenvalues @@ -0,0 +1,60 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EIGENVALUES_MODULE_H +#define EIGEN_EIGENVALUES_MODULE_H + +#include "Core" + +#include "Cholesky" +#include "Jacobi" +#include "Householder" +#include "LU" +#include "Geometry" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Eigenvalues_Module Eigenvalues module + * + * + * + * This module mainly provides various eigenvalue solvers. 
+ * This module also provides some MatrixBase methods, including: + * - MatrixBase::eigenvalues(), + * - MatrixBase::operatorNorm() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/RealSvd2x2.h" +#include "src/Eigenvalues/Tridiagonalization.h" +#include "src/Eigenvalues/RealSchur.h" +#include "src/Eigenvalues/EigenSolver.h" +#include "src/Eigenvalues/SelfAdjointEigenSolver.h" +#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h" +#include "src/Eigenvalues/HessenbergDecomposition.h" +#include "src/Eigenvalues/ComplexSchur.h" +#include "src/Eigenvalues/ComplexEigenSolver.h" +#include "src/Eigenvalues/RealQZ.h" +#include "src/Eigenvalues/GeneralizedEigenSolver.h" +#include "src/Eigenvalues/MatrixBaseEigenvalues.h" +#ifdef EIGEN_USE_LAPACKE +#ifdef EIGEN_USE_MKL +#include "mkl_lapacke.h" +#else +#include "src/misc/lapacke.h" +#endif +#include "src/Eigenvalues/RealSchur_LAPACKE.h" +#include "src/Eigenvalues/ComplexSchur_LAPACKE.h" +#include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_EIGENVALUES_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Geometry b/src/reference/ai_poc/anomaly_det/Eigen/Geometry new file mode 100644 index 000000000..bc78110a8 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Geometry @@ -0,0 +1,59 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GEOMETRY_MODULE_H +#define EIGEN_GEOMETRY_MODULE_H + +#include "Core" + +#include "SVD" +#include "LU" +#include + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Geometry_Module Geometry module + * + * This module provides support for: + * - fixed-size homogeneous transformations + * - translation, scaling, 2D and 3D rotations + * - \link Quaternion quaternions \endlink + * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3) + * - orthognal vector generation (\ref MatrixBase::unitOrthogonal) + * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes \endlink + * - \link AlignedBox axis aligned bounding boxes \endlink + * - \link umeyama least-square transformation fitting \endlink + * + * \code + * #include + * \endcode + */ + +#include "src/Geometry/OrthoMethods.h" +#include "src/Geometry/EulerAngles.h" + +#include "src/Geometry/Homogeneous.h" +#include "src/Geometry/RotationBase.h" +#include "src/Geometry/Rotation2D.h" +#include "src/Geometry/Quaternion.h" +#include "src/Geometry/AngleAxis.h" +#include "src/Geometry/Transform.h" +#include "src/Geometry/Translation.h" +#include "src/Geometry/Scaling.h" +#include "src/Geometry/Hyperplane.h" +#include "src/Geometry/ParametrizedLine.h" +#include "src/Geometry/AlignedBox.h" +#include "src/Geometry/Umeyama.h" + +// Use the SSE optimized version whenever possible. 
+#if (defined EIGEN_VECTORIZE_SSE) || (defined EIGEN_VECTORIZE_NEON) +#include "src/Geometry/arch/Geometry_SIMD.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_GEOMETRY_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Householder b/src/reference/ai_poc/anomaly_det/Eigen/Householder new file mode 100644 index 000000000..f2fa79969 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Householder @@ -0,0 +1,29 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_HOUSEHOLDER_MODULE_H +#define EIGEN_HOUSEHOLDER_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Householder_Module Householder module + * This module provides Householder transformations. + * + * \code + * #include + * \endcode + */ + +#include "src/Householder/Householder.h" +#include "src/Householder/HouseholderSequence.h" +#include "src/Householder/BlockHouseholder.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_HOUSEHOLDER_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/IterativeLinearSolvers b/src/reference/ai_poc/anomaly_det/Eigen/IterativeLinearSolvers new file mode 100644 index 000000000..957d5750b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/IterativeLinearSolvers @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H +#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module + * + * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse. + * Those solvers are accessible via the following classes: + * - ConjugateGradient for selfadjoint (hermitian) matrices, + * - LeastSquaresConjugateGradient for rectangular least-square problems, + * - BiCGSTAB for general square matrices. + * + * These iterative solvers are associated with some preconditioners: + * - IdentityPreconditioner - not really useful + * - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices. + * - IncompleteLUT - incomplete LU factorization with dual thresholding + * + * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport. 
+ * + \code + #include + \endcode + */ + +#include "src/IterativeLinearSolvers/SolveWithGuess.h" +#include "src/IterativeLinearSolvers/IterativeSolverBase.h" +#include "src/IterativeLinearSolvers/BasicPreconditioners.h" +#include "src/IterativeLinearSolvers/ConjugateGradient.h" +#include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h" +#include "src/IterativeLinearSolvers/BiCGSTAB.h" +#include "src/IterativeLinearSolvers/IncompleteLUT.h" +#include "src/IterativeLinearSolvers/IncompleteCholesky.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Jacobi b/src/reference/ai_poc/anomaly_det/Eigen/Jacobi new file mode 100644 index 000000000..43edc7a19 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Jacobi @@ -0,0 +1,32 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_JACOBI_MODULE_H +#define EIGEN_JACOBI_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Jacobi_Module Jacobi module + * This module provides Jacobi and Givens rotations. + * + * \code + * #include + * \endcode + * + * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation: + * - MatrixBase::applyOnTheLeft() + * - MatrixBase::applyOnTheRight(). + */ + +#include "src/Jacobi/Jacobi.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_JACOBI_MODULE_H + diff --git a/src/reference/ai_poc/anomaly_det/Eigen/KLUSupport b/src/reference/ai_poc/anomaly_det/Eigen/KLUSupport new file mode 100644 index 000000000..b23d90535 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/KLUSupport @@ -0,0 +1,41 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_KLUSUPPORT_MODULE_H +#define EIGEN_KLUSUPPORT_MODULE_H + +#include + +#include + +extern "C" { +#include +#include + } + +/** \ingroup Support_modules + * \defgroup KLUSupport_Module KLUSupport module + * + * This module provides an interface to the KLU library which is part of the suitesparse package. + * It provides the following factorization class: + * - class KLU: a sparse LU factorization, well-suited for circuit simulation. + * + * \code + * #include + * \endcode + * + * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must be linked to the klu library and its dependencies. + * The dependencies depend on how umfpack has been compiled. + * For a cmake based project, you can use our FindKLU.cmake module to help you in this task. + * + */ + +#include "src/KLUSupport/KLUSupport.h" + +#include + +#endif // EIGEN_KLUSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/LU b/src/reference/ai_poc/anomaly_det/Eigen/LU new file mode 100644 index 000000000..1236ceb04 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/LU @@ -0,0 +1,47 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LU_MODULE_H +#define EIGEN_LU_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup LU_Module LU module + * This module includes %LU decomposition and related notions such as matrix inversion and determinant. + * This module defines the following MatrixBase methods: + * - MatrixBase::inverse() + * - MatrixBase::determinant() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Kernel.h" +#include "src/misc/Image.h" +#include "src/LU/FullPivLU.h" +#include "src/LU/PartialPivLU.h" +#ifdef EIGEN_USE_LAPACKE +#ifdef EIGEN_USE_MKL +#include "mkl_lapacke.h" +#else +#include "src/misc/lapacke.h" +#endif +#include "src/LU/PartialPivLU_LAPACKE.h" +#endif +#include "src/LU/Determinant.h" +#include "src/LU/InverseImpl.h" + +#if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_NEON + #include "src/LU/arch/InverseSize4.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_LU_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/MetisSupport b/src/reference/ai_poc/anomaly_det/Eigen/MetisSupport new file mode 100644 index 000000000..85c41bf34 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/MetisSupport @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_METISSUPPORT_MODULE_H +#define EIGEN_METISSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + + +/** \ingroup Support_modules + * \defgroup MetisSupport_Module MetisSupport module + * + * \code + * #include + * \endcode + * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis). + * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink + */ + + +#include "src/MetisSupport/MetisSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_METISSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/OrderingMethods b/src/reference/ai_poc/anomaly_det/Eigen/OrderingMethods new file mode 100644 index 000000000..29691a62b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/OrderingMethods @@ -0,0 +1,70 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ORDERINGMETHODS_MODULE_H +#define EIGEN_ORDERINGMETHODS_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup OrderingMethods_Module OrderingMethods module + * + * This module is currently for internal use only + * + * It defines various built-in and external ordering methods for sparse matrices. + * They are typically used to reduce the number of elements during + * the sparse matrix decomposition (LLT, LU, QR). 
+ * Precisely, in a preprocessing step, a permutation matrix P is computed using + * those ordering methods and applied to the columns of the matrix. + * Using for instance the sparse Cholesky decomposition, it is expected that + * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A). + * + * + * Usage : + * \code + * #include + * \endcode + * + * A simple usage is as a template parameter in the sparse decomposition classes : + * + * \code + * SparseLU > solver; + * \endcode + * + * \code + * SparseQR > solver; + * \endcode + * + * It is possible as well to call directly a particular ordering method for your own purpose, + * \code + * AMDOrdering ordering; + * PermutationMatrix perm; + * SparseMatrix A; + * //Fill the matrix ... + * + * ordering(A, perm); // Call AMD + * \endcode + * + * \note Some of these methods (like AMD or METIS), need the sparsity pattern + * of the input matrix to be symmetric. When the matrix is structurally unsymmetric, + * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method. + * If your matrix is already symmetric (at leat in structure), you can avoid that + * by calling the method with a SelfAdjointView type. + * + * \code + * // Call the ordering on the pattern of the lower triangular matrix A + * ordering(A.selfadjointView(), perm); + * \endcode + */ + +#include "src/OrderingMethods/Amd.h" +#include "src/OrderingMethods/Ordering.h" +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ORDERINGMETHODS_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/PaStiXSupport b/src/reference/ai_poc/anomaly_det/Eigen/PaStiXSupport new file mode 100644 index 000000000..234619acc --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/PaStiXSupport @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PASTIXSUPPORT_MODULE_H +#define EIGEN_PASTIXSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +#include +} + +#ifdef complex +#undef complex +#endif + +/** \ingroup Support_modules + * \defgroup PaStiXSupport_Module PaStiXSupport module + * + * This module provides an interface to the PaSTiX library. + * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. + * It provides the two following main factorization classes: + * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. + * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. + * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). + * + * \code + * #include + * \endcode + * + * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies. + * This wrapper resuires PaStiX version 5.x compiled without MPI support. + * The dependencies depend on how PaSTiX has been compiled. + * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task. 
+ * + */ + +#include "src/PaStiXSupport/PaStiXSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PASTIXSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/PardisoSupport b/src/reference/ai_poc/anomaly_det/Eigen/PardisoSupport new file mode 100644 index 000000000..340edf51f --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/PardisoSupport @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARDISOSUPPORT_MODULE_H +#define EIGEN_PARDISOSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include + +/** \ingroup Support_modules + * \defgroup PardisoSupport_Module PardisoSupport module + * + * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers. + * + * \code + * #include + * \endcode + * + * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies. + * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration. + * + */ + +#include "src/PardisoSupport/PardisoSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PARDISOSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/QR b/src/reference/ai_poc/anomaly_det/Eigen/QR new file mode 100644 index 000000000..8465b62ce --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/QR @@ -0,0 +1,50 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_QR_MODULE_H +#define EIGEN_QR_MODULE_H + +#include "Core" + +#include "Cholesky" +#include "Jacobi" +#include "Householder" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup QR_Module QR module + * + * + * + * This module provides various QR decompositions + * This module also provides some MatrixBase methods, including: + * - MatrixBase::householderQr() + * - MatrixBase::colPivHouseholderQr() + * - MatrixBase::fullPivHouseholderQr() + * + * \code + * #include + * \endcode + */ + +#include "src/QR/HouseholderQR.h" +#include "src/QR/FullPivHouseholderQR.h" +#include "src/QR/ColPivHouseholderQR.h" +#include "src/QR/CompleteOrthogonalDecomposition.h" +#ifdef EIGEN_USE_LAPACKE +#ifdef EIGEN_USE_MKL +#include "mkl_lapacke.h" +#else +#include "src/misc/lapacke.h" +#endif +#include "src/QR/HouseholderQR_LAPACKE.h" +#include "src/QR/ColPivHouseholderQR_LAPACKE.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_QR_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/QtAlignedMalloc b/src/reference/ai_poc/anomaly_det/Eigen/QtAlignedMalloc new file mode 100644 index 000000000..6fe82374a --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/QtAlignedMalloc @@ -0,0 +1,39 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_QTMALLOC_MODULE_H +#define EIGEN_QTMALLOC_MODULE_H + +#include "Core" + +#if (!EIGEN_MALLOC_ALREADY_ALIGNED) + +#include "src/Core/util/DisableStupidWarnings.h" + +void *qMalloc(std::size_t size) +{ + return Eigen::internal::aligned_malloc(size); +} + +void qFree(void *ptr) +{ + Eigen::internal::aligned_free(ptr); +} + +void *qRealloc(void *ptr, std::size_t size) +{ + void* newPtr = Eigen::internal::aligned_malloc(size); + std::memcpy(newPtr, ptr, size); + Eigen::internal::aligned_free(ptr); + return newPtr; +} + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif + +#endif // EIGEN_QTMALLOC_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SPQRSupport b/src/reference/ai_poc/anomaly_det/Eigen/SPQRSupport new file mode 100644 index 000000000..f70390c17 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SPQRSupport @@ -0,0 +1,34 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPQRSUPPORT_MODULE_H +#define EIGEN_SPQRSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "SuiteSparseQR.hpp" + +/** \ingroup Support_modules + * \defgroup SPQRSupport_Module SuiteSparseQR module + * + * This module provides an interface to the SPQR library, which is part of the suitesparse package. + * + * \code + * #include + * \endcode + * + * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...). + * For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules + * + */ + +#include "src/CholmodSupport/CholmodSupport.h" +#include "src/SPQRSupport/SuiteSparseQRSupport.h" + +#endif diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SVD b/src/reference/ai_poc/anomaly_det/Eigen/SVD new file mode 100644 index 000000000..345179496 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SVD @@ -0,0 +1,50 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SVD_MODULE_H +#define EIGEN_SVD_MODULE_H + +#include "QR" +#include "Householder" +#include "Jacobi" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup SVD_Module SVD module + * + * + * + * This module provides SVD decomposition for matrices (both real and complex). + * Two decomposition algorithms are provided: + * - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones. + * - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems. 
+ * These decompositions are accessible via the respective classes and following MatrixBase methods: + * - MatrixBase::jacobiSvd() + * - MatrixBase::bdcSvd() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/RealSvd2x2.h" +#include "src/SVD/UpperBidiagonalization.h" +#include "src/SVD/SVDBase.h" +#include "src/SVD/JacobiSVD.h" +#include "src/SVD/BDCSVD.h" +#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) +#ifdef EIGEN_USE_MKL +#include "mkl_lapacke.h" +#else +#include "src/misc/lapacke.h" +#endif +#include "src/SVD/JacobiSVD_LAPACKE.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SVD_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/Sparse b/src/reference/ai_poc/anomaly_det/Eigen/Sparse new file mode 100644 index 000000000..a2ef7a665 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/Sparse @@ -0,0 +1,34 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_MODULE_H +#define EIGEN_SPARSE_MODULE_H + +/** \defgroup Sparse_Module Sparse meta-module + * + * Meta-module including all related modules: + * - \ref SparseCore_Module + * - \ref OrderingMethods_Module + * - \ref SparseCholesky_Module + * - \ref SparseLU_Module + * - \ref SparseQR_Module + * - \ref IterativeLinearSolvers_Module + * + \code + #include + \endcode + */ + +#include "SparseCore" +#include "OrderingMethods" +#include "SparseCholesky" +#include "SparseLU" +#include "SparseQR" +#include "IterativeLinearSolvers" + +#endif // EIGEN_SPARSE_MODULE_H + diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SparseCholesky b/src/reference/ai_poc/anomaly_det/Eigen/SparseCholesky new file mode 100644 index 000000000..d2b1f1276 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SparseCholesky @@ -0,0 +1,37 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2013 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSECHOLESKY_MODULE_H +#define EIGEN_SPARSECHOLESKY_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup SparseCholesky_Module SparseCholesky module + * + * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices. + * Those decompositions are accessible via the following classes: + * - SimplicialLLt, + * - SimplicialLDLt + * + * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module. 
+ * + * \code + * #include + * \endcode + */ + +#include "src/SparseCholesky/SimplicialCholesky.h" +#include "src/SparseCholesky/SimplicialCholesky_impl.h" +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECHOLESKY_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SparseCore b/src/reference/ai_poc/anomaly_det/Eigen/SparseCore new file mode 100644 index 000000000..76966c4c4 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SparseCore @@ -0,0 +1,69 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSECORE_MODULE_H +#define EIGEN_SPARSECORE_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include + +/** + * \defgroup SparseCore_Module SparseCore module + * + * This module provides a sparse matrix representation, and basic associated matrix manipulations + * and operations. + * + * See the \ref TutorialSparse "Sparse tutorial" + * + * \code + * #include + * \endcode + * + * This module depends on: Core. + */ + +#include "src/SparseCore/SparseUtil.h" +#include "src/SparseCore/SparseMatrixBase.h" +#include "src/SparseCore/SparseAssign.h" +#include "src/SparseCore/CompressedStorage.h" +#include "src/SparseCore/AmbiVector.h" +#include "src/SparseCore/SparseCompressedBase.h" +#include "src/SparseCore/SparseMatrix.h" +#include "src/SparseCore/SparseMap.h" +#include "src/SparseCore/MappedSparseMatrix.h" +#include "src/SparseCore/SparseVector.h" +#include "src/SparseCore/SparseRef.h" +#include "src/SparseCore/SparseCwiseUnaryOp.h" +#include "src/SparseCore/SparseCwiseBinaryOp.h" +#include "src/SparseCore/SparseTranspose.h" +#include "src/SparseCore/SparseBlock.h" +#include "src/SparseCore/SparseDot.h" +#include "src/SparseCore/SparseRedux.h" +#include "src/SparseCore/SparseView.h" +#include "src/SparseCore/SparseDiagonalProduct.h" +#include "src/SparseCore/ConservativeSparseSparseProduct.h" +#include "src/SparseCore/SparseSparseProductWithPruning.h" +#include "src/SparseCore/SparseProduct.h" +#include "src/SparseCore/SparseDenseProduct.h" +#include "src/SparseCore/SparseSelfAdjointView.h" +#include "src/SparseCore/SparseTriangularView.h" +#include "src/SparseCore/TriangularSolver.h" +#include "src/SparseCore/SparsePermutation.h" +#include "src/SparseCore/SparseFuzzy.h" +#include "src/SparseCore/SparseSolverBase.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECORE_MODULE_H + diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SparseLU b/src/reference/ai_poc/anomaly_det/Eigen/SparseLU new file mode 100644 index 000000000..37c4a5c5a --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SparseLU @@ -0,0 +1,50 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
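+// Illustrative aside (added note, not part of the upstream file): the
+// SparseCore module listed above is typically used by assembling a matrix
+// from (row, column, value) triplets, e.g.:
+//
+//   std::vector<Eigen::Triplet<double> > entries;
+//   entries.push_back(Eigen::Triplet<double>(0, 0, 4.0));
+//   entries.push_back(Eigen::Triplet<double>(1, 1, 2.0));
+//   Eigen::SparseMatrix<double> A(2, 2);
+//   A.setFromTriplets(entries.begin(), entries.end());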
+ +#ifndef EIGEN_SPARSELU_MODULE_H +#define EIGEN_SPARSELU_MODULE_H + +#include "SparseCore" + +/** + * \defgroup SparseLU_Module SparseLU module + * This module defines a supernodal factorization of general sparse matrices. + * The code is fully optimized for supernode-panel updates with specialized kernels. + * Please, see the documentation of the SparseLU class for more details. + */ + +// Ordering interface +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "src/SparseLU/SparseLU_gemm_kernel.h" + +#include "src/SparseLU/SparseLU_Structs.h" +#include "src/SparseLU/SparseLU_SupernodalMatrix.h" +#include "src/SparseLU/SparseLUImpl.h" +#include "src/SparseCore/SparseColEtree.h" +#include "src/SparseLU/SparseLU_Memory.h" +#include "src/SparseLU/SparseLU_heap_relax_snode.h" +#include "src/SparseLU/SparseLU_relax_snode.h" +#include "src/SparseLU/SparseLU_pivotL.h" +#include "src/SparseLU/SparseLU_panel_dfs.h" +#include "src/SparseLU/SparseLU_kernel_bmod.h" +#include "src/SparseLU/SparseLU_panel_bmod.h" +#include "src/SparseLU/SparseLU_column_dfs.h" +#include "src/SparseLU/SparseLU_column_bmod.h" +#include "src/SparseLU/SparseLU_copy_to_ucol.h" +#include "src/SparseLU/SparseLU_pruneL.h" +#include "src/SparseLU/SparseLU_Utils.h" +#include "src/SparseLU/SparseLU.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSELU_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SparseQR b/src/reference/ai_poc/anomaly_det/Eigen/SparseQR new file mode 100644 index 000000000..f5fc5fa7f --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/SparseQR @@ -0,0 +1,36 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEQR_MODULE_H +#define EIGEN_SPARSEQR_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup SparseQR_Module SparseQR module + * \brief Provides QR decomposition for sparse matrices + * + * This module provides a simplicial version of the left-looking Sparse QR decomposition. + * The columns of the input matrix should be reordered to limit the fill-in during the + * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end. + * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list + * of built-in and external ordering methods. + * + * \code + * #include + * \endcode + * + * + */ + +#include "src/SparseCore/SparseColEtree.h" +#include "src/SparseQR/SparseQR.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif diff --git a/src/reference/ai_poc/anomaly_det/Eigen/StdDeque b/src/reference/ai_poc/anomaly_det/Eigen/StdDeque new file mode 100644 index 000000000..bc68397be --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/StdDeque @@ -0,0 +1,27 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
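+// Illustrative aside (added note, not part of the upstream file): the
+// SparseLU and SparseQR solvers defined above follow the usual Eigen solver
+// pattern; A is an assumed compressed SparseMatrix<double> and b a matching
+// dense right-hand side:
+//
+//   Eigen::SparseLU<Eigen::SparseMatrix<double> > lu;
+//   lu.analyzePattern(A);  // symbolic step, reusable while the pattern is unchanged
+//   lu.factorize(A);       // numeric step
+//   if (lu.info() == Eigen::Success)
+//     Eigen::VectorXd x = lu.solve(b);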
+
+#ifndef EIGEN_STDDEQUE_MODULE_H
+#define EIGEN_STDDEQUE_MODULE_H
+
+#include "Core"
+#include <deque>
+
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
+
+#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)
+
+#else
+
+#include "src/StlSupport/StdDeque.h"
+
+#endif
+
+#endif // EIGEN_STDDEQUE_MODULE_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/StdList b/src/reference/ai_poc/anomaly_det/Eigen/StdList
new file mode 100644
index 000000000..4c6262c08
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/StdList
@@ -0,0 +1,26 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STDLIST_MODULE_H
+#define EIGEN_STDLIST_MODULE_H
+
+#include "Core"
+#include <list>
+
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
+
+#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)
+
+#else
+
+#include "src/StlSupport/StdList.h"
+
+#endif
+
+#endif // EIGEN_STDLIST_MODULE_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/StdVector b/src/reference/ai_poc/anomaly_det/Eigen/StdVector
new file mode 100644
index 000000000..0c4697ad5
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/StdVector
@@ -0,0 +1,27 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud
+// Copyright (C) 2009 Hauke Heibel
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STDVECTOR_MODULE_H
+#define EIGEN_STDVECTOR_MODULE_H
+
+#include "Core"
+#include <vector>
+
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
+
+#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...)
+
+#else
+
+#include "src/StlSupport/StdVector.h"
+
+#endif
+
+#endif // EIGEN_STDVECTOR_MODULE_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/SuperLUSupport b/src/reference/ai_poc/anomaly_det/Eigen/SuperLUSupport
new file mode 100644
index 000000000..59312a82d
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/SuperLUSupport
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H
+#define EIGEN_SUPERLUSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#ifdef EMPTY
+#define EIGEN_EMPTY_WAS_ALREADY_DEFINED
+#endif
+
+typedef int int_t;
+#include <slu_Cnames.h>
+#include <supermatrix.h>
+#include <slu_util.h>
+
+// slu_util.h defines a preprocessor token named EMPTY which is really polluting,
+// so we remove it in favor of a SUPERLU_EMPTY token.
+// If EMPTY was already defined then we don't undef it.
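+// (Added note, illustrative only: the StdDeque/StdList/StdVector headers
+// above let fixed-size vectorizable Eigen types be stored in STL containers
+// through an aligned allocator, e.g.
+//   std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > pts;
+//   pts.push_back(Eigen::Vector4f::Ones());
+// )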
+ +#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) +# undef EIGEN_EMPTY_WAS_ALREADY_DEFINED +#elif defined(EMPTY) +# undef EMPTY +#endif + +#define SUPERLU_EMPTY (-1) + +namespace Eigen { struct SluMatrix; } + +/** \ingroup Support_modules + * \defgroup SuperLUSupport_Module SuperLUSupport module + * + * This module provides an interface to the SuperLU library. + * It provides the following factorization class: + * - class SuperLU: a supernodal sequential LU factorization. + * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods). + * + * \warning This wrapper requires at least versions 4.0 of SuperLU. The 3.x versions are not supported. + * + * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting. + * + * \code + * #include + * \endcode + * + * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies. + * The dependencies depend on how superlu has been compiled. + * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task. + * + */ + +#include "src/SuperLUSupport/SuperLUSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/UmfPackSupport b/src/reference/ai_poc/anomaly_det/Eigen/UmfPackSupport new file mode 100644 index 000000000..00eec8087 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/UmfPackSupport @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H +#define EIGEN_UMFPACKSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + +/** \ingroup Support_modules + * \defgroup UmfPackSupport_Module UmfPackSupport module + * + * This module provides an interface to the UmfPack library which is part of the suitesparse package. + * It provides the following factorization class: + * - class UmfPackLU: a multifrontal sequential LU factorization. + * + * \code + * #include + * \endcode + * + * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be linked to the umfpack library and its dependencies. + * The dependencies depend on how umfpack has been compiled. + * For a cmake based project, you can use our FindUmfPack.cmake module to help you in this task. + * + */ + +#include "src/UmfPackSupport/UmfPackSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_UMFPACKSUPPORT_MODULE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LDLT.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LDLT.h new file mode 100644 index 000000000..1013ca045 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LDLT.h @@ -0,0 +1,688 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// Copyright (C) 2009 Keir Mierle +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2011 Timothy E. 
Holy +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LDLT_H +#define EIGEN_LDLT_H + +namespace Eigen { + +namespace internal { + template struct traits > + : traits<_MatrixType> + { + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + typedef int StorageIndex; + enum { Flags = 0 }; + }; + + template struct LDLT_Traits; + + // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef + enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; +} + +/** \ingroup Cholesky_Module + * + * \class LDLT + * + * \brief Robust Cholesky decomposition of a matrix with pivoting + * + * \tparam _MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition + * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. + * + * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite + * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L + * is lower triangular with a unit diagonal and D is a diagonal matrix. + * + * The decomposition uses pivoting to ensure stability, so that D will have + * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root + * on D also stabilizes the computation. + * + * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky + * decomposition to determine whether a system of equations has a solution. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT + */ +template class LDLT + : public SolverBase > +{ + public: + typedef _MatrixType MatrixType; + typedef SolverBase Base; + friend class SolverBase; + + EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT) + enum { + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + UpLo = _UpLo + }; + typedef Matrix TmpMatrixType; + + typedef Transpositions TranspositionType; + typedef PermutationMatrix PermutationType; + + typedef internal::LDLT_Traits Traits; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LDLT::compute(const MatrixType&). + */ + LDLT() + : m_matrix(), + m_transpositions(), + m_sign(internal::ZeroSign), + m_isInitialized(false) + {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa LDLT() + */ + explicit LDLT(Index size) + : m_matrix(size, size), + m_transpositions(size), + m_temporary(size), + m_sign(internal::ZeroSign), + m_isInitialized(false) + {} + + /** \brief Constructor with decomposition + * + * This calculates the decomposition for the input \a matrix. 
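+ *
+ * A short usage sketch (added note, not from the upstream documentation):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+ * A = A * A.transpose();                 // make A symmetric positive semidefinite
+ * Eigen::LDLT<Eigen::MatrixXd> ldlt(A);  // decompose on construction
+ * Eigen::VectorXd x = ldlt.solve(Eigen::VectorXd::Ones(4));
+ * \endcode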
+ * + * \sa LDLT(Index size) + */ + template + explicit LDLT(const EigenBase& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_transpositions(matrix.rows()), + m_temporary(matrix.rows()), + m_sign(internal::ZeroSign), + m_isInitialized(false) + { + compute(matrix.derived()); + } + + /** \brief Constructs a LDLT factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. + * + * \sa LDLT(const EigenBase&) + */ + template + explicit LDLT(EigenBase& matrix) + : m_matrix(matrix.derived()), + m_transpositions(matrix.rows()), + m_temporary(matrix.rows()), + m_sign(internal::ZeroSign), + m_isInitialized(false) + { + compute(matrix.derived()); + } + + /** Clear any existing decomposition + * \sa rankUpdate(w,sigma) + */ + void setZero() + { + m_isInitialized = false; + } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getL(m_matrix); + } + + /** \returns the permutation matrix P as a transposition sequence. + */ + inline const TranspositionType& transpositionsP() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_transpositions; + } + + /** \returns the coefficients of the diagonal matrix D */ + inline Diagonal vectorD() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix.diagonal(); + } + + /** \returns true if the matrix is positive (semidefinite) */ + inline bool isPositive() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; + } + + /** \returns true if the matrix is negative (semidefinite) */ + inline bool isNegative(void) const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * This function also supports in-place solves using the syntax x = decompositionObject.solve(x) . + * + * \note_about_checking_solutions + * + * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ + * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, + * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then + * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the + * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function + * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular. + * + * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() + */ + template + inline const Solve + solve(const MatrixBase& b) const; + #endif + + template + bool solveInPlace(MatrixBase &bAndX) const; + + template + LDLT& compute(const EigenBase& matrix); + + /** \returns an estimate of the reciprocal condition number of the matrix of + * which \c *this is the LDLT decomposition. 
+ */ + RealScalar rcond() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return internal::rcond_estimate_helper(m_l1_norm, *this); + } + + template + LDLT& rankUpdate(const MatrixBase& w, const RealScalar& alpha=1); + + /** \returns the internal LDLT decomposition matrix + * + * TODO: document the storage layout + */ + inline const MatrixType& matrixLDLT() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix; + } + + MatrixType reconstructedMatrix() const; + + /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. + * + * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: + * \code x = decomposition.adjoint().solve(b) \endcode + */ + const LDLT& adjoint() const { return *this; }; + + EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the factorization failed because of a zero pivot. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_info; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void _solve_impl(const RhsType &rhs, DstType &dst) const; + + template + void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + /** \internal + * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. + * The strict upper part is used during the decomposition, the strict lower + * part correspond to the coefficients of L (its diagonal is equal to 1 and + * is not stored), and the diagonal entries correspond to D. 
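+ *
+ * (Added note, illustrative: for an LDLT<Eigen::MatrixXd> object ldlt, after
+ * a successful compute() the factors can be read back via
+ * \code
+ * Eigen::MatrixXd L = ldlt.matrixL();  // unit lower triangular
+ * Eigen::VectorXd D = ldlt.vectorD();  // diagonal of D
+ * \endcode
+ * which, together with transpositionsP(), reproduce the input as in
+ * reconstructedMatrix().)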
+ */ + MatrixType m_matrix; + RealScalar m_l1_norm; + TranspositionType m_transpositions; + TmpMatrixType m_temporary; + internal::SignMatrix m_sign; + bool m_isInitialized; + ComputationInfo m_info; +}; + +namespace internal { + +template struct ldlt_inplace; + +template<> struct ldlt_inplace +{ + template + static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) + { + using std::abs; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename TranspositionType::StorageIndex IndexType; + eigen_assert(mat.rows()==mat.cols()); + const Index size = mat.rows(); + bool found_zero_pivot = false; + bool ret = true; + + if (size <= 1) + { + transpositions.setIdentity(); + if(size==0) sign = ZeroSign; + else if (numext::real(mat.coeff(0,0)) > static_cast(0) ) sign = PositiveSemiDef; + else if (numext::real(mat.coeff(0,0)) < static_cast(0)) sign = NegativeSemiDef; + else sign = ZeroSign; + return true; + } + + for (Index k = 0; k < size; ++k) + { + // Find largest diagonal element + Index index_of_biggest_in_corner; + mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); + index_of_biggest_in_corner += k; + + transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner); + if(k != index_of_biggest_in_corner) + { + // apply the transposition while taking care to consider only + // the lower triangular part + Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element + mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); + mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); + std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); + for(Index i=k+1;i::IsComplex) + mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k)); + } + + // partition the matrix: + // A00 | - | - + // lu = A10 | A11 | - + // A20 | A21 | A22 + Index rs = size - k - 1; + Block A21(mat,k+1,k,rs,1); + Block A10(mat,k,0,1,k); + Block A20(mat,k+1,0,rs,k); + + if(k>0) + { + temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); + mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); + if(rs>0) + A21.noalias() -= A20 * temp.head(k); + } + + // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot + // was smaller than the cutoff value. However, since LDLT is not rank-revealing + // we should only make sure that we do not introduce INF or NaN values. + // Remark that LAPACK also uses 0 as the cutoff value. + RealScalar realAkk = numext::real(mat.coeffRef(k,k)); + bool pivot_is_valid = (abs(realAkk) > RealScalar(0)); + + if(k==0 && !pivot_is_valid) + { + // The entire diagonal is zero, there is nothing more to do + // except filling the transpositions, and checking whether the matrix is zero. 
+ sign = ZeroSign; + for(Index j = 0; j0) && pivot_is_valid) + A21 /= realAkk; + else if(rs>0) + ret = ret && (A21.array()==Scalar(0)).all(); + + if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed + else if(!pivot_is_valid) found_zero_pivot = true; + + if (sign == PositiveSemiDef) { + if (realAkk < static_cast(0)) sign = Indefinite; + } else if (sign == NegativeSemiDef) { + if (realAkk > static_cast(0)) sign = Indefinite; + } else if (sign == ZeroSign) { + if (realAkk > static_cast(0)) sign = PositiveSemiDef; + else if (realAkk < static_cast(0)) sign = NegativeSemiDef; + } + } + + return ret; + } + + // Reference for the algorithm: Davis and Hager, "Multiple Rank + // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) + // Trivial rearrangements of their computations (Timothy E. Holy) + // allow their algorithm to work for rank-1 updates even if the + // original matrix is not of full rank. + // Here only rank-1 updates are implemented, to reduce the + // requirement for intermediate storage and improve accuracy + template + static bool updateInPlace(MatrixType& mat, MatrixBase& w, const typename MatrixType::RealScalar& sigma=1) + { + using numext::isfinite; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + const Index size = mat.rows(); + eigen_assert(mat.cols() == size && w.size()==size); + + RealScalar alpha = 1; + + // Apply the update + for (Index j = 0; j < size; j++) + { + // Check for termination due to an original decomposition of low-rank + if (!(isfinite)(alpha)) + break; + + // Update the diagonal terms + RealScalar dj = numext::real(mat.coeff(j,j)); + Scalar wj = w.coeff(j); + RealScalar swj2 = sigma*numext::abs2(wj); + RealScalar gamma = dj*alpha + swj2; + + mat.coeffRef(j,j) += swj2/alpha; + alpha += swj2/dj; + + + // Update the terms of L + Index rs = size-j-1; + w.tail(rs) -= wj * mat.col(j).tail(rs); + if(gamma != 0) + mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs); + } + return true; + } + + template + static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1) + { + // Apply the permutation to the input w + tmp = transpositions * w; + + return ldlt_inplace::updateInPlace(mat,tmp,sigma); + } +}; + +template<> struct ldlt_inplace +{ + template + static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) + { + Transpose matt(mat); + return ldlt_inplace::unblocked(matt, transpositions, temp, sign); + } + + template + static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1) + { + Transpose matt(mat); + return ldlt_inplace::update(matt, transpositions, tmp, w.conjugate(), sigma); + } +}; + +template struct LDLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } +}; + +template struct LDLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } +}; + +} // end namespace internal + +/** Compute / recompute the LDLT 
decomposition A = L D L^* = U^* D U of \a matrix + */ +template +template +LDLT& LDLT::compute(const EigenBase& a) +{ + check_template_parameters(); + + eigen_assert(a.rows()==a.cols()); + const Index size = a.rows(); + + m_matrix = a.derived(); + + // Compute matrix L1 norm = max abs column sum. + m_l1_norm = RealScalar(0); + // TODO move this code to SelfAdjointView + for (Index col = 0; col < size; ++col) { + RealScalar abs_col_sum; + if (_UpLo == Lower) + abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); + else + abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); + if (abs_col_sum > m_l1_norm) + m_l1_norm = abs_col_sum; + } + + m_transpositions.resize(size); + m_isInitialized = false; + m_temporary.resize(size); + m_sign = internal::ZeroSign; + + m_info = internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue; + + m_isInitialized = true; + return *this; +} + +/** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. + * \param w a vector to be incorporated into the decomposition. + * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. + * \sa setZero() + */ +template +template +LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename LDLT::RealScalar& sigma) +{ + typedef typename TranspositionType::StorageIndex IndexType; + const Index size = w.rows(); + if (m_isInitialized) + { + eigen_assert(m_matrix.rows()==size); + } + else + { + m_matrix.resize(size,size); + m_matrix.setZero(); + m_transpositions.resize(size); + for (Index i = 0; i < size; i++) + m_transpositions.coeffRef(i) = IndexType(i); + m_temporary.resize(size); + m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef; + m_isInitialized = true; + } + + internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); + + return *this; +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + _solve_impl_transposed(rhs, dst); +} + +template +template +void LDLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const +{ + // dst = P b + dst = m_transpositions * rhs; + + // dst = L^-1 (P b) + // dst = L^-*T (P b) + matrixL().template conjugateIf().solveInPlace(dst); + + // dst = D^-* (L^-1 P b) + // dst = D^-1 (L^-*T P b) + // more precisely, use pseudo-inverse of D (see bug 241) + using std::abs; + const typename Diagonal::RealReturnType vecD(vectorD()); + // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min()) + // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS: + // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits::epsilon(),RealScalar(1) / NumTraits::highest()); + // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest + // diagonal element is not well justified and leads to numerical issues in some cases. + // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. + // Using numeric_limits::min() gives us more robustness to denormals. 
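+  // (Added note: the loop below therefore implements the pseudo-inverse of D:
+  // entries of D at or below the cutoff are treated as exact zeros and the
+  // corresponding rows of the intermediate solution are zeroed out.)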
+ RealScalar tolerance = (std::numeric_limits::min)(); + for (Index i = 0; i < vecD.size(); ++i) + { + if(abs(vecD(i)) > tolerance) + dst.row(i) /= vecD(i); + else + dst.row(i).setZero(); + } + + // dst = L^-* (D^-* L^-1 P b) + // dst = L^-T (D^-1 L^-*T P b) + matrixL().transpose().template conjugateIf().solveInPlace(dst); + + // dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b + // dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b + dst = m_transpositions.transpose() * dst; +} +#endif + +/** \internal use x = ldlt_object.solve(x); + * + * This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. + * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LDLT::solve(), MatrixBase::ldlt() + */ +template +template +bool LDLT::solveInPlace(MatrixBase &bAndX) const +{ + eigen_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_matrix.rows() == bAndX.rows()); + + bAndX = this->solve(bAndX); + + return true; +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: P^T L D L^* P. + * This function is provided for debug purpose. */ +template +MatrixType LDLT::reconstructedMatrix() const +{ + eigen_assert(m_isInitialized && "LDLT is not initialized."); + const Index size = m_matrix.rows(); + MatrixType res(size,size); + + // P + res.setIdentity(); + res = transpositionsP() * res; + // L^* P + res = matrixU() * res; + // D(L^*P) + res = vectorD().real().asDiagonal() * res; + // L(DL^*P) + res = matrixL() * res; + // P^T (LDL^*P) + res = transpositionsP().transpose() * res; + + return res; +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + * \sa MatrixBase::ldlt() + */ +template +inline const LDLT::PlainObject, UpLo> +SelfAdjointView::ldlt() const +{ + return LDLT(m_matrix); +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + * \sa SelfAdjointView::ldlt() + */ +template +inline const LDLT::PlainObject> +MatrixBase::ldlt() const +{ + return LDLT(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_LDLT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT.h new file mode 100644 index 000000000..8c9b2b398 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT.h @@ -0,0 +1,558 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
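+// Illustrative aside (added note, not part of the upstream file): the
+// convenience methods defined at the end of LDLT.h above enable one-line
+// factorizations; A is an assumed symmetric MatrixXd and b a matching vector:
+//
+//   Eigen::VectorXd x = A.selfadjointView<Eigen::Lower>().ldlt().solve(b);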
+ +#ifndef EIGEN_LLT_H +#define EIGEN_LLT_H + +namespace Eigen { + +namespace internal{ + +template struct traits > + : traits<_MatrixType> +{ + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + typedef int StorageIndex; + enum { Flags = 0 }; +}; + +template struct LLT_Traits; +} + +/** \ingroup Cholesky_Module + * + * \class LLT + * + * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features + * + * \tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition + * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. + * + * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite + * matrix A such that A = LL^* = U^*U, where L is lower triangular. + * + * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, + * for that purpose, we recommend the Cholesky decomposition without square root which is more stable + * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other + * situations like generalised eigen problems with hermitian matrices. + * + * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, + * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations + * has a solution. + * + * Example: \include LLT_example.cpp + * Output: \verbinclude LLT_example.out + * + * \b Performance: for best performance, it is recommended to use a column-major storage format + * with the Lower triangular part (the default), or, equivalently, a row-major storage format + * with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization + * step, and rank-updates can be up to 3 times slower. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * Note that during the decomposition, only the lower (or upper, as defined by _UpLo) triangular part of A is considered. + * Therefore, the strict lower part does not have to store correct values. + * + * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT + */ +template class LLT + : public SolverBase > +{ + public: + typedef _MatrixType MatrixType; + typedef SolverBase Base; + friend class SolverBase; + + EIGEN_GENERIC_PUBLIC_INTERFACE(LLT) + enum { + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + enum { + PacketSize = internal::packet_traits::size, + AlignmentMask = int(PacketSize)-1, + UpLo = _UpLo + }; + + typedef internal::LLT_Traits Traits; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LLT::compute(const MatrixType&). + */ + LLT() : m_matrix(), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. 
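+ *
+ * A short usage sketch (added note, not from the upstream documentation;
+ * A is an assumed symmetric positive definite matrix, b a matching vector):
+ * \code
+ * Eigen::LLT<Eigen::MatrixXd> llt(3);  // preallocate storage for a 3x3 problem
+ * llt.compute(A);
+ * if (llt.info() == Eigen::Success)
+ *   Eigen::VectorXd x = llt.solve(b);
+ * \endcode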
+ * \sa LLT() + */ + explicit LLT(Index size) : m_matrix(size, size), + m_isInitialized(false) {} + + template + explicit LLT(const EigenBase& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_isInitialized(false) + { + compute(matrix.derived()); + } + + /** \brief Constructs a LLT factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when + * \c MatrixType is a Eigen::Ref. + * + * \sa LLT(const EigenBase&) + */ + template + explicit LLT(EigenBase& matrix) + : m_matrix(matrix.derived()), + m_isInitialized(false) + { + compute(matrix.derived()); + } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getL(m_matrix); + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * Since this LLT class assumes anyway that the matrix A is invertible, the solution + * theoretically exists and is unique regardless of b. + * + * Example: \include LLT_solve.cpp + * Output: \verbinclude LLT_solve.out + * + * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() + */ + template + inline const Solve + solve(const MatrixBase& b) const; + #endif + + template + void solveInPlace(const MatrixBase &bAndX) const; + + template + LLT& compute(const EigenBase& matrix); + + /** \returns an estimate of the reciprocal condition number of the matrix of + * which \c *this is the Cholesky decomposition. + */ + RealScalar rcond() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); + return internal::rcond_estimate_helper(m_l1_norm, *this); + } + + /** \returns the LLT decomposition matrix + * + * TODO: document the storage layout + */ + inline const MatrixType& matrixLLT() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return m_matrix; + } + + MatrixType reconstructedMatrix() const; + + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the matrix.appears not to be positive definite. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return m_info; + } + + /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. 
+ * + * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: + * \code x = decomposition.adjoint().solve(b) \endcode + */ + const LLT& adjoint() const EIGEN_NOEXCEPT { return *this; }; + + inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + template + LLT & rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void _solve_impl(const RhsType &rhs, DstType &dst) const; + + template + void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + /** \internal + * Used to compute and store L + * The strict upper part is not used and even not initialized. + */ + MatrixType m_matrix; + RealScalar m_l1_norm; + bool m_isInitialized; + ComputationInfo m_info; +}; + +namespace internal { + +template struct llt_inplace; + +template +static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) +{ + using std::sqrt; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::ColXpr ColXpr; + typedef typename internal::remove_all::type ColXprCleaned; + typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; + typedef Matrix TempVectorType; + typedef typename TempVectorType::SegmentReturnType TempVecSegment; + + Index n = mat.cols(); + eigen_assert(mat.rows()==n && vec.size()==n); + + TempVectorType temp; + + if(sigma>0) + { + // This version is based on Givens rotations. 
+ // It is faster than the other one below, but only works for updates, + // i.e., for sigma > 0 + temp = sqrt(sigma) * vec; + + for(Index i=0; i g; + g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); + + Index rs = n-i-1; + if(rs>0) + { + ColXprSegment x(mat.col(i).tail(rs)); + TempVecSegment y(temp.tail(rs)); + apply_rotation_in_the_plane(x, y, g); + } + } + } + else + { + temp = vec; + RealScalar beta = 1; + for(Index j=0; j struct llt_inplace +{ + typedef typename NumTraits::Real RealScalar; + template + static Index unblocked(MatrixType& mat) + { + using std::sqrt; + + eigen_assert(mat.rows()==mat.cols()); + const Index size = mat.rows(); + for(Index k = 0; k < size; ++k) + { + Index rs = size-k-1; // remaining size + + Block A21(mat,k+1,k,rs,1); + Block A10(mat,k,0,1,k); + Block A20(mat,k+1,0,rs,k); + + RealScalar x = numext::real(mat.coeff(k,k)); + if (k>0) x -= A10.squaredNorm(); + if (x<=RealScalar(0)) + return k; + mat.coeffRef(k,k) = x = sqrt(x); + if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); + if (rs>0) A21 /= x; + } + return -1; + } + + template + static Index blocked(MatrixType& m) + { + eigen_assert(m.rows()==m.cols()); + Index size = m.rows(); + if(size<32) + return unblocked(m); + + Index blockSize = size/8; + blockSize = (blockSize/16)*16; + blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); + + for (Index k=0; k A11(m,k, k, bs,bs); + Block A21(m,k+bs,k, rs,bs); + Block A22(m,k+bs,k+bs,rs,rs); + + Index ret; + if((ret=unblocked(A11))>=0) return k+ret; + if(rs>0) A11.adjoint().template triangularView().template solveInPlace(A21); + if(rs>0) A22.template selfadjointView().rankUpdate(A21,typename NumTraits::Literal(-1)); // bottleneck + } + return -1; + } + + template + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); + } +}; + +template struct llt_inplace +{ + typedef typename NumTraits::Real RealScalar; + + template + static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) + { + Transpose matt(mat); + return llt_inplace::unblocked(matt); + } + template + static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) + { + Transpose matt(mat); + return llt_inplace::blocked(matt); + } + template + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + Transpose matt(mat); + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); + } +}; + +template struct LLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } + static bool inplace_decomposition(MatrixType& m) + { return llt_inplace::blocked(m)==-1; } +}; + +template struct LLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } + static bool inplace_decomposition(MatrixType& m) + { return llt_inplace::blocked(m)==-1; } +}; + +} // end namespace internal + +/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix + * + * \returns a reference to *this + * + * Example: \include TutorialLinAlgComputeTwice.cpp + * Output: \verbinclude TutorialLinAlgComputeTwice.out + */ +template +template +LLT& LLT::compute(const EigenBase& a) +{ + 
check_template_parameters();
+
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  m_matrix.resize(size, size);
+  if (!internal::is_same_dense(m_matrix, a.derived()))
+    m_matrix = a.derived();
+
+  // Compute matrix L1 norm = max abs column sum.
+  m_l1_norm = RealScalar(0);
+  // TODO move this code to SelfAdjointView
+  for (Index col = 0; col < size; ++col) {
+    RealScalar abs_col_sum;
+    if (_UpLo == Lower)
+      abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
+    else
+      abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
+    if (abs_col_sum > m_l1_norm)
+      m_l1_norm = abs_col_sum;
+  }
+
+  m_isInitialized = true;
+  bool ok = Traits::inplace_decomposition(m_matrix);
+  m_info = ok ? Success : NumericalIssue;
+
+  return *this;
+}
+
+/** Performs a rank one update (or downdate) of the current decomposition.
+  * If A = LL^* before the rank one update,
+  * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector
+  * of the same dimension.
+  */
+template<typename _MatrixType, int _UpLo>
+template<typename VectorType>
+LLT<_MatrixType,_UpLo> & LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
+  eigen_assert(v.size()==m_matrix.cols());
+  eigen_assert(m_isInitialized);
+  if(internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix,v,sigma)>=0)
+    m_info = NumericalIssue;
+  else
+    m_info = Success;
+
+  return *this;
+}
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename _MatrixType, int _UpLo>
+template<typename RhsType, typename DstType>
+void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
+{
+  _solve_impl_transposed<true>(rhs, dst);
+}
+
+template<typename _MatrixType, int _UpLo>
+template<bool Conjugate, typename RhsType, typename DstType>
+void LLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+    dst = rhs;
+
+    matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
+    matrixU().template conjugateIf<!Conjugate>().solveInPlace(dst);
+}
+#endif
+
+/** \internal use x = llt_object.solve(x);
+  *
+  * This is the \em in-place version of solve().
+  *
+  * \param bAndX represents both the right-hand side matrix b and result x.
+  *
+  * This version avoids a copy when the right hand side matrix b is not needed anymore.
+  *
+  * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
+  * This function will const_cast it, so constness isn't honored here.
+  *
+  * \sa LLT::solve(), MatrixBase::llt()
+  */
+template<typename _MatrixType, int _UpLo>
+template<typename Derived>
+void LLT<_MatrixType,_UpLo>::solveInPlace(const MatrixBase<Derived> &bAndX) const
+{
+  eigen_assert(m_isInitialized && "LLT is not initialized.");
+  eigen_assert(m_matrix.rows()==bAndX.rows());
+  matrixL().solveInPlace(bAndX);
+  matrixU().solveInPlace(bAndX);
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: L L^*.
+ * This function is provided for debug purpose.
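+ *
+ * (Added note on the rank-one update defined above, illustrative only:
+ * \code
+ * Eigen::LLT<Eigen::MatrixXd> llt(A);  // A symmetric positive definite
+ * llt.rankUpdate(v, 1.0);              // decomposition of A + v * v.adjoint()
+ * Eigen::VectorXd x = llt.solve(b);    // solve against the updated matrix
+ * \endcode
+ * where v is a vector of matching dimension.)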
*/ +template +MatrixType LLT::reconstructedMatrix() const +{ + eigen_assert(m_isInitialized && "LLT is not initialized."); + return matrixL() * matrixL().adjoint().toDenseMatrix(); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + * \sa SelfAdjointView::llt() + */ +template +inline const LLT::PlainObject> +MatrixBase::llt() const +{ + return LLT(derived()); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + * \sa SelfAdjointView::llt() + */ +template +inline const LLT::PlainObject, UpLo> +SelfAdjointView::llt() const +{ + return LLT(m_matrix); +} + +} // end namespace Eigen + +#endif // EIGEN_LLT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT_LAPACKE.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT_LAPACKE.h new file mode 100644 index 000000000..bc6489e69 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Cholesky/LLT_LAPACKE.h @@ -0,0 +1,99 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * LLt decomposition based on LAPACKE_?potrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_LLT_LAPACKE_H +#define EIGEN_LLT_LAPACKE_H + +namespace Eigen { + +namespace internal { + +template struct lapacke_llt; + +#define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \ +template<> struct lapacke_llt \ +{ \ + template \ + static inline Index potrf(MatrixType& m, char uplo) \ + { \ + lapack_int matrix_order; \ + lapack_int size, lda, info, StorageOrder; \ + EIGTYPE* a; \ + eigen_assert(m.rows()==m.cols()); \ + /* Set up parameters for ?potrf */ \ + size = convert_index(m.rows()); \ + StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ + matrix_order = StorageOrder==RowMajor ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + a = &(m.coeffRef(0,0)); \ + lda = convert_index(m.outerStride()); \ +\ + info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \ + info = (info==0) ? -1 : info>0 ? info-1 : size; \ + return info; \ + } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static Index blocked(MatrixType& m) \ + { \ + return lapacke_llt::potrf(m, 'L'); \ + } \ + template \ + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static Index blocked(MatrixType& m) \ + { \ + return lapacke_llt::potrf(m, 'U'); \ + } \ + template \ + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { \ + Transpose matt(mat); \ + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ + } \ +}; + +EIGEN_LAPACKE_LLT(double, double, d) +EIGEN_LAPACKE_LLT(float, float, s) +EIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z) +EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LLT_LAPACKE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/CholmodSupport/CholmodSupport.h b/src/reference/ai_poc/anomaly_det/Eigen/src/CholmodSupport/CholmodSupport.h new file mode 100644 index 000000000..adaf52858 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/CholmodSupport/CholmodSupport.h @@ -0,0 +1,682 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CHOLMODSUPPORT_H +#define EIGEN_CHOLMODSUPPORT_H + +namespace Eigen { + +namespace internal { + +template struct cholmod_configure_matrix; + +template<> struct cholmod_configure_matrix { + template + static void run(CholmodType& mat) { + mat.xtype = CHOLMOD_REAL; + mat.dtype = CHOLMOD_DOUBLE; + } +}; + +template<> struct cholmod_configure_matrix > { + template + static void run(CholmodType& mat) { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = CHOLMOD_DOUBLE; + } +}; + +// Other scalar types are not yet supported by Cholmod +// template<> struct cholmod_configure_matrix { +// template +// static void run(CholmodType& mat) { +// mat.xtype = CHOLMOD_REAL; +// mat.dtype = CHOLMOD_SINGLE; +// } +// }; +// +// template<> struct cholmod_configure_matrix > { +// template +// static void run(CholmodType& mat) { +// mat.xtype = CHOLMOD_COMPLEX; +// mat.dtype = CHOLMOD_SINGLE; +// } +// }; + +} // namespace internal + +/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. + * Note that the data are shared. 
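+ *
+ * (Added note: since the storage is shared rather than copied, the returned
+ * cholmod_sparse is only valid while the wrapped Eigen matrix is alive and
+ * its storage is not reallocated.)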
+ */ +template +cholmod_sparse viewAsCholmod(Ref > mat) +{ + cholmod_sparse res; + res.nzmax = mat.nonZeros(); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.p = mat.outerIndexPtr(); + res.i = mat.innerIndexPtr(); + res.x = mat.valuePtr(); + res.z = 0; + res.sorted = 1; + if(mat.isCompressed()) + { + res.packed = 1; + res.nz = 0; + } + else + { + res.packed = 0; + res.nz = mat.innerNonZeroPtr(); + } + + res.dtype = 0; + res.stype = -1; + + if (internal::is_same<_StorageIndex,int>::value) + { + res.itype = CHOLMOD_INT; + } + else if (internal::is_same<_StorageIndex,SuiteSparse_long>::value) + { + res.itype = CHOLMOD_LONG; + } + else + { + eigen_assert(false && "Index type not supported yet"); + } + + // setup res.xtype + internal::cholmod_configure_matrix<_Scalar>::run(res); + + res.stype = 0; + + return res; +} + +template +const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat) +{ + cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); + return res; +} + +template +const cholmod_sparse viewAsCholmod(const SparseVector<_Scalar,_Options,_Index>& mat) +{ + cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); + return res; +} + +/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. + * The data are not copied but shared. */ +template +cholmod_sparse viewAsCholmod(const SparseSelfAdjointView, UpLo>& mat) +{ + cholmod_sparse res = viewAsCholmod(Ref >(mat.matrix().const_cast_derived())); + + if(UpLo==Upper) res.stype = 1; + if(UpLo==Lower) res.stype = -1; + // swap stype for rowmajor matrices (only works for real matrices) + EIGEN_STATIC_ASSERT((_Options & RowMajorBit) == 0 || NumTraits<_Scalar>::IsComplex == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + if(_Options & RowMajorBit) res.stype *=-1; + + return res; +} + +/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. + * The data are not copied but shared. */ +template +cholmod_dense viewAsCholmod(MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT((internal::traits::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + typedef typename Derived::Scalar Scalar; + + cholmod_dense res; + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.nzmax = res.nrow * res.ncol; + res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); + res.x = (void*)(mat.derived().data()); + res.z = 0; + + internal::cholmod_configure_matrix::run(res); + + return res; +} + +/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. + * The data are not copied but shared. 
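+ *
+ * (Added note, illustrative only: most users go through the solver classes of
+ * this module rather than these low-level views, e.g.
+ * \code
+ * Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver(A);
+ * Eigen::VectorXd x = solver.solve(b);
+ * \endcode
+ * where A is a selfadjoint sparse matrix and b a matching right-hand side.)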
*/ +template +MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) +{ + return MappedSparseMatrix + (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], + static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); +} + +namespace internal { + +// template specializations for int and long that call the correct cholmod method + +#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \ + template inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \ + template<> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_l_ ## name (&Common); } + +#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \ + template inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \ + template<> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); } + +EIGEN_CHOLMOD_SPECIALIZE0(int, start) +EIGEN_CHOLMOD_SPECIALIZE0(int, finish) + +EIGEN_CHOLMOD_SPECIALIZE1(int, free_factor, cholmod_factor*, L) +EIGEN_CHOLMOD_SPECIALIZE1(int, free_dense, cholmod_dense*, X) +EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A) + +EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A) + +template inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); } +template<> inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); } + +template inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); } +template<> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); } + +template +inline int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); } +template<> +inline int cm_factorize_p (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); } + +#undef EIGEN_CHOLMOD_SPECIALIZE0 +#undef EIGEN_CHOLMOD_SPECIALIZE1 + +} // namespace internal + + +enum CholmodMode { + CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt +}; + + +/** \ingroup CholmodSupport_Module + * \class CholmodBase + * \brief The base class for the direct Cholesky factorization of Cholmod + * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT + */ +template +class CholmodBase : public SparseSolverBase +{ + protected: + typedef SparseSolverBase Base; + using Base::derived; + using Base::m_isInitialized; + public: + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef MatrixType CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + + CholmodBase() + : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); + m_shiftOffset[0] = 
m_shiftOffset[1] = 0.0; + internal::cm_start(m_cholmod); + } + + explicit CholmodBase(const MatrixType& matrix) + : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); + m_shiftOffset[0] = m_shiftOffset[1] = 0.0; + internal::cm_start(m_cholmod); + compute(matrix); + } + + ~CholmodBase() + { + if(m_cholmodFactor) + internal::cm_free_factor(m_cholmodFactor, m_cholmod); + internal::cm_finish(m_cholmod); + } + + inline StorageIndex cols() const { return internal::convert_index(m_cholmodFactor->n); } + inline StorageIndex rows() const { return internal::convert_index(m_cholmodFactor->n); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + Derived& compute(const MatrixType& matrix) + { + analyzePattern(matrix); + factorize(matrix); + return derived(); + } + + /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + if(m_cholmodFactor) + { + internal::cm_free_factor(m_cholmodFactor, m_cholmod); + m_cholmodFactor = 0; + } + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + m_cholmodFactor = internal::cm_analyze(A, m_cholmod); + + this->m_isInitialized = true; + this->m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + internal::cm_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, m_cholmod); + + // If the factorization failed, minor is the column at which it did. On success minor == n. + this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); + m_factorizationIsOk = true; + } + + /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. + * See the Cholmod user guide for details. */ + cholmod_common& cholmod() { return m_cholmod; } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve_impl(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + EIGEN_UNUSED_VARIABLE(size); + eigen_assert(size==b.rows()); + + // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref. 
+ Ref > b_ref(b.derived()); + + cholmod_dense b_cd = viewAsCholmod(b_ref); + cholmod_dense* x_cd = internal::cm_solve(CHOLMOD_A, *m_cholmodFactor, b_cd, m_cholmod); + if(!x_cd) + { + this->m_info = NumericalIssue; + return; + } + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) + // NOTE Actually, the copy can be avoided by calling cholmod_solve2 instead of cholmod_solve + dest = Matrix::Map(reinterpret_cast(x_cd->x),b.rows(),b.cols()); + internal::cm_free_dense(x_cd, m_cholmod); + } + + /** \internal */ + template + void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + EIGEN_UNUSED_VARIABLE(size); + eigen_assert(size==b.rows()); + + // note: cs stands for Cholmod Sparse + Ref > b_ref(b.const_cast_derived()); + cholmod_sparse b_cs = viewAsCholmod(b_ref); + cholmod_sparse* x_cs = internal::cm_spsolve(CHOLMOD_A, *m_cholmodFactor, b_cs, m_cholmod); + if(!x_cs) + { + this->m_info = NumericalIssue; + return; + } + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) + // NOTE cholmod_spsolve in fact just calls the dense solver for blocks of 4 columns at a time (similar to Eigen's sparse solver) + dest.derived() = viewAsEigen(*x_cs); + internal::cm_free_sparse(x_cs, m_cholmod); + } + #endif // EIGEN_PARSED_BY_DOXYGEN + + + /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. + * + * During the numerical factorization, an offset term is added to the diagonal coefficients:\n + * \c d_ii = \a offset + \c d_ii + * + * The default is \a offset=0. + * + * \returns a reference to \c *this. + */ + Derived& setShift(const RealScalar& offset) + { + m_shiftOffset[0] = double(offset); + return derived(); + } + + /** \returns the determinant of the underlying matrix from the current factorization */ + Scalar determinant() const + { + using std::exp; + return exp(logDeterminant()); + } + + /** \returns the log determinant of the underlying matrix from the current factorization */ + Scalar logDeterminant() const + { + using std::log; + using numext::real; + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + + RealScalar logDet = 0; + Scalar *x = static_cast(m_cholmodFactor->x); + if (m_cholmodFactor->is_super) + { + // Supernodal factorization stored as a packed list of dense column-major blocs, + // as described by the following structure: + + // super[k] == index of the first column of the j-th super node + StorageIndex *super = static_cast(m_cholmodFactor->super); + // pi[k] == offset to the description of row indices + StorageIndex *pi = static_cast(m_cholmodFactor->pi); + // px[k] == offset to the respective dense block + StorageIndex *px = static_cast(m_cholmodFactor->px); + + Index nb_super_nodes = m_cholmodFactor->nsuper; + for (Index k=0; k < nb_super_nodes; ++k) + { + StorageIndex ncols = super[k + 1] - super[k]; + StorageIndex nrows = pi[k + 1] - pi[k]; + + Map, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1)); + logDet += sk.real().log().sum(); + } + } + else + { + // Simplicial factorization stored as standard CSC matrix. 
+ StorageIndex *p = static_cast(m_cholmodFactor->p); + Index size = m_cholmodFactor->n; + for (Index k=0; kis_ll) + logDet *= 2.0; + return logDet; + }; + + template + void dumpMemory(Stream& /*s*/) + {} + + protected: + mutable cholmod_common m_cholmod; + cholmod_factor* m_cholmodFactor; + double m_shiftOffset[2]; + mutable ComputationInfo m_info; + int m_factorizationIsOk; + int m_analysisIsOk; +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLLT + * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. + * + * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT + */ +template +class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLLT() : Base() { init(); } + + CholmodSimplicialLLT(const MatrixType& matrix) : Base() + { + init(); + this->compute(matrix); + } + + ~CholmodSimplicialLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + } +}; + + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLDLT + * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. 
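For the simplicial solvers documented here, usage follows the standard sparse-solver concept; a hedged sketch with a tiny hypothetical SPD system (double Scalar only, per the warning above):

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 4.0; A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 1.0; A.insert(1, 1) = 3.0;
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 1.0, 2.0;

  Eigen::CholmodSimplicialLLT<Eigen::SparseMatrix<double>> llt(A);  // runs compute()
  if (llt.info() != Eigen::Success) return 1;  // NumericalIssue if A is not SPD
  Eigen::VectorXd x = llt.solve(b);
  std::cout << "logdet(A) = " << llt.logDeterminant() << "\n" << x << std::endl;
  return 0;
}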
+ * + * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT + */ +template +class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLDLT() : Base() { init(); } + + CholmodSimplicialLDLT(const MatrixType& matrix) : Base() + { + init(); + this->compute(matrix); + } + + ~CholmodSimplicialLDLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSupernodalLLT + * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization + * using the Cholmod library. + * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. + * + * \sa \ref TutorialSparseSolverConcept + */ +template +class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSupernodalLLT() : Base() { init(); } + + CholmodSupernodalLLT(const MatrixType& matrix) : Base() + { + init(); + this->compute(matrix); + } + + ~CholmodSupernodalLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodDecomposition + * \brief A general Cholesky factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization + * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * This variant permits to change the underlying Cholesky method at runtime. + * On the other hand, it does not provide access to the result of the factorization. + * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. 
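The supernodal variant above exposes the same interface and, as stated for the whole family, the right-hand side may itself be sparse. A sketch with a hypothetical 2x2 system:

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>

int main() {
  Eigen::SparseMatrix<double> A(2, 2), B(2, 1);
  A.insert(0, 0) = 4.0; A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 1.0; A.insert(1, 1) = 3.0;
  B.insert(0, 0) = 1.0;
  A.makeCompressed(); B.makeCompressed();

  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double>> chol;
  chol.compute(A);
  Eigen::SparseMatrix<double> X = chol.solve(B);  // sparse in, sparse out
  return chol.info() == Eigen::Success ? 0 : 1;
}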
+ * + * \sa \ref TutorialSparseSolverConcept + */ +template +class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodDecomposition() : Base() { init(); } + + CholmodDecomposition(const MatrixType& matrix) : Base() + { + init(); + this->compute(matrix); + } + + ~CholmodDecomposition() {} + + void setMode(CholmodMode mode) + { + switch(mode) + { + case CholmodAuto: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + break; + case CholmodSimplicialLLt: + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + break; + case CholmodSupernodalLLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + break; + case CholmodLDLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + break; + default: + break; + } + } + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CHOLMODSUPPORT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArithmeticSequence.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArithmeticSequence.h new file mode 100644 index 000000000..b6200fac1 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArithmeticSequence.h @@ -0,0 +1,413 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
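Closing out the CholmodSupport header above: CholmodDecomposition defers the choice of factorization to runtime via setMode(), which only adjusts the cholmod_common flags shown in its body, and setShift() adds an offset to the diagonal during numeric factorization. A minimal sketch:

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 4.0; A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 1.0; A.insert(1, 1) = 3.0;
  A.makeCompressed();

  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double>> chol;
  chol.setMode(Eigen::CholmodSupernodalLLt);  // or CholmodSimplicialLLt / CholmodLDLt
  chol.setShift(1e-10);                       // optional: d_ii becomes offset + d_ii
  chol.compute(A);
  return chol.info() == Eigen::Success ? 0 : 1;
}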
+ +#ifndef EIGEN_ARITHMETIC_SEQUENCE_H +#define EIGEN_ARITHMETIC_SEQUENCE_H + +namespace Eigen { + +namespace internal { + +#if (!EIGEN_HAS_CXX11) || !((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48) +template struct aseq_negate {}; + +template<> struct aseq_negate { + typedef Index type; +}; + +template struct aseq_negate > { + typedef FixedInt<-N> type; +}; + +// Compilation error in the following case: +template<> struct aseq_negate > {}; + +template::value, + bool SizeIsSymbolic =symbolic::is_symbolic::value> +struct aseq_reverse_first_type { + typedef Index type; +}; + +template +struct aseq_reverse_first_type { + typedef symbolic::AddExpr > >, + symbolic::ValueExpr > + > type; +}; + +template +struct aseq_reverse_first_type_aux { + typedef Index type; +}; + +template +struct aseq_reverse_first_type_aux::type> { + typedef FixedInt<(SizeType::value-1)*IncrType::value> type; +}; + +template +struct aseq_reverse_first_type { + typedef typename aseq_reverse_first_type_aux::type Aux; + typedef symbolic::AddExpr > type; +}; + +template +struct aseq_reverse_first_type { + typedef symbolic::AddExpr > >, + symbolic::ValueExpr >, + symbolic::ValueExpr<> > type; +}; +#endif + +// Helper to cleanup the type of the increment: +template struct cleanup_seq_incr { + typedef typename cleanup_index_type::type type; +}; + +} + +//-------------------------------------------------------------------------------- +// seq(first,last,incr) and seqN(first,size,incr) +//-------------------------------------------------------------------------------- + +template > +class ArithmeticSequence; + +template +ArithmeticSequence::type, + typename internal::cleanup_index_type::type, + typename internal::cleanup_seq_incr::type > +seqN(FirstType first, SizeType size, IncrType incr); + +/** \class ArithmeticSequence + * \ingroup Core_Module + * + * This class represents an arithmetic progression \f$ a_0, a_1, a_2, ..., a_{n-1}\f$ defined by + * its \em first value \f$ a_0 \f$, its \em size (aka length) \em n, and the \em increment (aka stride) + * that is equal to \f$ a_{i+1}-a_{i}\f$ for any \em i. + * + * It is internally used as the return type of the Eigen::seq and Eigen::seqN functions, and as the input arguments + * of DenseBase::operator()(const RowIndices&, const ColIndices&), and most of the time this is the + * only way it is used. + * + * \tparam FirstType type of the first element, usually an Index, + * but internally it can be a symbolic expression + * \tparam SizeType type representing the size of the sequence, usually an Index + * or a compile time integral constant. 
Internally, it can also be a symbolic expression + * \tparam IncrType type of the increment, can be a runtime Index, or a compile time integral constant (default is compile-time 1) + * + * \sa Eigen::seq, Eigen::seqN, DenseBase::operator()(const RowIndices&, const ColIndices&), class IndexedView + */ +template +class ArithmeticSequence +{ +public: + ArithmeticSequence(FirstType first, SizeType size) : m_first(first), m_size(size) {} + ArithmeticSequence(FirstType first, SizeType size, IncrType incr) : m_first(first), m_size(size), m_incr(incr) {} + + enum { + SizeAtCompileTime = internal::get_fixed_value::value, + IncrAtCompileTime = internal::get_fixed_value::value + }; + + /** \returns the size, i.e., number of elements, of the sequence */ + Index size() const { return m_size; } + + /** \returns the first element \f$ a_0 \f$ in the sequence */ + Index first() const { return m_first; } + + /** \returns the value \f$ a_i \f$ at index \a i in the sequence. */ + Index operator[](Index i) const { return m_first + i * m_incr; } + + const FirstType& firstObject() const { return m_first; } + const SizeType& sizeObject() const { return m_size; } + const IncrType& incrObject() const { return m_incr; } + +protected: + FirstType m_first; + SizeType m_size; + IncrType m_incr; + +public: + +#if EIGEN_HAS_CXX11 && ((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48) + auto reverse() const -> decltype(Eigen::seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr)) { + return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr); + } +#else +protected: + typedef typename internal::aseq_negate::type ReverseIncrType; + typedef typename internal::aseq_reverse_first_type::type ReverseFirstType; +public: + ArithmeticSequence + reverse() const { + return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr); + } +#endif +}; + +/** \returns an ArithmeticSequence starting at \a first, of length \a size, and increment \a incr + * + * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ +template +ArithmeticSequence::type,typename internal::cleanup_index_type::type,typename internal::cleanup_seq_incr::type > +seqN(FirstType first, SizeType size, IncrType incr) { + return ArithmeticSequence::type,typename internal::cleanup_index_type::type,typename internal::cleanup_seq_incr::type>(first,size,incr); +} + +/** \returns an ArithmeticSequence starting at \a first, of length \a size, and unit increment + * + * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */ +template +ArithmeticSequence::type,typename internal::cleanup_index_type::type > +seqN(FirstType first, SizeType size) { + return ArithmeticSequence::type,typename internal::cleanup_index_type::type>(first,size); +} + +#ifdef EIGEN_PARSED_BY_DOXYGEN + +/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and with positive (or negative) increment \a incr + * + * It is essentially an alias to: + * \code + * seqN(f, (l-f+incr)/incr, incr); + * \endcode + * + * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) + */ +template +auto seq(FirstType f, LastType l, IncrType incr); + +/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and unit increment + * + * It is essentially an alias to: + * \code + * seqN(f,l-f+1); + * \endcode + * + * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) + */ +template +auto seq(FirstType f, LastType l); + +#else // EIGEN_PARSED_BY_DOXYGEN + +#if EIGEN_HAS_CXX11 +template +auto seq(FirstType f, LastType l) -> decltype(seqN(typename 
internal::cleanup_index_type::type(f), + ( typename internal::cleanup_index_type::type(l) + - typename internal::cleanup_index_type::type(f)+fix<1>()))) +{ + return seqN(typename internal::cleanup_index_type::type(f), + (typename internal::cleanup_index_type::type(l) + -typename internal::cleanup_index_type::type(f)+fix<1>())); +} + +template +auto seq(FirstType f, LastType l, IncrType incr) + -> decltype(seqN(typename internal::cleanup_index_type::type(f), + ( typename internal::cleanup_index_type::type(l) + - typename internal::cleanup_index_type::type(f)+typename internal::cleanup_seq_incr::type(incr) + ) / typename internal::cleanup_seq_incr::type(incr), + typename internal::cleanup_seq_incr::type(incr))) +{ + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + return seqN(typename internal::cleanup_index_type::type(f), + ( typename internal::cleanup_index_type::type(l) + -typename internal::cleanup_index_type::type(f)+CleanedIncrType(incr)) / CleanedIncrType(incr), + CleanedIncrType(incr)); +} + +#else // EIGEN_HAS_CXX11 + +template +typename internal::enable_if::value || symbolic::is_symbolic::value), + ArithmeticSequence::type,Index> >::type +seq(FirstType f, LastType l) +{ + return seqN(typename internal::cleanup_index_type::type(f), + Index((typename internal::cleanup_index_type::type(l)-typename internal::cleanup_index_type::type(f)+fix<1>()))); +} + +template +typename internal::enable_if::value, + ArithmeticSequence,symbolic::ValueExpr<> >, + symbolic::ValueExpr > > > >::type +seq(const symbolic::BaseExpr &f, LastType l) +{ + return seqN(f.derived(),(typename internal::cleanup_index_type::type(l)-f.derived()+fix<1>())); +} + +template +typename internal::enable_if::value, + ArithmeticSequence::type, + symbolic::AddExpr >, + symbolic::ValueExpr > > > >::type +seq(FirstType f, const symbolic::BaseExpr &l) +{ + return seqN(typename internal::cleanup_index_type::type(f),(l.derived()-typename internal::cleanup_index_type::type(f)+fix<1>())); +} + +template +ArithmeticSequence >,symbolic::ValueExpr > > > +seq(const symbolic::BaseExpr &f, const symbolic::BaseExpr &l) +{ + return seqN(f.derived(),(l.derived()-f.derived()+fix<1>())); +} + + +template +typename internal::enable_if::value || symbolic::is_symbolic::value), + ArithmeticSequence::type,Index,typename internal::cleanup_seq_incr::type> >::type +seq(FirstType f, LastType l, IncrType incr) +{ + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + return seqN(typename internal::cleanup_index_type::type(f), + Index((typename internal::cleanup_index_type::type(l)-typename internal::cleanup_index_type::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr)), incr); +} + +template +typename internal::enable_if::value, + ArithmeticSequence, + symbolic::ValueExpr<> >, + symbolic::ValueExpr::type> >, + symbolic::ValueExpr::type> >, + typename internal::cleanup_seq_incr::type> >::type +seq(const symbolic::BaseExpr &f, LastType l, IncrType incr) +{ + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + return seqN(f.derived(),(typename internal::cleanup_index_type::type(l)-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr); +} + +template +typename internal::enable_if::value, + ArithmeticSequence::type, + symbolic::QuotientExpr >, + symbolic::ValueExpr::type> >, + symbolic::ValueExpr::type> >, + typename internal::cleanup_seq_incr::type> >::type +seq(FirstType f, const symbolic::BaseExpr &l, IncrType incr) +{ + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + 
return seqN(typename internal::cleanup_index_type::type(f), + (l.derived()-typename internal::cleanup_index_type::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr), incr); +} + +template +ArithmeticSequence >, + symbolic::ValueExpr::type> >, + symbolic::ValueExpr::type> >, + typename internal::cleanup_seq_incr::type> +seq(const symbolic::BaseExpr &f, const symbolic::BaseExpr &l, IncrType incr) +{ + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + return seqN(f.derived(),(l.derived()-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr); +} +#endif // EIGEN_HAS_CXX11 + +#endif // EIGEN_PARSED_BY_DOXYGEN + + +#if EIGEN_HAS_CXX11 || defined(EIGEN_PARSED_BY_DOXYGEN) +/** \cpp11 + * \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr. + * + * It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode + * + * \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ +template +auto lastN(SizeType size, IncrType incr) +-> decltype(seqN(Eigen::last-(size-fix<1>())*incr, size, incr)) +{ + return seqN(Eigen::last-(size-fix<1>())*incr, size, incr); +} + +/** \cpp11 + * \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment. + * + * It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode + * + * \sa lastN(SizeType,IncrType, seqN(FirstType,SizeType), seq(FirstType,LastType) */ +template +auto lastN(SizeType size) +-> decltype(seqN(Eigen::last+fix<1>()-size, size)) +{ + return seqN(Eigen::last+fix<1>()-size, size); +} +#endif + +namespace internal { + +// Convert a symbolic span into a usable one (i.e., remove last/end "keywords") +template +struct make_size_type { + typedef typename internal::conditional::value, Index, T>::type type; +}; + +template +struct IndexedViewCompatibleType, XprSize> { + typedef ArithmeticSequence::type,IncrType> type; +}; + +template +ArithmeticSequence::type,IncrType> +makeIndexedViewCompatible(const ArithmeticSequence& ids, Index size,SpecializedType) { + return ArithmeticSequence::type,IncrType>( + eval_expr_given_size(ids.firstObject(),size),eval_expr_given_size(ids.sizeObject(),size),ids.incrObject()); +} + +template +struct get_compile_time_incr > { + enum { value = get_fixed_value::value }; +}; + +} // end namespace internal + +/** \namespace Eigen::indexing + * \ingroup Core_Module + * + * The sole purpose of this namespace is to be able to import all functions + * and symbols that are expected to be used within operator() for indexing + * and slicing. If you already imported the whole Eigen namespace: + * \code using namespace Eigen; \endcode + * then you are already all set. 
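With the full namespace imported, the sequence helpers drive slicing directly in operator(); a minimal sketch in the Eigen 3.4 indexing style:

#include <Eigen/Dense>
#include <iostream>

int main() {
  using namespace Eigen;
  MatrixXd M(4, 4);
  M.setRandom();
  auto top  = M(seq(0, 2), all);       // rows 0..2, all columns
  auto even = M(all, seqN(0, 2, 2));   // columns 0 and 2: first=0, size=2, incr=2
  auto tail = M(lastN(2), last);       // last two entries of the last column
  std::cout << top << "\n\n" << even << "\n\n" << tail << std::endl;
  return 0;
}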
Otherwise, if you don't want/cannot import + * the whole Eigen namespace, the following line: + * \code using namespace Eigen::indexing; \endcode + * is equivalent to: + * \code + using Eigen::all; + using Eigen::seq; + using Eigen::seqN; + using Eigen::lastN; // c++11 only + using Eigen::last; + using Eigen::lastp1; + using Eigen::fix; + \endcode + */ +namespace indexing { + using Eigen::all; + using Eigen::seq; + using Eigen::seqN; + #if EIGEN_HAS_CXX11 + using Eigen::lastN; + #endif + using Eigen::last; + using Eigen::lastp1; + using Eigen::fix; +} + +} // end namespace Eigen + +#endif // EIGEN_ARITHMETIC_SEQUENCE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Array.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Array.h new file mode 100644 index 000000000..20c789b10 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Array.h @@ -0,0 +1,417 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAY_H +#define EIGEN_ARRAY_H + +namespace Eigen { + +namespace internal { +template +struct traits > : traits > +{ + typedef ArrayXpr XprKind; + typedef ArrayBase > XprBase; +}; +} + +/** \class Array + * \ingroup Core_Module + * + * \brief General-purpose arrays with easy API for coefficient-wise operations + * + * The %Array class is very similar to the Matrix class. It provides + * general-purpose one- and two-dimensional arrays. The difference between the + * %Array and the %Matrix class is primarily in the API: the API for the + * %Array class provides easy access to coefficient-wise operations, while the + * API for the %Matrix class provides easy access to linear-algebra + * operations. + * + * See documentation of class Matrix for detailed information on the template parameters + * storage layout. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. + * + * \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy + */ +template +class Array + : public PlainObjectBase > +{ + public: + + typedef PlainObjectBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Array) + + enum { Options = _Options }; + typedef typename Base::PlainObject PlainObject; + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; + + public: + + using Base::base; + using Base::coeff; + using Base::coeffRef; + + /** + * The usage of + * using Base::operator=; + * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped + * the usage of 'using'. This should be done only for operator=. + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array& operator=(const EigenBase &other) + { + return Base::operator=(other); + } + + /** Set all the entries to \a value. + * \sa DenseBase::setConstant(), DenseBase::fill() + */ + /* This overload is needed because the usage of + * using Base::operator=; + * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped + * the usage of 'using'. This should be done only for operator=. 
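Behaviorally, that scalar overload is just setConstant(): it fills an already-sized array. A sketch of what it permits, assuming the sizes are established beforehand:

#include <Eigen/Dense>

int main() {
  Eigen::Array33d a;  // fixed-size, so the size is already known
  a = 0.5;            // scalar operator=: every coefficient becomes 0.5
  Eigen::ArrayXd b(4);
  b = -1.0;           // works for dynamic arrays too, once they are sized
  return 0;
}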
+ */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array& operator=(const Scalar &value) + { + Base::setConstant(value); + return *this; + } + + /** Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array& operator=(const DenseBase& other) + { + return Base::_set(other); + } + + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array& operator=(const Array& other) + { + return Base::_set(other); + } + + /** Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array() : Base() + { + Base::_check_template_params(); + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + // FIXME is it still needed ?? + /** \internal */ + EIGEN_DEVICE_FUNC + Array(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) + { + Base::_check_template_params(); + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } +#endif + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + Array(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible::value) + : Base(std::move(other)) + { + Base::_check_template_params(); + } + EIGEN_DEVICE_FUNC + Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) + { + Base::operator=(std::move(other)); + return *this; + } +#endif + + #if EIGEN_HAS_CXX11 + /** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + * + * Example: \include Array_variadic_ctor_cxx11.cpp + * Output: \verbinclude Array_variadic_ctor_cxx11.out + * + * \sa Array(const std::initializer_list>&) + * \sa Array(const Scalar&), Array(const Scalar&,const Scalar&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + : Base(a0, a1, a2, a3, args...) {} + + /** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11 + * + * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: + * + * Example: \include Array_initializer_list_23_cxx11.cpp + * Output: \verbinclude Array_initializer_list_23_cxx11.out + * + * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered. + * + * In the case of a compile-time column 1D array, implicit transposition from a single row is allowed. 
+ * Therefore Array{{1,2,3,4,5}} is legal and the more verbose syntax + * Array{{1},{2},{3},{4},{5}} can be avoided: + * + * Example: \include Array_initializer_list_vector_cxx11.cpp + * Output: \verbinclude Array_initializer_list_vector_cxx11.out + * + * In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes, + * and implicit transposition is allowed for compile-time 1D arrays only. + * + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const std::initializer_list>& list) : Base(list) {} + #endif // end EIGEN_HAS_CXX11 + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE explicit Array(const T& x) + { + Base::_check_template_params(); + Base::template _init1(x); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) + { + Base::_check_template_params(); + this->template _init2(val0, val1); + } + + #else + /** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */ + EIGEN_DEVICE_FUNC explicit Array(const Scalar *data); + /** Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass the dimension here, so it makes more sense to use the default + * constructor Array() instead. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE explicit Array(Index dim); + /** constructs an initialized 1x1 Array with the given coefficient + * \sa const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args */ + Array(const Scalar& value); + /** constructs an uninitialized array with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size arrays. For fixed-size arrays, + * it is redundant to pass these parameters, so one should use the default constructor + * Array() instead. */ + Array(Index rows, Index cols); + /** constructs an initialized 2D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ + Array(const Scalar& val0, const Scalar& val1); + #endif // end EIGEN_PARSED_BY_DOXYGEN + + /** constructs an initialized 3D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; + } + /** constructs an initialized 4D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... 
args) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; + m_storage.data()[3] = val3; + } + + /** Copy constructor */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const Array& other) + : Base(other) + { } + + private: + struct PrivateType {}; + public: + + /** \sa MatrixBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Array(const EigenBase &other, + typename internal::enable_if::value, + PrivateType>::type = PrivateType()) + : Base(other.derived()) + { } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT{ return 1; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } + + #ifdef EIGEN_ARRAY_PLUGIN + #include EIGEN_ARRAY_PLUGIN + #endif + + private: + + template + friend struct internal::matrix_swap_impl; +}; + +/** \defgroup arraytypedefs Global array typedefs + * \ingroup Core_Module + * + * %Eigen defines several typedef shortcuts for most common 1D and 2D array types. + * + * The general patterns are the following: + * + * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. + * + * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is + * a fixed-size 1D array of 4 complex floats. + * + * With \cpp11, template alias are also defined for common sizes. + * They follow the same pattern as above except that the scalar type suffix is replaced by a + * template parameter, i.e.: + * - `ArrayRowsCols` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size. + * - `ArraySize` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays. 
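The naming scheme maps directly onto code; a short sketch mixing the classic typedefs with the C++11 template aliases described above:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Array4cf z = Eigen::Array4cf::Zero();  // fixed 1D array of 4 complex floats
  Eigen::ArrayXXf m(2, 3);                      // dynamic 2D array of floats
  m = 1.f;
  Eigen::Array33<double> q;                     // C++11 alias for Array<double,3,3>
  q.setZero();
  Eigen::ArrayX<int> v(5);                      // alias for Array<int,Dynamic,1>
  v.setConstant(7);
  std::cout << z.abs().transpose() << "\n" << m << std::endl;
  return 0;
}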
+ * + * \sa class Array + */ + +#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##SizeSuffix##SizeSuffix##TypeSuffix; \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##Size##X##TypeSuffix; \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##X##Size##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_ARRAY_TYPEDEFS +#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS + +#if EIGEN_HAS_CXX11 + +#define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \ +/** \ingroup arraytypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Array##SizeSuffix##SizeSuffix = Array; \ +/** \ingroup arraytypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Array##SizeSuffix = Array; + +#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \ +/** \ingroup arraytypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Array##Size##X = Array; \ +/** \ingroup arraytypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Array##X##Size = Array; + +EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2) +EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3) +EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4) +EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4) + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS +#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS + +#endif // EIGEN_HAS_CXX11 + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ +using Eigen::Matrix##SizeSuffix##TypeSuffix; \ +using Eigen::Vector##SizeSuffix##TypeSuffix; \ +using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ + +#define EIGEN_USING_ARRAY_TYPEDEFS \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) + +} // end namespace Eigen + +#endif // EIGEN_ARRAY_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayBase.h new file mode 100644 index 000000000..ea3dd1c3b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayBase.h @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYBASE_H +#define EIGEN_ARRAYBASE_H + +namespace Eigen { + +template class MatrixWrapper; + +/** \class ArrayBase + * \ingroup Core_Module + * + * \brief Base class for all 1D and 2D array, and related expressions + * + * An array is similar to a dense vector or matrix. While matrices are mathematical + * objects with well defined linear algebra operators, an array is just a collection + * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence, + * all operations applied to an array are performed coefficient wise. Furthermore, + * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient + * constructors allowing to easily write generic code working for both scalar values + * and arrays. + * + * This class is the base that is inherited by all array expression types. + * + * \tparam Derived is the derived type, e.g., an array or an expression type. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. + * + * \sa class MatrixBase, \ref TopicClassHierarchy + */ +template class ArrayBase + : public DenseBase +{ + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** The base class for a given storage type. */ + typedef ArrayBase StorageBaseType; + + typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::operator-; + using Base::operator=; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Base::PlainObject PlainObject; + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,PlainObject> ConstantReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase +#define EIGEN_DOC_UNARY_ADDONS(X,Y) +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/ArrayCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# include "../plugins/ArrayCwiseBinaryOps.h" +# ifdef EIGEN_ARRAYBASE_PLUGIN +# include EIGEN_ARRAYBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_UNARY_ADDONS + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit 
with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const ArrayBase& other) + { + internal::call_assignment(derived(), other.derived()); + return derived(); + } + + /** Set all the entries to \a value. + * \sa DenseBase::setConstant(), DenseBase::fill() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const Scalar &value) + { Base::setConstant(value); return derived(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const Scalar& scalar); + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const Scalar& scalar); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const ArrayBase& other); + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const ArrayBase& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator*=(const ArrayBase& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator/=(const ArrayBase& other); + + public: + EIGEN_DEVICE_FUNC + ArrayBase& array() { return *this; } + EIGEN_DEVICE_FUNC + const ArrayBase& array() const { return *this; } + + /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array + * \sa MatrixBase::array() */ + EIGEN_DEVICE_FUNC + MatrixWrapper matrix() { return MatrixWrapper(derived()); } + EIGEN_DEVICE_FUNC + const MatrixWrapper matrix() const { return MatrixWrapper(derived()); } + +// template +// inline void evalTo(Dest& dst) const { dst = matrix(); } + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase) + + private: + explicit ArrayBase(Index); + ArrayBase(Index,Index); + template explicit ArrayBase(const ArrayBase&); + protected: + // mixing arrays and matrices is not legal + template Derived& operator+=(const MatrixBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} + // mixing arrays and matrices is not legal + template Derived& operator-=(const MatrixBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} +}; + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +ArrayBase::operator-=(const ArrayBase &other) +{ + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +ArrayBase::operator+=(const ArrayBase& other) +{ + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other coefficient wise. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +ArrayBase::operator*=(const ArrayBase& other) +{ + call_assignment(derived(), other.derived(), internal::mul_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this / \a other coefficient wise. 
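All four in-place operators in this block act coefficient-wise, and the guarded MatrixBase overloads above turn accidental array/matrix mixing into a compile-time error; a sketch of the intended conversions:

#include <Eigen/Dense>

int main() {
  Eigen::ArrayXXd a = Eigen::ArrayXXd::Constant(2, 2, 6.0);
  Eigen::ArrayXXd b = Eigen::ArrayXXd::Constant(2, 2, 3.0);
  a *= b;                          // coefficient-wise product (not a matrix product)
  a /= b;                          // coefficient-wise quotient
  // a += a.matrix();              // would not compile: arrays and matrices don't mix
  Eigen::MatrixXd m = a.matrix();  // explicit view as a linear-algebra object
  a = m.array() + 1.0;             // and back, coefficient-wise
  return 0;
}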
+ * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +ArrayBase::operator/=(const ArrayBase& other) +{ + call_assignment(derived(), other.derived(), internal::div_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ARRAYBASE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayWrapper.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayWrapper.h new file mode 100644 index 000000000..2e9555b53 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ArrayWrapper.h @@ -0,0 +1,209 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYWRAPPER_H +#define EIGEN_ARRAYWRAPPER_H + +namespace Eigen { + +/** \class ArrayWrapper + * \ingroup Core_Module + * + * \brief Expression of a mathematical vector or matrix as an array object + * + * This class is the return type of MatrixBase::array(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::array(), class MatrixWrapper + */ + +namespace internal { +template +struct traits > + : public traits::type > +{ + typedef ArrayXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits::type >::Flags, + LvalueBitFlag = is_lvalue::value ? LvalueBit : 0, + Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag + }; +}; +} + +template +class ArrayWrapper : public ArrayBase > +{ + public: + typedef ArrayBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) + typedef typename internal::remove_all::type NestedExpression; + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + typedef typename internal::ref_selector::non_const_type NestedExpressionType; + + using Base::coeffRef; + + EIGEN_DEVICE_FUNC + explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } + + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + EIGEN_DEVICE_FUNC + inline const Scalar* data() const { return m_expression.data(); } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + return m_expression.coeffRef(rowId, colId); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + return m_expression.coeffRef(index); + } + + template + EIGEN_DEVICE_FUNC + inline void evalTo(Dest& dst) const { dst = m_expression; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + EIGEN_DEVICE_FUNC + void resize(Index 
newSize) { m_expression.resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + EIGEN_DEVICE_FUNC + void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } + + protected: + NestedExpressionType m_expression; +}; + +/** \class MatrixWrapper + * \ingroup Core_Module + * + * \brief Expression of an array as a mathematical vector or matrix + * + * This class is the return type of ArrayBase::matrix(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::matrix(), class ArrayWrapper + */ + +namespace internal { +template +struct traits > + : public traits::type > +{ + typedef MatrixXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits::type >::Flags, + LvalueBitFlag = is_lvalue::value ? LvalueBit : 0, + Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag + }; +}; +} + +template +class MatrixWrapper : public MatrixBase > +{ + public: + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) + typedef typename internal::remove_all::type NestedExpression; + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + typedef typename internal::ref_selector::non_const_type NestedExpressionType; + + using Base::coeffRef; + + EIGEN_DEVICE_FUNC + explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } + + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + EIGEN_DEVICE_FUNC + inline const Scalar* data() const { return m_expression.data(); } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + return m_expression.derived().coeffRef(rowId, colId); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + return m_expression.coeffRef(index); + } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + EIGEN_DEVICE_FUNC + void resize(Index newSize) { m_expression.resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + EIGEN_DEVICE_FUNC + void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } + + protected: + NestedExpressionType m_expression; +}; + +} // end namespace Eigen + +#endif // EIGEN_ARRAYWRAPPER_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign.h new file mode 100644 index 000000000..655412efd --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign.h @@ -0,0 +1,90 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
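Taken together, the two wrapper classes just added are what let Eigen's `.array()` and `.matrix()` switch between coefficient-wise and linear-algebra semantics without copying. A minimal sketch of the user-facing effect, using only the public Eigen API (illustrative aside, not part of this patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3f m = Eigen::Matrix3f::Random();

  // .array() returns an ArrayWrapper view of m: coefficient-wise
  // semantics, no copy of the underlying storage.
  Eigen::Array33f sq = m.array() * m.array();  // coefficient-wise square

  // .matrix() wraps the array back into matrix semantics (MatrixWrapper),
  // again without copying, so it can take part in a matrix product.
  Eigen::Matrix3f g = sq.matrix() * m;

  // The wrappers are lvalue views: writing through one writes into the
  // wrapped object.
  m.array() += 1.0f;

  std::cout << g << "\n\n" << m << std::endl;
}
```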
+// +// Copyright (C) 2007 Michael Olbrich +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ASSIGN_H +#define EIGEN_ASSIGN_H + +namespace Eigen { + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase + ::lazyAssign(const DenseBase& other) +{ + enum{ + SameType = internal::is_same::value + }; + + EIGEN_STATIC_ASSERT_LVALUE(Derived) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + eigen_assert(rows() == other.rows() && cols() == other.cols()); + internal::call_assignment_no_alias(derived(),other.derived()); + + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) +{ + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) +{ + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) +{ + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) +{ + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const EigenBase& other) +{ + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue& other) +{ + other.derived().evalTo(derived()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/AssignEvaluator.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/AssignEvaluator.h new file mode 100644 index 000000000..7d76f0c25 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/AssignEvaluator.h @@ -0,0 +1,1010 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
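Every `operator=` overload above is a thin funnel into `internal::call_assignment`, which is where aliasing handling and automatic transposition live; the file that follows supplies that machinery. The practical consequence for callers, sketched against the public Eigen API (illustrative, not part of this patch):

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd c(4, 4);

  // Plain assignment: call_assignment assumes the product may alias c and
  // evaluates a*b into a temporary first.
  c = a * b;

  // noalias() takes the by-pass path, writing the product directly into c
  // with no temporary.
  c.noalias() = a * b;

  // The assume-aliasing default is what makes this self-assignment safe:
  a = a * a;  // a temporary is created automatically
  return 0;
}
```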
+ +#ifndef EIGEN_ASSIGN_EVALUATOR_H +#define EIGEN_ASSIGN_EVALUATOR_H + +namespace Eigen { + +// This implementation is based on Assign.h + +namespace internal { + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for traversal and unrolling * +***************************************************************************/ + +// copy_using_evaluator_traits is based on assign_traits + +template +struct copy_using_evaluator_traits +{ + typedef typename DstEvaluator::XprType Dst; + typedef typename Dst::Scalar DstScalar; + + enum { + DstFlags = DstEvaluator::Flags, + SrcFlags = SrcEvaluator::Flags + }; + +public: + enum { + DstAlignment = DstEvaluator::Alignment, + SrcAlignment = SrcEvaluator::Alignment, + DstHasDirectAccess = (DstFlags & DirectAccessBit) == DirectAccessBit, + JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment) + }; + +private: + enum { + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(DstFlags)&RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(DstFlags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + RestrictedInnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(InnerSize,MaxPacketSize), + RestrictedLinearSize = EIGEN_SIZE_MIN_PREFER_FIXED(Dst::SizeAtCompileTime,MaxPacketSize), + OuterStride = int(outer_stride_at_compile_time::ret), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime + }; + + // TODO distinguish between linear traversal and inner-traversals + typedef typename find_best_packet::type LinearPacketType; + typedef typename find_best_packet::type InnerPacketType; + + enum { + LinearPacketSize = unpacket_traits::size, + InnerPacketSize = unpacket_traits::size + }; + +public: + enum { + LinearRequiredAlignment = unpacket_traits::alignment, + InnerRequiredAlignment = unpacket_traits::alignment + }; + +private: + enum { + DstIsRowMajor = DstFlags&RowMajorBit, + SrcIsRowMajor = SrcFlags&RowMajorBit, + StorageOrdersAgree = (int(DstIsRowMajor) == int(SrcIsRowMajor)), + MightVectorize = bool(StorageOrdersAgree) + && (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit) + && bool(functor_traits::PacketAccess), + MayInnerVectorize = MightVectorize + && int(InnerSize)!=Dynamic && int(InnerSize)%int(InnerPacketSize)==0 + && int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0 + && (EIGEN_UNALIGNED_VECTORIZE || int(JointAlignment)>=int(InnerRequiredAlignment)), + MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit), + MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize) && bool(DstHasDirectAccess) + && (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic), + /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, + so it's only good for large enough sizes. 
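+       (Added note, not in upstream Eigen: the runtime alignment peeling is a
+       fixed per-assignment cost, so it amortizes over long dynamic objects
+       but would dominate for small fixed-size ones -- which is why the
+       condition above accepts MaxSizeAtCompileTime == Dynamic as "large
+       enough".)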
*/ + MaySliceVectorize = bool(MightVectorize) && bool(DstHasDirectAccess) + && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=(EIGEN_UNALIGNED_VECTORIZE?InnerPacketSize:(3*InnerPacketSize))) + /* slice vectorization can be slow, so we only want it if the slices are big, which is + indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block + in a fixed-size matrix + However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */ + }; + +public: + enum { + Traversal = int(Dst::SizeAtCompileTime) == 0 ? int(AllAtOnceTraversal) // If compile-size is zero, traversing will fail at compile-time. + : (int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize)) ? int(LinearVectorizedTraversal) + : int(MayInnerVectorize) ? int(InnerVectorizedTraversal) + : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(MayLinearize) ? int(LinearTraversal) + : int(DefaultTraversal), + Vectorized = int(Traversal) == InnerVectorizedTraversal + || int(Traversal) == LinearVectorizedTraversal + || int(Traversal) == SliceVectorizedTraversal + }; + + typedef typename conditional::type PacketType; + +private: + enum { + ActualPacketSize = int(Traversal)==LinearVectorizedTraversal ? LinearPacketSize + : Vectorized ? InnerPacketSize + : 1, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize, + MayUnrollCompletely = int(Dst::SizeAtCompileTime) != Dynamic + && int(Dst::SizeAtCompileTime) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit), + MayUnrollInner = int(InnerSize) != Dynamic + && int(InnerSize) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit) + }; + +public: + enum { + Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) + ? ( + int(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(MayUnrollInner) ? int(InnerUnrolling) + : int(NoUnrolling) + ) + : int(Traversal) == int(LinearVectorizedTraversal) + ? ( bool(MayUnrollCompletely) && ( EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment))) + ? int(CompleteUnrolling) + : int(NoUnrolling) ) + : int(Traversal) == int(LinearTraversal) + ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(NoUnrolling) ) +#if EIGEN_UNALIGNED_VECTORIZE + : int(Traversal) == int(SliceVectorizedTraversal) + ? ( bool(MayUnrollInner) ? 
int(InnerUnrolling) + : int(NoUnrolling) ) +#endif + : int(NoUnrolling) + }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() + { + std::cerr << "DstXpr: " << typeid(typename DstEvaluator::XprType).name() << std::endl; + std::cerr << "SrcXpr: " << typeid(typename SrcEvaluator::XprType).name() << std::endl; + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "DstFlags" << " = " << DstFlags << " (" << demangle_flags(DstFlags) << " )" << std::endl; + std::cerr << "SrcFlags" << " = " << SrcFlags << " (" << demangle_flags(SrcFlags) << " )" << std::endl; + std::cerr.unsetf(std::ios::hex); + EIGEN_DEBUG_VAR(DstAlignment) + EIGEN_DEBUG_VAR(SrcAlignment) + EIGEN_DEBUG_VAR(LinearRequiredAlignment) + EIGEN_DEBUG_VAR(InnerRequiredAlignment) + EIGEN_DEBUG_VAR(JointAlignment) + EIGEN_DEBUG_VAR(InnerSize) + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(LinearPacketSize) + EIGEN_DEBUG_VAR(InnerPacketSize) + EIGEN_DEBUG_VAR(ActualPacketSize) + EIGEN_DEBUG_VAR(StorageOrdersAgree) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearize) + EIGEN_DEBUG_VAR(MayInnerVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; + EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost) + EIGEN_DEBUG_VAR(DstEvaluator::CoeffReadCost) + EIGEN_DEBUG_VAR(Dst::SizeAtCompileTime) + EIGEN_DEBUG_VAR(UnrollingLimit) + EIGEN_DEBUG_VAR(MayUnrollCompletely) + EIGEN_DEBUG_VAR(MayUnrollInner) + std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; + std::cerr << std::endl; + } +#endif +}; + +/*************************************************************************** +* Part 2 : meta-unrollers +***************************************************************************/ + +/************************ +*** Default traversal *** +************************/ + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +{ + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? 
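+  // Added commentary (not in upstream Eigen): Index is a linear
+  // compile-time counter over all coefficients; the enum below splits it
+  // into an (outer, inner) pair, e.g. for a column-major 3x2 destination
+  // (inner size 3), Index 4 gives outer = 4/3 = 1 and inner = 4%3 = 1.
+  // run() recurses with Index+1 until the Stop specialization terminates
+  // the fully unrolled sequence of assignCoeffByOuterInner calls.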
+ typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + + enum { + outer = Index / DstXprType::InnerSizeAtCompileTime, + inner = Index % DstXprType::InnerSizeAtCompileTime + }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + kernel.assignCoeffByOuterInner(outer, inner); + copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) + { + kernel.assignCoeffByOuterInner(outer, Index_); + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { } +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct copy_using_evaluator_LinearTraversal_CompleteUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) + { + kernel.assignCoeff(Index); + copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_LinearTraversal_CompleteUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct copy_using_evaluator_innervec_CompleteUnrolling +{ + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? 
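+  // Added commentary (not in upstream Eigen): same outer/inner
+  // decomposition as the scalar unroller above, but each run() step stores
+  // a whole SIMD packet via assignPacketByOuterInner, and the recursion
+  // advances by unpacket_traits<PacketType>::size rather than by 1.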
+ typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { + outer = Index / DstXprType::InnerSizeAtCompileTime, + inner = Index % DstXprType::InnerSizeAtCompileTime, + SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, + DstAlignment = Kernel::AssignmentTraits::DstAlignment + }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + kernel.template assignPacketByOuterInner(outer, inner); + enum { NextIndex = Index + unpacket_traits::size }; + copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_innervec_CompleteUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling +{ + typedef typename Kernel::PacketType PacketType; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) + { + kernel.template assignPacketByOuterInner(outer, Index_); + enum { NextIndex = Index_ + unpacket_traits::size }; + copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); + } +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +// dense_assignment_loop is based on assign_impl + +template +struct dense_assignment_loop; + +/************************ +***** Special Cases ***** +************************/ + +// Zero-sized assignment is a no-op. +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel& /*kernel*/) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + EIGEN_STATIC_ASSERT(int(DstXprType::SizeAtCompileTime) == 0, + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT) + } +}; + +/************************ +*** Default traversal *** +************************/ + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel &kernel) + { + for(Index outer = 0; outer < kernel.outerSize(); ++outer) { + for(Index inner = 0; inner < kernel.innerSize(); ++inner) { + kernel.assignCoeffByOuterInner(outer, inner); + } + } + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + + const Index outerSize = kernel.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + } +}; + +/*************************** +*** Linear vectorization *** +***************************/ + + +// The goal of unaligned_dense_assignment_loop is simply to factorize the handling +// of the non vectorizable beginning and ending parts + +template +struct unaligned_dense_assignment_loop +{ + // if IsAligned = true, then do nothing + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {} +}; + +template <> 
+struct unaligned_dense_assignment_loop +{ + // MSVC must not inline this functions. If it does, it fails to optimize the + // packet access path. + // FIXME check which version exhibits this issue +#if EIGEN_COMP_MSVC + template + static EIGEN_DONT_INLINE void run(Kernel &kernel, + Index start, + Index end) +#else + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, + Index start, + Index end) +#endif + { + for (Index index = start; index < end; ++index) + kernel.assignCoeff(index); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + const Index size = kernel.size(); + typedef typename Kernel::Scalar Scalar; + typedef typename Kernel::PacketType PacketType; + enum { + requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment, + packetSize = unpacket_traits::size, + dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment), + dstAlignment = packet_traits::AlignedOnScalar ? int(requestedAlignment) + : int(Kernel::AssignmentTraits::DstAlignment), + srcAlignment = Kernel::AssignmentTraits::JointAlignment + }; + const Index alignedStart = dstIsAligned ? 0 : internal::first_aligned(kernel.dstDataPtr(), size); + const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; + + unaligned_dense_assignment_loop::run(kernel, 0, alignedStart); + + for(Index index = alignedStart; index < alignedEnd; index += packetSize) + kernel.template assignPacket(index); + + unaligned_dense_assignment_loop<>::run(kernel, alignedEnd, size); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { size = DstXprType::SizeAtCompileTime, + packetSize =unpacket_traits::size, + alignedSize = (int(size)/packetSize)*packetSize }; + + copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); + copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); + } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct dense_assignment_loop +{ + typedef typename Kernel::PacketType PacketType; + enum { + SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, + DstAlignment = Kernel::AssignmentTraits::DstAlignment + }; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + const Index innerSize = kernel.innerSize(); + const Index outerSize = kernel.outerSize(); + const Index packetSize = unpacket_traits::size; + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; inner+=packetSize) + kernel.template assignPacketByOuterInner(outer, inner); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::AssignmentTraits Traits; + const Index outerSize = kernel.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); + } +}; + +/*********************** +*** Linear 
traversal *** +***********************/ + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + const Index size = kernel.size(); + for(Index i = 0; i < size; ++i) + kernel.assignCoeff(i); + } +}; + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); + } +}; + +/************************** +*** Slice vectorization *** +***************************/ + +template +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::Scalar Scalar; + typedef typename Kernel::PacketType PacketType; + enum { + packetSize = unpacket_traits::size, + requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment), + alignable = packet_traits::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment)>=sizeof(Scalar), + dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment), + dstAlignment = alignable ? int(requestedAlignment) + : int(Kernel::AssignmentTraits::DstAlignment) + }; + const Scalar *dst_ptr = kernel.dstDataPtr(); + if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0) + { + // the pointer is not aligned-on scalar, so alignment is not possible + return dense_assignment_loop::run(kernel); + } + const Index packetAlignedMask = packetSize - 1; + const Index innerSize = kernel.innerSize(); + const Index outerSize = kernel.outerSize(); + const Index alignedStep = alignable ? (packetSize - kernel.outerStride() % packetSize) & packetAlignedMask : 0; + Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize); + + for(Index outer = 0; outer < outerSize; ++outer) + { + const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); + // do the non-vectorizable part of the assignment + for(Index inner = 0; inner(outer, inner); + + // do the non-vectorizable part of the assignment + for(Index inner = alignedEnd; inner +struct dense_assignment_loop +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) + { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { innerSize = DstXprType::InnerSizeAtCompileTime, + packetSize =unpacket_traits::size, + vectorizableSize = (int(innerSize) / int(packetSize)) * int(packetSize), + size = DstXprType::SizeAtCompileTime }; + + for(Index outer = 0; outer < kernel.outerSize(); ++outer) + { + copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + } + } +}; +#endif + + +/*************************************************************************** +* Part 4 : Generic dense assignment kernel +***************************************************************************/ + +// This class generalize the assignment of a coefficient (or packet) from one dense evaluator +// to another dense writable evaluator. +// It is parametrized by the two evaluators, and the actual assignment functor. +// This abstraction level permits to keep the evaluation loops as simple and as generic as possible. 
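The factoring this comment describes is easiest to see in miniature: one traversal loop, parameterized by an assignment functor. A deliberately simplified toy analogue (hypothetical stand-in types, not Eigen's real internals):

```cpp
#include <cstddef>
#include <vector>

// Toy stand-ins for Eigen's internal assign_op / add_assign_op functors.
struct assign_op     { void operator()(double& dst, double src) const { dst  = src; } };
struct add_assign_op { void operator()(double& dst, double src) const { dst += src; } };

// One generic loop, parameterized by the functor -- the same idea as
// dense_assignment_loop driving a generic_dense_assignment_kernel.
template <typename Func>
void assignment_loop(std::vector<double>& dst,
                     const std::vector<double>& src, Func f) {
  for (std::size_t i = 0; i < dst.size(); ++i)
    f(dst[i], src[i]);  // plays the role of kernel.assignCoeff(i)
}

int main() {
  std::vector<double> d(8, 1.0), s(8, 2.0);
  assignment_loop(d, s, assign_op{});      // d = s
  assignment_loop(d, s, add_assign_op{});  // d += s
}
```

This is why the real kernel below exposes only assignCoeff/assignPacket entry points: every operator (=, +=, -=, *=, /=) reuses the same traversal loops with a different functor.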
+// One can customize the assignment using this generic dense_assignment_kernel with different +// functors, or by completely overloading it, by-passing a functor. +template +class generic_dense_assignment_kernel +{ +protected: + typedef typename DstEvaluatorTypeT::XprType DstXprType; + typedef typename SrcEvaluatorTypeT::XprType SrcXprType; +public: + + typedef DstEvaluatorTypeT DstEvaluatorType; + typedef SrcEvaluatorTypeT SrcEvaluatorType; + typedef typename DstEvaluatorType::Scalar Scalar; + typedef copy_using_evaluator_traits AssignmentTraits; + typedef typename AssignmentTraits::PacketType PacketType; + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) + : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr) + { + #ifdef EIGEN_DEBUG_ASSIGN + AssignmentTraits::debug(); + #endif + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_dstExpr.size(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const EIGEN_NOEXCEPT { return m_dstExpr.innerSize(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const EIGEN_NOEXCEPT { return m_dstExpr.outerSize(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dstExpr.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_dstExpr.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return m_dstExpr.outerStride(); } + + EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() EIGEN_NOEXCEPT { return m_dst; } + EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const EIGEN_NOEXCEPT { return m_src; } + + /// Assign src(row,col) to dst(row,col) through the assignment functor. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col) + { + m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col)); + } + + /// \sa assignCoeff(Index,Index) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index) + { + m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index)); + } + + /// \sa assignCoeff(Index,Index) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner) + { + Index row = rowIndexByOuterInner(outer, inner); + Index col = colIndexByOuterInner(outer, inner); + assignCoeff(row, col); + } + + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col) + { + m_functor.template assignPacket(&m_dst.coeffRef(row,col), m_src.template packet(row,col)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index) + { + m_functor.template assignPacket(&m_dst.coeffRef(index), m_src.template packet(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner) + { + Index row = rowIndexByOuterInner(outer, inner); + Index col = colIndexByOuterInner(outer, inner); + assignPacket(row, col); + } + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) + { + typedef typename DstEvaluatorType::ExpressionTraits Traits; + return int(Traits::RowsAtCompileTime) == 1 ? 0 + : int(Traits::ColsAtCompileTime) == 1 ? inner + : int(DstEvaluatorType::Flags)&RowMajorBit ? 
outer + : inner; + } + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) + { + typedef typename DstEvaluatorType::ExpressionTraits Traits; + return int(Traits::ColsAtCompileTime) == 1 ? 0 + : int(Traits::RowsAtCompileTime) == 1 ? inner + : int(DstEvaluatorType::Flags)&RowMajorBit ? inner + : outer; + } + + EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const + { + return m_dstExpr.data(); + } + +protected: + DstEvaluatorType& m_dst; + const SrcEvaluatorType& m_src; + const Functor &m_functor; + // TODO find a way to avoid the needs of the original expression + DstXprType& m_dstExpr; +}; + +// Special kernel used when computing small products whose operands have dynamic dimensions. It ensures that the +// PacketSize used is no larger than 4, thereby increasing the chance that vectorized instructions will be used +// when computing the product. + +template +class restricted_packet_dense_assignment_kernel : public generic_dense_assignment_kernel +{ +protected: + typedef generic_dense_assignment_kernel Base; + public: + typedef typename Base::Scalar Scalar; + typedef typename Base::DstXprType DstXprType; + typedef copy_using_evaluator_traits AssignmentTraits; + typedef typename AssignmentTraits::PacketType PacketType; + + EIGEN_DEVICE_FUNC restricted_packet_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr) + : Base(dst, src, func, dstExpr) + { + } + }; + +/*************************************************************************** +* Part 5 : Entry point for dense rectangular assignment +***************************************************************************/ + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const Functor &/*func*/) +{ + EIGEN_ONLY_USED_FOR_DEBUG(dst); + EIGEN_ONLY_USED_FOR_DEBUG(src); + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const internal::assign_op &/*func*/) +{ + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if(((dst.rows()!=dstRows) || (dst.cols()!=dstCols))) + dst.resize(dstRows, dstCols); + eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func) +{ + typedef evaluator DstEvaluatorType; + typedef evaluator SrcEvaluatorType; + + SrcEvaluatorType srcEvaluator(src); + + // NOTE To properly handle A = (A*A.transpose())/s with A rectangular, + // we need to resize the destination after the source evaluator has been created. + resize_if_allowed(dst, src, func); + + DstEvaluatorType dstEvaluator(dst); + + typedef generic_dense_assignment_kernel Kernel; + Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); + + dense_assignment_loop::run(kernel); +} + +// Specialization for filling the destination with a constant value. 
+#ifndef EIGEN_GPU_COMPILE_PHASE +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const Eigen::CwiseNullaryOp, DstXprType>& src, const internal::assign_op& func) +{ + resize_if_allowed(dst, src, func); + std::fill_n(dst.data(), dst.size(), src.functor()()); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src) +{ + call_dense_assignment_loop(dst, src, internal::assign_op()); +} + +/*************************************************************************** +* Part 6 : Generic assignment +***************************************************************************/ + +// Based on the respective shapes of the destination and source, +// the class AssignmentKind determine the kind of assignment mechanism. +// AssignmentKind must define a Kind typedef. +template struct AssignmentKind; + +// Assignment kind defined in this file: +struct Dense2Dense {}; +struct EigenBase2EigenBase {}; + +template struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; +template<> struct AssignmentKind { typedef Dense2Dense Kind; }; + +// This is the main assignment class +template< typename DstXprType, typename SrcXprType, typename Functor, + typename Kind = typename AssignmentKind< typename evaluator_traits::Shape , typename evaluator_traits::Shape >::Kind, + typename EnableIf = void> +struct Assignment; + + +// The only purpose of this call_assignment() function is to deal with noalias() / "assume-aliasing" and automatic transposition. +// Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes thing quite complicated. +// So this intermediate function removes everything related to "assume-aliasing" such that Assignment +// does not has to bother about these annoying details. 
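The automatic-transposition half of that job deserves a concrete illustration. A minimal sketch against the public Eigen API (illustrative, not part of this patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::RowVector3f row(1.0f, 2.0f, 3.0f);
  Eigen::Vector3f col;

  // call_assignment_no_alias detects the 1xN = Nx1 shape mismatch at
  // compile time (the NeedToTranspose enum) and wraps the source in a
  // Transpose expression, so both forms below are equivalent:
  col = row.transpose();  // explicit
  col = row;              // implicit -- handled by NeedToTranspose

  // 1x1 expressions are exempt (SizeAtCompileTime != 1 in the enum), so
  // scalar-like assignments never transpose.
  std::cout << col << std::endl;
}
```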
+ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment(Dst& dst, const Src& src) +{ + call_assignment(dst, src, internal::assign_op()); +} +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment(const Dst& dst, const Src& src) +{ + call_assignment(dst, src, internal::assign_op()); +} + +// Deal with "assume-aliasing" +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing::value, void*>::type = 0) +{ + typename plain_matrix_type::type tmp(src); + call_assignment_no_alias(dst, tmp, func); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if::value, void*>::type = 0) +{ + call_assignment_no_alias(dst, src, func); +} + +// by-pass "assume-aliasing" +// When there is no aliasing, we require that 'dst' has been properly resized +template class StorageBase, typename Src, typename Func> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment(NoAlias& dst, const Src& src, const Func& func) +{ + call_assignment_no_alias(dst.expression(), src, func); +} + + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment_no_alias(Dst& dst, const Src& src, const Func& func) +{ + enum { + NeedToTranspose = ( (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) + || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1) + ) && int(Dst::SizeAtCompileTime) != 1 + }; + + typedef typename internal::conditional, Dst>::type ActualDstTypeCleaned; + typedef typename internal::conditional, Dst&>::type ActualDstType; + ActualDstType actualDst(dst); + + // TODO check whether this is the right place to perform these checks: + EIGEN_STATIC_ASSERT_LVALUE(Dst) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src) + EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar); + + Assignment::run(actualDst, src, func); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_restricted_packet_assignment_no_alias(Dst& dst, const Src& src, const Func& func) +{ + typedef evaluator DstEvaluatorType; + typedef evaluator SrcEvaluatorType; + typedef restricted_packet_dense_assignment_kernel Kernel; + + EIGEN_STATIC_ASSERT_LVALUE(Dst) + EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar); + + SrcEvaluatorType srcEvaluator(src); + resize_if_allowed(dst, src, func); + + DstEvaluatorType dstEvaluator(dst); + Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); + + dense_assignment_loop::run(kernel); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment_no_alias(Dst& dst, const Src& src) +{ + call_assignment_no_alias(dst, src, internal::assign_op()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func) +{ + // TODO check whether this is the right place to perform these checks: + EIGEN_STATIC_ASSERT_LVALUE(Dst) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src) + EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar); + + Assignment::run(dst, src, func); +} +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src) +{ + call_assignment_no_alias_no_transpose(dst, src, internal::assign_op()); +} + +// forward declaration +template void check_for_aliasing(const Dst &dst, const 
Src &src); + +// Generic Dense to Dense assignment +// Note that the last template argument "Weak" is needed to make it possible to perform +// both partial specialization+SFINAE without ambiguous specialization +template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak> +struct Assignment +{ + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func) + { +#ifndef EIGEN_NO_DEBUG + internal::check_for_aliasing(dst, src); +#endif + + call_dense_assignment_loop(dst, src, func); + } +}; + +// Generic assignment through evalTo. +// TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism. +// Note that the last template argument "Weak" is needed to make it possible to perform +// both partial specialization+SFINAE without ambiguous specialization +template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak> +struct Assignment +{ + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.evalTo(dst); + } + + // NOTE The following two functions are templated to avoid their instantiation if not needed + // This is needed because some expressions supports evalTo only and/or have 'void' as scalar type. + template + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &/*func*/) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.addTo(dst); + } + + template + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &/*func*/) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.subTo(dst); + } +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_EVALUATOR_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign_MKL.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign_MKL.h new file mode 100644 index 000000000..c6140d185 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Assign_MKL.h @@ -0,0 +1,178 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + Copyright (C) 2015 Gael Guennebaud + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin() + ******************************************************************************** +*/ + +#ifndef EIGEN_ASSIGN_VML_H +#define EIGEN_ASSIGN_VML_H + +namespace Eigen { + +namespace internal { + +template +class vml_assign_traits +{ + private: + enum { + DstHasDirectAccess = Dst::Flags & DirectAccessBit, + SrcHasDirectAccess = Src::Flags & DirectAccessBit, + StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)), + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime, + + MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1, + MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit), + VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize, + LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD + }; + public: + enum { + EnableVml = MightEnableVml && LargeEnough, + Traversal = MightLinearize ? 
LinearTraversal : DefaultTraversal + }; +}; + +#define EIGEN_PP_EXPAND(ARG) ARG +#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1) +#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA +#else +#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA +#endif + +#define EIGEN_VMLMODE_EXPAND_x_ + +#define EIGEN_VMLMODE_PREFIX_xLA vm +#define EIGEN_VMLMODE_PREFIX_x_ v +#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x,VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \ + template< typename DstXprType, typename SrcXprNested> \ + struct Assignment, SrcXprNested>, assign_op, \ + Dense2Dense, typename enable_if::EnableVml>::type> { \ + typedef CwiseUnaryOp, SrcXprNested> SrcXprType; \ + static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ + resize_if_allowed(dst, src, func); \ + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ + if(vml_assign_traits::Traversal==LinearTraversal) { \ + VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(), \ + (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \ + } else { \ + const Index outerSize = dst.outerSize(); \ + for(Index outer = 0; outer < outerSize; ++outer) { \ + const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) : \ + &(src.nestedExpression().coeffRef(0, outer)); \ + EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \ + VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, \ + (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } \ + } \ + } \ + }; \ + + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),s##VMLOP), float, float, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),d##VMLOP), double, double, VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),c##VMLOP), scomplex, MKL_Complex8, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),z##VMLOP), dcomplex, MKL_Complex16, VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) + + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin, Sin, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin, Asin, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh, Sinh, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos, Cos, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos, Acos, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh, Cosh, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan, Tan, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan, Atan, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh, Tanh, LA) +// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp, Exp, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log, Ln, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt, Sqrt, _) + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _) + +#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, 
EIGENTYPE, VMLTYPE, VMLMODE) \ + template< typename DstXprType, typename SrcXprNested, typename Plain> \ + struct Assignment, SrcXprNested, \ + const CwiseNullaryOp,Plain> >, assign_op, \ + Dense2Dense, typename enable_if::EnableVml>::type> { \ + typedef CwiseBinaryOp, SrcXprNested, \ + const CwiseNullaryOp,Plain> > SrcXprType; \ + static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ + resize_if_allowed(dst, src, func); \ + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ + VMLTYPE exponent = reinterpret_cast(src.rhs().functor().m_other); \ + if(vml_assign_traits::Traversal==LinearTraversal) \ + { \ + VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent, \ + (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \ + } else { \ + const Index outerSize = dst.outerSize(); \ + for(Index outer = 0; outer < outerSize; ++outer) { \ + const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.lhs().coeffRef(outer,0)) : \ + &(src.lhs().coeffRef(0, outer)); \ + EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \ + VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent, \ + (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } \ + } \ + } \ + }; + +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float, float, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double, double, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_VML_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BandMatrix.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BandMatrix.h new file mode 100644 index 000000000..878c0240a --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BandMatrix.h @@ -0,0 +1,353 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BANDMATRIX_H +#define EIGEN_BANDMATRIX_H + +namespace Eigen { + +namespace internal { + +template +class BandMatrixBase : public EigenBase +{ + public: + + enum { + Flags = internal::traits::Flags, + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + Supers = internal::traits::Supers, + Subs = internal::traits::Subs, + Options = internal::traits::Options + }; + typedef typename internal::traits::Scalar Scalar; + typedef Matrix DenseMatrixType; + typedef typename DenseMatrixType::StorageIndex StorageIndex; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef EigenBase Base; + + protected: + enum { + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) + ? 
1 + Supers + Subs + : Dynamic, + SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) + }; + + public: + + using Base::derived; + using Base::rows; + using Base::cols; + + /** \returns the number of super diagonals */ + inline Index supers() const { return derived().supers(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return derived().subs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline const CoefficientsType& coeffs() const { return derived().coeffs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline CoefficientsType& coeffs() { return derived().coeffs(); } + + /** \returns a vector expression of the \a i -th column, + * only the meaningful part is returned. + * \warning the internal storage must be column major. */ + inline Block col(Index i) + { + EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + Index start = 0; + Index len = coeffs().rows(); + if (i<=supers()) + { + start = supers()-i; + len = (std::min)(rows(),std::max(0,coeffs().rows() - (supers()-i))); + } + else if (i>=rows()-subs()) + len = std::max(0,coeffs().rows() - (i + 1 - rows() + subs())); + return Block(coeffs(), start, i, len, 1); + } + + /** \returns a vector expression of the main diagonal */ + inline Block diagonal() + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } + + /** \returns a vector expression of the main diagonal (const version) */ + inline const Block diagonal() const + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } + + template struct DiagonalIntReturnType { + enum { + ReturnOpposite = (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)), + Conjugate = ReturnOpposite && NumTraits::IsComplex, + ActualIndex = ReturnOpposite ? -Index : Index, + DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) + ? Dynamic + : (ActualIndex<0 + ? 
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) + : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) + }; + typedef Block BuildType; + typedef typename internal::conditional,BuildType >, + BuildType>::type Type; + }; + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline typename DiagonalIntReturnType::Type diagonal() + { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline const typename DiagonalIntReturnType::Type diagonal() const + { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline Block diagonal(Index i) + { + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline const Block diagonal(Index i) const + { + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); + } + + template inline void evalTo(Dest& dst) const + { + dst.resize(rows(),cols()); + dst.setZero(); + dst.diagonal() = diagonal(); + for (Index i=1; i<=supers();++i) + dst.diagonal(i) = diagonal(i); + for (Index i=1; i<=subs();++i) + dst.diagonal(-i) = diagonal(-i); + } + + DenseMatrixType toDenseMatrix() const + { + DenseMatrixType res(rows(),cols()); + evalTo(res); + return res; + } + + protected: + + inline Index diagonalLength(Index i) const + { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } +}; + +/** + * \class BandMatrix + * \ingroup Core_Module + * + * \brief Represents a rectangular matrix with a banded storage + * + * \tparam _Scalar Numeric type, i.e. float, double, int + * \tparam _Rows Number of rows, or \b Dynamic + * \tparam _Cols Number of columns, or \b Dynamic + * \tparam _Supers Number of super diagonal + * \tparam _Subs Number of sub diagonal + * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint + * The former controls \ref TopicStorageOrders "storage order", and defaults to + * column-major. The latter controls whether the matrix represents a selfadjoint + * matrix in which case either Supers of Subs have to be null. + * + * \sa class TridiagonalMatrix + */ + +template +struct traits > +{ + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef Eigen::Index StorageIndex; + enum { + CoeffReadCost = NumTraits::ReadCost, + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic + }; + typedef Matrix CoefficientsType; +}; + +template +class BandMatrix : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageIndex StorageIndex; + typedef typename internal::traits::CoefficientsType CoefficientsType; + + explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) + : m_coeffs(1+supers+subs,cols), + m_rows(rows), m_supers(supers), m_subs(subs) + { + } + + /** \returns the number of columns */ + inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + inline CoefficientsType& coeffs() { return m_coeffs; } + + protected: + + CoefficientsType m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +template +class BandMatrixWrapper; + +template +struct traits > +{ + typedef typename _CoefficientsType::Scalar Scalar; + typedef typename _CoefficientsType::StorageKind StorageKind; + typedef typename _CoefficientsType::StorageIndex StorageIndex; + enum { + CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic + }; + typedef _CoefficientsType CoefficientsType; +}; + +template +class BandMatrixWrapper : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef typename internal::traits::StorageIndex StorageIndex; + + explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) + : m_coeffs(coeffs), + m_rows(rows), m_supers(supers), m_subs(subs) + { + EIGEN_UNUSED_VARIABLE(cols); + //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); + } + + /** \returns the number of columns */ + inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + + protected: + + const CoefficientsType& m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +/** + * \class TridiagonalMatrix + * \ingroup Core_Module + * + * \brief Represents a tridiagonal matrix with a compact banded storage + * + * \tparam Scalar Numeric type, i.e. 
float, double, int + * \tparam Size Number of rows and cols, or \b Dynamic + * \tparam Options Can be 0 or \b SelfAdjoint + * + * \sa class BandMatrix + */ +template +class TridiagonalMatrix : public BandMatrix +{ + typedef BandMatrix Base; + typedef typename Base::StorageIndex StorageIndex; + public: + explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} + + inline typename Base::template DiagonalIntReturnType<1>::Type super() + { return Base::template diagonal<1>(); } + inline const typename Base::template DiagonalIntReturnType<1>::Type super() const + { return Base::template diagonal<1>(); } + inline typename Base::template DiagonalIntReturnType<-1>::Type sub() + { return Base::template diagonal<-1>(); } + inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const + { return Base::template diagonal<-1>(); } + protected: +}; + + +struct BandShape {}; + +template +struct evaluator_traits > + : public evaluator_traits_base > +{ + typedef BandShape Shape; +}; + +template +struct evaluator_traits > + : public evaluator_traits_base > +{ + typedef BandShape Shape; +}; + +template<> struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BANDMATRIX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Block.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Block.h new file mode 100644 index 000000000..3206d6633 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Block.h @@ -0,0 +1,448 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLOCK_H +#define EIGEN_BLOCK_H + +namespace Eigen { + +namespace internal { +template +struct traits > : traits +{ + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + enum{ + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, + RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, + ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, + MaxRowsAtCompileTime = BlockRows==0 ? 0 + : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) + : int(traits::MaxRowsAtCompileTime), + MaxColsAtCompileTime = BlockCols==0 ? 0 + : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) + : int(traits::MaxColsAtCompileTime), + + XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + : XprTypeIsRowMajor, + HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), + InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsXprType + ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + OuterStrideAtCompileTime = HasSameStorageOrderAsXprType + ? 
int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + + // FIXME, this traits class is rather specialized for dense objects and needs to be cleaned up further + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, + Flags = (traits::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit, + // FIXME DirectAccessBit should not be handled by expressions + // + // Alignment is needed by MapBase's assertions + // We can safely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator + Alignment = 0 + }; +}; + +template::ret> class BlockImpl_dense; + +} // end namespace internal + +template class BlockImpl; + +/** \class Block + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size block + * + * \tparam XprType the type of the expression in which we are taking a block + * \tparam BlockRows the number of rows of the block we are taking at compile time (optional) + * \tparam BlockCols the number of columns of the block we are taking at compile time (optional) + * \tparam InnerPanel is true if the block maps to a set of rows of a row major matrix or + * to a set of columns of a column major matrix (optional). The parameter allows determining + * at compile time whether aligned access is possible on the block expression. + * + * This class represents an expression of either a fixed-size or dynamic-size block. It is the return + * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block(Index,Index) and + * most of the time this is the only way it is used. + * + * However, if you want to directly manipulate block expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. + * + * Here is an example illustrating the dynamic case: + * \include class_Block.cpp + * Output: \verbinclude class_Block.out + * + * \note Even though this expression has dynamic size, in the case where \a XprType + * has fixed size, this expression inherits a fixed maximal size which means that evaluating + * it does not cause a dynamic memory allocation.
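+ *
+ * As a minimal sketch of the dynamic-size form (the matrix name and sizes
+ * below are invented for illustration, they are not part of the shipped
+ * examples), a block is a view that writes through to the underlying matrix:
+ * \code
+ * Eigen::MatrixXf m(4,4);
+ * m.setRandom();
+ * Eigen::Block<Eigen::MatrixXf> b(m, 1, 1, 2, 2); // central 2x2 view
+ * b.setZero();                                    // zeroes m(1..2,1..2) in place
+ * \endcode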
+ + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedBlock.cpp + * Output: \verbinclude class_FixedBlock.out + * + * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock + */ +template class Block + : public BlockImpl::StorageKind> +{ + typedef BlockImpl::StorageKind> Impl; + public: + //typedef typename Impl::Base Base; + typedef Impl Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Block) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + typedef typename internal::remove_all::type NestedExpression; + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Block(XprType& xpr, Index i) : Impl(xpr,i) + { + eigen_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows()) + ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols()))); + } + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Block(XprType& xpr, Index startRow, Index startCol) + : Impl(xpr, startRow, startCol) + { + EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) + eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() + && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols()); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Block(XprType& xpr, + Index startRow, Index startCol, + Index blockRows, Index blockCols) + : Impl(xpr, startRow, startCol, blockRows, blockCols) + { + eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows + && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols); + } +}; + +// The generic default implementation for dense block simply forwards to the internal::BlockImpl_dense +// that must be specialized for direct and non-direct access... +template +class BlockImpl + : public internal::BlockImpl_dense +{ + typedef internal::BlockImpl_dense Impl; + typedef typename XprType::StorageIndex StorageIndex; + public: + typedef Impl Base; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {} + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : Impl(xpr, startRow, startCol, blockRows, blockCols) {} +}; + +namespace internal { + +/** \internal Internal implementation of dense Blocks in the general case. */ +template class BlockImpl_dense + : public internal::dense_xpr_base >::type +{ + typedef Block BlockType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) + + // class InnerIterator; // FIXME apparently never used + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC + inline BlockImpl_dense(XprType& xpr, Index i) + : m_xpr(xpr), + // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, + // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, + // all other cases are invalid. + // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway. + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ?
1 : xpr.cols()) + {} + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC + inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) + : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), + m_blockRows(BlockRows), m_blockCols(BlockCols) + {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC + inline BlockImpl_dense(XprType& xpr, + Index startRow, Index startCol, + Index blockRows, Index blockCols) + : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), + m_blockRows(blockRows), m_blockCols(blockCols) + {} + + EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index rowId, Index colId) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const + { + return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + EIGEN_DEVICE_FUNC + inline const CoeffReturnType coeff(Index index) const + { + return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + inline PacketScalar packet(Index rowId, Index colId) const + { + return m_xpr.template packet(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + template + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) + { + m_xpr.template writePacket(rowId + m_startRow.value(), colId + m_startCol.value(), val); + } + + template + inline PacketScalar packet(Index index) const + { + return m_xpr.template packet + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + inline void writePacket(Index index, const PacketScalar& val) + { + m_xpr.template writePacket + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0), val); + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \sa MapBase::data() */ + EIGEN_DEVICE_FUNC inline const Scalar* data() const; + EIGEN_DEVICE_FUNC inline Index innerStride() const; + EIGEN_DEVICE_FUNC inline Index outerStride() const; + #endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& nestedExpression() const + { + return m_xpr; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + XprType& nestedExpression() { return m_xpr; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + StorageIndex startRow() const EIGEN_NOEXCEPT + { + return m_startRow.value(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + StorageIndex startCol() const EIGEN_NOEXCEPT + { + return m_startCol.value(); + } + + protected: + + XprTypeNested m_xpr; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; +}; + +/** \internal Internal implementation of dense Blocks in the direct access case.*/ +template +class BlockImpl_dense + : public MapBase > +{ + typedef Block BlockType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + enum { + XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0 + }; + public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + BlockImpl_dense(XprType& xpr, Index i) + : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) + || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()), + BlockRows==1 ? 1 : xpr.rows(), + BlockCols==1 ? 1 : xpr.cols()), + m_xpr(xpr), + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0) + { + init(); + } + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) + : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)), + m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) + { + init(); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + BlockImpl_dense(XprType& xpr, + Index startRow, Index startCol, + Index blockRows, Index blockCols) + : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols), + m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) + { + init(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& nestedExpression() const EIGEN_NOEXCEPT + { + return m_xpr; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + XprType& nestedExpression() { return m_xpr; } + + /** \sa MapBase::innerStride() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index innerStride() const EIGEN_NOEXCEPT + { + return internal::traits::HasSameStorageOrderAsXprType + ? m_xpr.innerStride() + : m_xpr.outerStride(); + } + + /** \sa MapBase::outerStride() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index outerStride() const EIGEN_NOEXCEPT + { + return internal::traits::HasSameStorageOrderAsXprType + ? 
m_xpr.outerStride() + : m_xpr.innerStride(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + StorageIndex startRow() const EIGEN_NOEXCEPT { return m_startRow.value(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + StorageIndex startCol() const EIGEN_NOEXCEPT { return m_startCol.value(); } + + #ifndef __SUNPRO_CC + // FIXME sunstudio is not friendly with the above friend... + // META-FIXME there is no 'friend' keyword around here. Is this obsolete? + protected: + #endif + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal used by allowAligned() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) + : Base(data, blockRows, blockCols), m_xpr(xpr) + { + init(); + } + #endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void init() + { + m_outerStride = internal::traits::HasSameStorageOrderAsXprType + ? m_xpr.outerStride() + : m_xpr.innerStride(); + } + + XprTypeNested m_xpr; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + Index m_outerStride; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BLOCK_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BooleanRedux.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BooleanRedux.h new file mode 100644 index 000000000..852de8b90 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/BooleanRedux.h @@ -0,0 +1,162 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_ALLANDANY_H +#define EIGEN_ALLANDANY_H + +namespace Eigen { + +namespace internal { + +template +struct all_unroller +{ + enum { + col = (UnrollCount-1) / Rows, + row = (UnrollCount-1) % Rows + }; + + EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat) + { + return all_unroller::run(mat) && mat.coeff(row, col); + } +}; + +template +struct all_unroller +{ + EIGEN_DEVICE_FUNC static inline bool run(const Derived &/*mat*/) { return true; } +}; + +template +struct all_unroller +{ + EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; } +}; + +template +struct any_unroller +{ + enum { + col = (UnrollCount-1) / Rows, + row = (UnrollCount-1) % Rows + }; + + EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat) + { + return any_unroller::run(mat) || mat.coeff(row, col); + } +}; + +template +struct any_unroller +{ + EIGEN_DEVICE_FUNC static inline bool run(const Derived & /*mat*/) { return false; } +}; + +template +struct any_unroller +{ + EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; } +}; + +} // end namespace internal + +/** \returns true if all coefficients are true + * + * Example: \include MatrixBase_all.cpp + * Output: \verbinclude MatrixBase_all.out + * + * \sa any(), Cwise::operator<() + */ +template +EIGEN_DEVICE_FUNC inline bool DenseBase::all() const +{ + typedef internal::evaluator Evaluator; + enum { + unroll = SizeAtCompileTime != Dynamic + && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + int(NumTraits::AddCost)) <= EIGEN_UNROLLING_LIMIT + }; + Evaluator evaluator(derived()); + if(unroll) + return internal::all_unroller::RowsAtCompileTime>::run(evaluator); + else + { + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if (!evaluator.coeff(i, j)) return false; + return true; + } +} + +/** \returns true if at least one coefficient is true + * + * \sa all() + */ +template +EIGEN_DEVICE_FUNC inline bool DenseBase::any() const +{ + typedef internal::evaluator Evaluator; + enum { + unroll = SizeAtCompileTime != Dynamic + && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + int(NumTraits::AddCost)) <= EIGEN_UNROLLING_LIMIT + }; + Evaluator evaluator(derived()); + if(unroll) + return internal::any_unroller::RowsAtCompileTime>::run(evaluator); + else + { + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if (evaluator.coeff(i, j)) return true; + return false; + } +} + +/** \returns the number of coefficients which evaluate to true + * + * \sa all(), any() + */ +template +EIGEN_DEVICE_FUNC inline Eigen::Index DenseBase::count() const +{ + return derived().template cast().template cast().sum(); +} + +/** \returns true if \c *this contains at least one Not A Number (NaN). + * + * \sa allFinite() + */ +template +inline bool DenseBase::hasNaN() const +{ +#if EIGEN_COMP_MSVC || (defined __FAST_MATH__) + return derived().array().isNaN().any(); +#else + return !((derived().array()==derived().array()).all()); +#endif +} + +/** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values.
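+ *
+ * A minimal sketch of the usual guard (the matrix contents below are invented
+ * for illustration; requires <limits>):
+ * \code
+ * Eigen::MatrixXd a(2,2);
+ * a << 1.0, std::numeric_limits<double>::quiet_NaN(),
+ *      3.0, 4.0;
+ * bool ok  = a.allFinite(); // false: a NaN is present
+ * bool bad = a.hasNaN();    // true
+ * \endcode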
+ * + * \sa hasNaN() + */ +template +inline bool DenseBase::allFinite() const +{ +#if EIGEN_COMP_MSVC || (defined __FAST_MATH__) + return derived().array().isFinite().all(); +#else + return !((derived()-derived()).hasNaN()); +#endif +} + +} // end namespace Eigen + +#endif // EIGEN_ALLANDANY_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CommaInitializer.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CommaInitializer.h new file mode 100644 index 000000000..c0e29c75c --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CommaInitializer.h @@ -0,0 +1,164 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMMAINITIALIZER_H +#define EIGEN_COMMAINITIALIZER_H + +namespace Eigen { + +/** \class CommaInitializer + * \ingroup Core_Module + * + * \brief Helper class used by the comma initializer operator + * + * This class is internally used to implement the comma initializer feature. It is + * the return type of MatrixBase::operator<<, and most of the time this is the only + * way it is used. + * + * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() + */ +template +struct CommaInitializer +{ + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC + inline CommaInitializer(XprType& xpr, const Scalar& s) + : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) + { + eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 + && "Cannot comma-initialize a 0x0 matrix (operator<<)"); + m_xpr.coeffRef(0,0) = s; + } + + template + EIGEN_DEVICE_FUNC + inline CommaInitializer(XprType& xpr, const DenseBase& other) + : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) + { + eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() + && "Cannot comma-initialize a 0x0 matrix (operator<<)"); + m_xpr.block(0, 0, other.rows(), other.cols()) = other; + } + + /* Copy/Move constructor which transfers ownership. This is crucial in + * absence of return value optimization to avoid assertions during destruction. */ + // FIXME in C++11 mode this could be replaced by a proper RValue constructor + EIGEN_DEVICE_FUNC + inline CommaInitializer(const CommaInitializer& o) + : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { + // Mark original object as finished. 
In the absence of R-value references we need to const_cast: + const_cast(o).m_row = m_xpr.rows(); + const_cast(o).m_col = m_xpr.cols(); + const_cast(o).m_currentBlockRows = 0; + } + + /* inserts a scalar value in the target matrix */ + EIGEN_DEVICE_FUNC + CommaInitializer& operator,(const Scalar& s) + { + if (m_col==m_xpr.cols()) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = 1; + eigen_assert(m_row<m_xpr.rows() + && "Too many rows passed to comma initializer (operator<<)"); + } + eigen_assert(m_col<m_xpr.cols() + && "Too many coefficients passed to comma initializer (operator<<)"); + eigen_assert(m_currentBlockRows==1); + m_xpr.coeffRef(m_row, m_col++) = s; + return *this; + } + + /* inserts a matrix expression in the target matrix */ + template<typename OtherDerived> + EIGEN_DEVICE_FUNC + CommaInitializer& operator,(const DenseBase<OtherDerived>& other) + { + if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows)) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = other.rows(); + eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() + && "Too many rows passed to comma initializer (operator<<)"); + } + eigen_assert((m_col + other.cols() <= m_xpr.cols()) + && "Too many coefficients passed to comma initializer (operator<<)"); + eigen_assert(m_currentBlockRows==other.rows()); + m_xpr.template block + (m_row, m_col, other.rows(), other.cols()) = other; + m_col += other.cols(); + return *this; + } + + EIGEN_DEVICE_FUNC + inline ~CommaInitializer() +#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS + EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception) +#endif + { + finished(); + } + + /** \returns the built matrix once all its coefficients have been set. + * Calling finished is 100% optional. Its purpose is to write expressions + * like this: + * \code + * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); + * \endcode + */ + EIGEN_DEVICE_FUNC + inline XprType& finished() { + eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) + && m_col == m_xpr.cols() + && "Too few coefficients passed to comma initializer (operator<<)"); + return m_xpr; + } + + XprType& m_xpr; // target expression + Index m_row; // current row id + Index m_col; // current col id + Index m_currentBlockRows; // current block height +}; + +/** \anchor MatrixBaseCommaInitRef + * Convenient operator to set the coefficients of a matrix. + * + * The coefficients must be provided in a row major order and exactly match + * the size of the matrix. Otherwise an assertion is raised. + * + * Example: \include MatrixBase_set.cpp + * Output: \verbinclude MatrixBase_set.out + * + * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. + * + * \sa CommaInitializer::finished(), class CommaInitializer + */ +template +EIGEN_DEVICE_FUNC inline CommaInitializer DenseBase::operator<< (const Scalar& s) +{ + return CommaInitializer(*static_cast(this), s); +} + +/** \sa operator<<(const Scalar&) */ +template +template +EIGEN_DEVICE_FUNC inline CommaInitializer +DenseBase::operator<<(const DenseBase& other) +{ + return CommaInitializer(*static_cast(this), other); +} + +} // end namespace Eigen + +#endif // EIGEN_COMMAINITIALIZER_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ConditionEstimator.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ConditionEstimator.h new file mode 100644 index 000000000..51a2e5f1b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ConditionEstimator.h @@ -0,0 +1,175 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0.
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONDITIONESTIMATOR_H +#define EIGEN_CONDITIONESTIMATOR_H + +namespace Eigen { + +namespace internal { + +template +struct rcond_compute_sign { + static inline Vector run(const Vector& v) { + const RealVector v_abs = v.cwiseAbs(); + return (v_abs.array() == static_cast(0)) + .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); + } +}; + +// Partial specialization to avoid elementwise division for real vectors. +template +struct rcond_compute_sign { + static inline Vector run(const Vector& v) { + return (v.array() < static_cast(0)) + .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); + } +}; + +/** + * \returns an estimate of ||inv(matrix)||_1 given a decomposition of + * \a matrix that implements .solve() and .adjoint().solve() methods. + * + * This function implements Algorithms 4.1 and 5.1 from + * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf + * which also forms the basis for the condition number estimators in + * LAPACK. Since at most 10 calls to the solve method of dec are + * performed, the total cost is O(dims^2), as opposed to O(dims^3) + * needed to compute the inverse matrix explicitly. + * + * The most common usage is in estimating the condition number + * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be + * computed directly in O(n^2) operations. + * + * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and + * LLT. + * + * \sa FullPivLU, PartialPivLU, LDLT, LLT. + */ +template +typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) +{ + typedef typename Decomposition::MatrixType MatrixType; + typedef typename Decomposition::Scalar Scalar; + typedef typename Decomposition::RealScalar RealScalar; + typedef typename internal::plain_col_type::type Vector; + typedef typename internal::plain_col_type::type RealVector; + const bool is_complex = (NumTraits::IsComplex != 0); + + eigen_assert(dec.rows() == dec.cols()); + const Index n = dec.rows(); + if (n == 0) + return 0; + + // Disable Index to float conversion warning +#ifdef __INTEL_COMPILER + #pragma warning push + #pragma warning ( disable : 2259 ) +#endif + Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); +#ifdef __INTEL_COMPILER + #pragma warning pop +#endif + + // lower_bound is a lower bound on + // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 + // and is the objective maximized by the ("super-") gradient ascent + // algorithm below. + RealScalar lower_bound = v.template lpNorm<1>(); + if (n == 1) + return lower_bound; + + // Gradient ascent algorithm follows: We know that the optimum is achieved at + // one of the simplices v = e_i, so in each iteration we follow a + // super-gradient to move towards the optimal one. + RealScalar old_lower_bound = lower_bound; + Vector sign_vector(n); + Vector old_sign_vector; + Index v_max_abs_index = -1; + Index old_v_max_abs_index = v_max_abs_index; + for (int k = 0; k < 4; ++k) + { + sign_vector = internal::rcond_compute_sign::run(v); + if (k > 0 && !is_complex && sign_vector == old_sign_vector) { + // Break if the solution stagnated. + break; + } + // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| + v = dec.adjoint().solve(sign_vector); + v.real().cwiseAbs().maxCoeff(&v_max_abs_index); + if (v_max_abs_index == old_v_max_abs_index) { + // Break if the solution stagnated. 
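+      // (Hitting the same basis index twice in a row means the super-gradient
+      // ascent has reached a fixed point, so further iterations cannot
+      // improve the lower bound.)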
+ break; + } + // Move to the new simplex e_j, where j = v_max_abs_index. + v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. + lower_bound = v.template lpNorm<1>(); + if (lower_bound <= old_lower_bound) { + // Break if the gradient step did not increase the lower_bound. + break; + } + if (!is_complex) { + old_sign_vector = sign_vector; + } + old_v_max_abs_index = v_max_abs_index; + old_lower_bound = lower_bound; + } + // The following calculates an independent estimate of ||matrix||_1 by + // multiplying matrix by a vector with entries of slowly increasing + // magnitude and alternating sign: + // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. + // This improvement to Hager's algorithm above is due to Higham. It was + // added to make the algorithm more robust in certain corner cases where + // large elements in the matrix might otherwise escape detection due to + // exact cancellation (especially when op and op_adjoint correspond to a + // sequence of backsubstitutions and permutations), which could cause + // Hager's algorithm to vastly underestimate ||matrix||_1. + Scalar alternating_sign(RealScalar(1)); + for (Index i = 0; i < n; ++i) { + // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates + v[i] = alternating_sign * static_cast(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); + alternating_sign = -alternating_sign; + } + v = dec.solve(v); + const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n)); + return numext::maxi(lower_bound, alternate_lower_bound); +} + +/** \brief Reciprocal condition number estimator. + * + * Computing a decomposition of a dense matrix takes O(n^3) operations, while + * this method estimates the condition number quickly and reliably in O(n^2) + * operations. + * + * \returns an estimate of the reciprocal condition number + * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and + * its decomposition. Supports the following decompositions: FullPivLU, + * PartialPivLU, LDLT, and LLT. + * + * \sa FullPivLU, PartialPivLU, LDLT, LLT. + */ +template +typename Decomposition::RealScalar +rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) +{ + typedef typename Decomposition::RealScalar RealScalar; + eigen_assert(dec.rows() == dec.cols()); + if (dec.rows() == 0) return NumTraits::infinity(); + if (matrix_norm == RealScalar(0)) return RealScalar(0); + if (dec.rows() == 1) return RealScalar(1); + const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec); + return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0) + : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); +} + +} // namespace internal + +} // namespace Eigen + +#endif diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreEvaluators.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreEvaluators.h new file mode 100644 index 000000000..0ff8c8deb --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreEvaluators.h @@ -0,0 +1,1741 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +#ifndef EIGEN_COREEVALUATORS_H +#define EIGEN_COREEVALUATORS_H + +namespace Eigen { + +namespace internal { + +// This class returns the evaluator kind from the expression storage kind. +// Default assumes index based accessors +template +struct storage_kind_to_evaluator_kind { + typedef IndexBased Kind; +}; + +// This class returns the evaluator shape from the expression storage kind. +// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc. +template struct storage_kind_to_shape; + +template<> struct storage_kind_to_shape { typedef DenseShape Shape; }; +template<> struct storage_kind_to_shape { typedef SolverShape Shape; }; +template<> struct storage_kind_to_shape { typedef PermutationShape Shape; }; +template<> struct storage_kind_to_shape { typedef TranspositionsShape Shape; }; + +// Evaluators have to be specialized with respect to various criteria such as: +// - storage/structure/shape +// - scalar type +// - etc. +// Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluator. +// We currently distinguish the following kinds of evaluators: +// - unary_evaluator for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate) +// - binary_evaluator for expressions taking two arguments (CwiseBinaryOp) +// - ternary_evaluator for expressions taking three arguments (CwiseTernaryOp) +// - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching. +// - mapbase_evaluator for Map, Block, Ref +// - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator) + +template< typename T, + typename Arg1Kind = typename evaluator_traits::Kind, + typename Arg2Kind = typename evaluator_traits::Kind, + typename Arg3Kind = typename evaluator_traits::Kind, + typename Arg1Scalar = typename traits::Scalar, + typename Arg2Scalar = typename traits::Scalar, + typename Arg3Scalar = typename traits::Scalar> struct ternary_evaluator; + +template< typename T, + typename LhsKind = typename evaluator_traits::Kind, + typename RhsKind = typename evaluator_traits::Kind, + typename LhsScalar = typename traits::Scalar, + typename RhsScalar = typename traits::Scalar> struct binary_evaluator; + +template< typename T, + typename Kind = typename evaluator_traits::Kind, + typename Scalar = typename T::Scalar> struct unary_evaluator; + +// evaluator_traits contains traits for evaluator + +template +struct evaluator_traits_base +{ + // by default, get evaluator kind and shape from storage + typedef typename storage_kind_to_evaluator_kind::StorageKind>::Kind Kind; + typedef typename storage_kind_to_shape::StorageKind>::Shape Shape; +}; + +// Default evaluator traits +template +struct evaluator_traits : public evaluator_traits_base +{ +}; + +template::Shape > +struct evaluator_assume_aliasing { + static const bool value = false; +}; + +// By default, we assume a unary expression: +template +struct evaluator : public unary_evaluator +{ + typedef unary_evaluator Base; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const T& xpr) : Base(xpr) {} +}; + + +// TODO: Think about const-correctness +template +struct evaluator + : evaluator +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const T& xpr) : evaluator(xpr) {} +}; + +// ---------- base class for all evaluators ---------- + +template +struct evaluator_base +{ + // TODO that's not very nice to have
to propagate all these traits. They are currently only needed to handle outer,inner indices. + typedef traits ExpressionTraits; + + enum { + Alignment = 0 + }; + // noncopyable: + // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization) + // and makes complex evaluators much larger than they should be. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {} +private: + EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&); + EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&); +}; + +// -------------------- Matrix and Array -------------------- +// +// evaluator is a common base class for the +// Matrix and Array evaluators. +// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense, +// so no need for more sophisticated dispatching. + +// this helper makes it possible to completely eliminate m_outerStride if it is known at compile time. +template class plainobjectbase_evaluator_data { +public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr) + { +#ifndef EIGEN_INTERNAL_DEBUGGING + EIGEN_UNUSED_VARIABLE(outerStride); +#endif + eigen_internal_assert(outerStride==OuterStride); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; } + const Scalar *data; +}; + +template class plainobjectbase_evaluator_data { +public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index outerStride() const { return m_outerStride; } + const Scalar *data; +protected: + Index m_outerStride; +}; + +template +struct evaluator > + : evaluator_base +{ + typedef PlainObjectBase PlainObjectType; + typedef typename PlainObjectType::Scalar Scalar; + typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = PlainObjectType::IsRowMajor, + IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, + RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, + ColsAtCompileTime = PlainObjectType::ColsAtCompileTime, + + CoeffReadCost = NumTraits::ReadCost, + Flags = traits::EvaluatorFlags, + Alignment = traits::Alignment + }; + enum { + // We do not need to know the outer stride for vectors + OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0 + : int(IsRowMajor) ? ColsAtCompileTime + : RowsAtCompileTime + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + evaluator() + : m_d(0,OuterStrideAtCompileTime) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const PlainObjectType& m) + : m_d(m.data(),IsVectorAtCompileTime ?
0 : m.outerStride()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + if (IsRowMajor) + return m_d.data[row * m_d.outerStride() + col]; + else + return m_d.data[row + col * m_d.outerStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_d.data[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + if (IsRowMajor) + return const_cast(m_d.data)[row * m_d.outerStride() + col]; + else + return const_cast(m_d.data)[row + col * m_d.outerStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return const_cast(m_d.data)[index]; + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + if (IsRowMajor) + return ploadt(m_d.data + row * m_d.outerStride() + col); + else + return ploadt(m_d.data + row + col * m_d.outerStride()); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return ploadt(m_d.data + index); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + if (IsRowMajor) + return pstoret + (const_cast(m_d.data) + row * m_d.outerStride() + col, x); + else + return pstoret + (const_cast(m_d.data) + row + col * m_d.outerStride(), x); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + return pstoret(const_cast(m_d.data) + index, x); + } + +protected: + + plainobjectbase_evaluator_data m_d; +}; + +template +struct evaluator > + : evaluator > > +{ + typedef Matrix XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + evaluator() {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& m) + : evaluator >(m) + { } +}; + +template +struct evaluator > + : evaluator > > +{ + typedef Array XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + evaluator() {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& m) + : evaluator >(m) + { } +}; + +// -------------------- Transpose -------------------- + +template +struct unary_evaluator, IndexBased> + : evaluator_base > +{ + typedef Transpose XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = evaluator::Flags ^ RowMajorBit, + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(col, row); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(col, row); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename XprType::Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_argImpl.template packet(col, row); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return m_argImpl.template packet(index); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + 
m_argImpl.template writePacket(col, row, x); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + m_argImpl.template writePacket(index, x); + } + +protected: + evaluator m_argImpl; +}; + +// -------------------- CwiseNullaryOp -------------------- +// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator. +// Likewise, there is no need for more sophisticated dispatching here. + +template::value, + bool has_unary = has_unary_operator::value, + bool has_binary = has_binary_operator::value> +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp(i,j); } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } +}; + +template +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp(); } +}; + +template +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp(i,j); } +}; + +// We need the following specialization for vector-only functors assigned to a runtime vector, +// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd. +// In this case, i==0 and j is used for the actual iteration. +template +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { + eigen_assert(i==0 || j==0); + return op(i+j); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { + eigen_assert(i==0 || j==0); + return op.template packetOp(i+j); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } +}; + +template +struct nullary_wrapper {}; + +#if 0 && EIGEN_COMP_MSVC>0 +// Disable this ugly workaround. This is now handled in traits::match, +// but this piece of code might still come in handy if some other weird compilation +// errors pop up again. + +// MSVC exhibits a weird compilation error when +// compiling: +// Eigen::MatrixXf A = MatrixXf::Random(3,3); +// Ref R = 2.f*A; +// and that has_*ary_operator> have not been instantiated yet. +// The "problem" is that evaluator<2.f*A> is instantiated by traits::match<2.f*A> +// and at that time has_*ary_operator returns true regardless of T. +// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
+// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(), +// and packet() are really instantiated as implemented below: + +// This is a simple wrapper around Index to enforce the re-instantiation of +// has_*ary_operator when needed. +template struct nullary_wrapper_workaround_msvc { + nullary_wrapper_workaround_msvc(const T&); + operator T()const; +}; + +template +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().operator()(op,i,j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().operator()(op,i); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().template packetOp(op,i,j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().template packetOp(op,i); + } +}; +#endif // MSVC workaround + +template +struct evaluator > + : evaluator_base > +{ + typedef CwiseNullaryOp XprType; + typedef typename internal::remove_all::type PlainObjectTypeCleaned; + + enum { + CoeffReadCost = internal::functor_traits::Cost, + + Flags = (evaluator::Flags + & ( HereditaryBits + | (functor_has_linear_access::ret ? LinearAccessBit : 0) + | (functor_traits::PacketAccess ? PacketAccessBit : 0))) + | (functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), + Alignment = AlignedMax + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) + : m_functor(n.functor()), m_wrapper() + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(IndexType row, IndexType col) const + { + return m_wrapper(m_functor, row, col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(IndexType index) const + { + return m_wrapper(m_functor,index); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(IndexType row, IndexType col) const + { + return m_wrapper.template packetOp(m_functor, row, col); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(IndexType index) const + { + return m_wrapper.template packetOp(m_functor, index); + } + +protected: + const NullaryOp m_functor; + const internal::nullary_wrapper m_wrapper; +}; + +// -------------------- CwiseUnaryOp -------------------- + +template +struct unary_evaluator, IndexBased > + : evaluator_base > +{ + typedef CwiseUnaryOp XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Flags = evaluator::Flags + & (HereditaryBits | LinearAccessBit | (functor_traits::PacketAccess ? 
PacketAccessBit : 0)), + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& op) : m_d(op) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_d.func()(m_d.argImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_d.func()(m_d.argImpl.coeff(index)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_d.func().packetOp(m_d.argImpl.template packet(row, col)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return m_d.func().packetOp(m_d.argImpl.template packet(index)); + } + +protected: + + // this helper permits to completely eliminate the functor if it is empty + struct Data + { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const UnaryOp& func() const { return op; } + UnaryOp op; + evaluator argImpl; + }; + + Data m_d; +}; + +// -------------------- CwiseTernaryOp -------------------- + +// this is a ternary expression +template +struct evaluator > + : public ternary_evaluator > +{ + typedef CwiseTernaryOp XprType; + typedef ternary_evaluator > Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +template +struct ternary_evaluator, IndexBased, IndexBased> + : evaluator_base > +{ + typedef CwiseTernaryOp XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Arg1Flags = evaluator::Flags, + Arg2Flags = evaluator::Flags, + Arg3Flags = evaluator::Flags, + SameType = is_same::value && is_same::value, + StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit), + Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & ( + HereditaryBits + | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) & + ( (StorageOrdersAgree ? LinearAccessBit : 0) + | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0) + ) + ) + ), + Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit), + Alignment = EIGEN_PLAIN_ENUM_MIN( + EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment), + evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_d.func().packetOp(m_d.arg1Impl.template packet(row, col), + m_d.arg2Impl.template packet(row, col), + m_d.arg3Impl.template packet(row, col)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return m_d.func().packetOp(m_d.arg1Impl.template packet(index), + m_d.arg2Impl.template packet(index), + m_d.arg3Impl.template packet(index)); + } + +protected: + // this helper permits to completely eliminate the functor if it is empty + struct Data + { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TernaryOp& func() const { return op; } + TernaryOp op; + evaluator arg1Impl; + evaluator arg2Impl; + evaluator arg3Impl; + }; + + Data m_d; +}; + +// -------------------- CwiseBinaryOp -------------------- + +// this is a binary expression +template +struct evaluator > + : public binary_evaluator > +{ + typedef CwiseBinaryOp XprType; + typedef binary_evaluator > Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +template +struct binary_evaluator, IndexBased, IndexBased> + : evaluator_base > +{ + typedef CwiseBinaryOp XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + LhsFlags = evaluator::Flags, + RhsFlags = evaluator::Flags, + SameType = is_same::value, + StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit), + Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( + HereditaryBits + | (int(LhsFlags) & int(RhsFlags) & + ( (StorageOrdersAgree ? LinearAccessBit : 0) + | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0) + ) + ) + ), + Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), + Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment,evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit binary_evaluator(const XprType& xpr) : m_d(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_d.func().packetOp(m_d.lhsImpl.template packet(row, col), + m_d.rhsImpl.template packet(row, col)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return m_d.func().packetOp(m_d.lhsImpl.template packet(index), + m_d.rhsImpl.template packet(index)); + } + +protected: + + // this helper permits to completely eliminate the functor if it is empty + struct Data + { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const BinaryOp& func() const { return op; } + BinaryOp op; + evaluator lhsImpl; + evaluator rhsImpl; + }; + + Data m_d; +}; + +// -------------------- CwiseUnaryView -------------------- + +template +struct unary_evaluator, IndexBased> + : evaluator_base > +{ + typedef CwiseUnaryView XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Flags = (evaluator::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)), + + Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost... + }; + + EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_d.func()(m_d.argImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_d.func()(m_d.argImpl.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_d.func()(m_d.argImpl.coeffRef(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return m_d.func()(m_d.argImpl.coeffRef(index)); + } + +protected: + + // this helper permits to completely eliminate the functor if it is empty + struct Data + { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const UnaryOp& func() const { return op; } + UnaryOp op; + evaluator argImpl; + }; + + Data m_d; +}; + +// -------------------- Map -------------------- + +// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ? 
+// but that might complicate template specialization +template +struct mapbase_evaluator; + +template +struct mapbase_evaluator : evaluator_base +{ + typedef Derived XprType; + typedef typename XprType::PointerType PointerType; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = XprType::RowsAtCompileTime, + ColsAtCompileTime = XprType::ColsAtCompileTime, + CoeffReadCost = NumTraits::ReadCost + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit mapbase_evaluator(const XprType& map) + : m_data(const_cast(map.data())), + m_innerStride(map.innerStride()), + m_outerStride(map.outerStride()) + { + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator::Flags&PacketAccessBit, internal::inner_stride_at_compile_time::ret==1), + PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_data[col * colStride() + row * rowStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_data[index * m_innerStride.value()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_data[col * colStride() + row * rowStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return m_data[index * m_innerStride.value()]; + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + PointerType ptr = m_data + row * rowStride() + col * colStride(); + return internal::ploadt(ptr); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return internal::ploadt(m_data + index * m_innerStride.value()); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + PointerType ptr = m_data + row * rowStride() + col * colStride(); + return internal::pstoret(ptr, x); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + internal::pstoret(m_data + index * m_innerStride.value(), x); + } +protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rowStride() const EIGEN_NOEXCEPT { + return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index colStride() const EIGEN_NOEXCEPT { + return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); + } + + PointerType m_data; + const internal::variable_if_dynamic m_innerStride; + const internal::variable_if_dynamic m_outerStride; +}; + +template +struct evaluator > + : public mapbase_evaluator, PlainObjectType> +{ + typedef Map XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types once we can handle multi-sized packet types + typedef typename packet_traits::type PacketScalar; + + enum { + InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 + ? int(PlainObjectType::InnerStrideAtCompileTime) + : int(StrideType::InnerStrideAtCompileTime), + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? 
int(PlainObjectType::OuterStrideAtCompileTime) + : int(StrideType::OuterStrideAtCompileTime), + HasNoInnerStride = InnerStrideAtCompileTime == 1, + HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, + HasNoStride = HasNoInnerStride && HasNoOuterStride, + IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, + + PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit), + LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit), + Flags = int( evaluator::Flags) & (LinearAccessMask&PacketAccessMask), + + Alignment = int(MapOptions)&int(AlignedMask) + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) + : mapbase_evaluator(map) + { } +}; + +// -------------------- Ref -------------------- + +template +struct evaluator > + : public mapbase_evaluator, PlainObjectType> +{ + typedef Ref XprType; + + enum { + Flags = evaluator >::Flags, + Alignment = evaluator >::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& ref) + : mapbase_evaluator(ref) + { } +}; + +// -------------------- Block -------------------- + +template::ret> struct block_evaluator; + +template +struct evaluator > + : block_evaluator +{ + typedef Block XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types once we can handle multi-sized packet types + typedef typename packet_traits::type PacketScalar; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime, + + ArgTypeIsRowMajor = (int(evaluator::Flags)&RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 + : ArgTypeIsRowMajor, + HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor), + InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsArgType + ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + OuterStrideAtCompileTime = HasSameStorageOrderAsArgType + ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0, + + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator::Flags&LinearAccessBit))) ? LinearAccessBit : 0, + FlagsRowMajorBit = XprType::Flags&RowMajorBit, + Flags0 = evaluator::Flags & ( (HereditaryBits & ~RowMajorBit) | + DirectAccessBit | + MaskPacketAccessBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit, + + PacketAlignment = unpacket_traits::alignment, + Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) + && (OuterStrideAtCompileTime!=0) + && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? 
int(PacketAlignment) : 0, + Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, Alignment0) + }; + typedef block_evaluator block_evaluator_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& block) : block_evaluator_type(block) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } +}; + +// no direct-access => dispatch to a unary evaluator +template +struct block_evaluator + : unary_evaluator > +{ + typedef Block XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit block_evaluator(const XprType& block) + : unary_evaluator(block) + {} +}; + +template +struct unary_evaluator, IndexBased> + : evaluator_base > +{ + typedef Block XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& block) + : m_argImpl(block.nestedExpression()), + m_startRow(block.startRow()), + m_startCol(block.startCol()), + m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0) + { } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + RowsAtCompileTime = XprType::RowsAtCompileTime, + ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator::Flags&LinearAccessBit) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return linear_coeff_impl(index, bool_constant()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return linear_coeffRef_impl(index, bool_constant()); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_argImpl.template packet(m_startRow.value() + row, m_startCol.value() + col); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + if (ForwardLinearAccess) + return m_argImpl.template packet(m_linear_offset.value() + index); + else + return packet(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + return m_argImpl.template writePacket(m_startRow.value() + row, m_startCol.value() + col, x); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + if (ForwardLinearAccess) + return m_argImpl.template writePacket(m_linear_offset.value() + index, x); + else + return writePacket(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0, + x); + } + +protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const + { + return m_argImpl.coeff(m_linear_offset.value() + index); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const + { + return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? 
index : 0); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */) + { + return m_argImpl.coeffRef(m_linear_offset.value() + index); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */) + { + return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); + } + + evaluator m_argImpl; + const variable_if_dynamic m_startRow; + const variable_if_dynamic m_startCol; + const variable_if_dynamic m_linear_offset; +}; + +// TODO: This evaluator does not actually use the child evaluator; +// all action is via the data() as returned by the Block expression. + +template +struct block_evaluator + : mapbase_evaluator, + typename Block::PlainObject> +{ + typedef Block XprType; + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit block_evaluator(const XprType& block) + : mapbase_evaluator(block) + { + // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime + eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator::Alignment)) == 0) && "data is not aligned"); + } +}; + + +// -------------------- Select -------------------- +// NOTE shall we introduce a ternary_evaluator? + +// TODO enable vectorization for Select +template +struct evaluator > + : evaluator_base > +{ + typedef Select XprType; + enum { + CoeffReadCost = evaluator::CoeffReadCost + + EIGEN_PLAIN_ENUM_MAX(evaluator::CoeffReadCost, + evaluator::CoeffReadCost), + + Flags = (unsigned int)evaluator::Flags & evaluator::Flags & HereditaryBits, + + Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& select) + : m_conditionImpl(select.conditionMatrix()), + m_thenImpl(select.thenMatrix()), + m_elseImpl(select.elseMatrix()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + if (m_conditionImpl.coeff(row, col)) + return m_thenImpl.coeff(row, col); + else + return m_elseImpl.coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + if (m_conditionImpl.coeff(index)) + return m_thenImpl.coeff(index); + else + return m_elseImpl.coeff(index); + } + +protected: + evaluator m_conditionImpl; + evaluator m_thenImpl; + evaluator m_elseImpl; +}; + + +// -------------------- Replicate -------------------- + +template +struct unary_evaluator > + : evaluator_base > +{ + typedef Replicate XprType; + typedef typename XprType::CoeffReturnType CoeffReturnType; + enum { + Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor + }; + typedef typename internal::nested_eval::type ArgTypeNested; + typedef typename internal::remove_all::type ArgTypeNestedCleaned; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + LinearAccessMask = XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0, + Flags = (evaluator::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits::Flags & RowMajorBit), + + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& replicate) + : m_arg(replicate.nestedExpression()), + m_argImpl(m_arg), + m_rows(replicate.nestedExpression().rows()), + m_cols(replicate.nestedExpression().cols()) + {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? col + : col % m_cols.value(); + + return m_argImpl.coeff(actual_row, actual_col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_index = internal::traits::RowsAtCompileTime==1 + ? (ColFactor==1 ? index : index%m_cols.value()) + : (RowFactor==1 ? index : index%m_rows.value()); + + return m_argImpl.coeff(actual_index); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? col + : col % m_cols.value(); + + return m_argImpl.template packet(actual_row, actual_col); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + const Index actual_index = internal::traits::RowsAtCompileTime==1 + ? (ColFactor==1 ? index : index%m_cols.value()) + : (RowFactor==1 ? index : index%m_rows.value()); + + return m_argImpl.template packet(actual_index); + } + +protected: + const ArgTypeNested m_arg; + evaluator m_argImpl; + const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + +// -------------------- MatrixWrapper and ArrayWrapper -------------------- +// +// evaluator_wrapper_base is a common base class for the +// MatrixWrapper and ArrayWrapper evaluators. 
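+//
+// At the user level these wrappers are what .matrix() and .array() return;
+// a small illustrative sketch (variable names are ours, not Eigen's):
+//
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Ones(2, 2);
+//   Eigen::ArrayXXd a = m.array();                  // ArrayWrapper: coefficient-wise semantics
+//   Eigen::MatrixXd n = (m.array() + 1.0).matrix(); // MatrixWrapper: back to matrix semantics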
+ +template +struct evaluator_wrapper_base + : evaluator_base +{ + typedef typename remove_all::type ArgType; + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = evaluator::Flags, + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} + + typedef typename ArgType::Scalar Scalar; + typedef typename ArgType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + return m_argImpl.template packet(row, col); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + return m_argImpl.template packet(index); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + m_argImpl.template writePacket(row, col, x); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + m_argImpl.template writePacket(index, x); + } + +protected: + evaluator m_argImpl; +}; + +template +struct unary_evaluator > + : evaluator_wrapper_base > +{ + typedef MatrixWrapper XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& wrapper) + : evaluator_wrapper_base >(wrapper.nestedExpression()) + { } +}; + +template +struct unary_evaluator > + : evaluator_wrapper_base > +{ + typedef ArrayWrapper XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& wrapper) + : evaluator_wrapper_base >(wrapper.nestedExpression()) + { } +}; + + +// -------------------- Reverse -------------------- + +// defined in Reverse.h: +template struct reverse_packet_cond; + +template +struct unary_evaluator > + : evaluator_base > +{ + typedef Reverse XprType; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = XprType::IsRowMajor, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + ReversePacket = (Direction == BothDirections) + || ((Direction == Vertical) && IsColMajor) + || ((Direction == Horizontal) && IsRowMajor), + + CoeffReadCost = evaluator::CoeffReadCost, + + // let's enable LinearAccess only with vectorization because of the product overhead + // FIXME enable DirectAccess with negative strides? + Flags0 = evaluator::Flags, + LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) ) + || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1)) + ? LinearAccessBit : 0, + + Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), + + Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit unary_evaluator(const XprType& reverse) + : m_argImpl(reverse.nestedExpression()), + m_rows(ReverseRow ? 
reverse.nestedExpression().rows() : 1), + m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, + ReverseCol ? m_cols.value() - col - 1 : col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, + ReverseCol ? m_cols.value() - col - 1 : col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index row, Index col) const + { + enum { + PacketSize = unpacket_traits::size, + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 + }; + typedef internal::reverse_packet_cond reverse_packet; + return reverse_packet::run(m_argImpl.template packet( + ReverseRow ? m_rows.value() - row - OffsetRow : row, + ReverseCol ? m_cols.value() - col - OffsetCol : col)); + } + + template + EIGEN_STRONG_INLINE + PacketType packet(Index index) const + { + enum { PacketSize = unpacket_traits::size }; + return preverse(m_argImpl.template packet(m_rows.value() * m_cols.value() - index - PacketSize)); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index row, Index col, const PacketType& x) + { + // FIXME we could factorize some code with packet(i,j) + enum { + PacketSize = unpacket_traits::size, + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 + }; + typedef internal::reverse_packet_cond reverse_packet; + m_argImpl.template writePacket( + ReverseRow ? m_rows.value() - row - OffsetRow : row, + ReverseCol ? m_cols.value() - col - OffsetCol : col, + reverse_packet::run(x)); + } + + template + EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketType& x) + { + enum { PacketSize = unpacket_traits::size }; + m_argImpl.template writePacket + (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); + } + +protected: + evaluator m_argImpl; + + // If we do not reverse rows, then we do not need to know the number of rows; same for columns + // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. 
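+ // As a concrete reading of the linearized accessors above: for a fully
+ // reversed expression, coeff(index) resolves to
+ //   m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1),
+ // i.e. a length-n vector maps index i to n - 1 - i.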
+ const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + + +// -------------------- Diagonal -------------------- + +template +struct evaluator > + : evaluator_base > +{ + typedef Diagonal XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + + Flags = (unsigned int)(evaluator::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit evaluator(const XprType& diagonal) + : m_argImpl(diagonal.nestedExpression()), + m_index(diagonal.index()) + { } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index) const + { + return m_argImpl.coeff(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index index) const + { + return m_argImpl.coeff(index + rowOffset(), index + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index) + { + return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); + } + +protected: + evaluator m_argImpl; + const internal::variable_if_dynamicindex m_index; + +private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; } +}; + + +//---------------------------------------------------------------------- +// deprecated code +//---------------------------------------------------------------------- + +// -------------------- EvalToTemp -------------------- + +// expression class for evaluating nested expression to a temporary + +template class EvalToTemp; + +template +struct traits > + : public traits +{ }; + +template +class EvalToTemp + : public dense_xpr_base >::type +{ + public: + + typedef typename dense_xpr_base::type Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) + + explicit EvalToTemp(const ArgType& arg) + : m_arg(arg) + { } + + const ArgType& arg() const + { + return m_arg; + } + + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT + { + return m_arg.rows(); + } + + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT + { + return m_arg.cols(); + } + + private: + const ArgType& m_arg; +}; + +template +struct evaluator > + : public evaluator +{ + typedef EvalToTemp XprType; + typedef typename ArgType::PlainObject PlainObject; + typedef evaluator Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) + : m_result(xpr.arg()) + { + ::new (static_cast(this)) Base(m_result); + } + + // This constructor is used when nesting an EvalTo evaluator in another evaluator + EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) + : m_result(arg) + { + ::new (static_cast(this)) Base(m_result); + } + +protected: + PlainObject m_result; +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COREEVALUATORS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreIterators.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreIterators.h new file mode 100644 index 000000000..b96719681 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CoreIterators.h @@ -0,0 +1,132 @@ +// This file is part of Eigen, a 
lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COREITERATORS_H +#define EIGEN_COREITERATORS_H + +namespace Eigen { + +/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core + */ + +namespace internal { + +template +class inner_iterator_selector; + +} + +/** \class InnerIterator + * \brief An InnerIterator allows to loop over the element of any matrix expression. + * + * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed. + * + * TODO: add a usage example + */ +template +class InnerIterator +{ +protected: + typedef internal::inner_iterator_selector::Kind> IteratorType; + typedef internal::evaluator EvaluatorType; + typedef typename internal::traits::Scalar Scalar; +public: + /** Construct an iterator over the \a outerId -th row or column of \a xpr */ + InnerIterator(const XprType &xpr, const Index &outerId) + : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) + {} + + /// \returns the value of the current coefficient. + EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); } + /** Increment the iterator \c *this to the next non-zero coefficient. + * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView + */ + EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } + EIGEN_STRONG_INLINE InnerIterator& operator+=(Index i) { m_iter.operator+=(i); return *this; } + EIGEN_STRONG_INLINE InnerIterator operator+(Index i) + { InnerIterator result(*this); result+=i; return result; } + + + /// \returns the column or row index of the current coefficient. + EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + /// \returns the row index of the current coefficient. + EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + /// \returns the column index of the current coefficient. + EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } + /// \returns \c true if the iterator \c *this still references a valid coefficient. + EIGEN_STRONG_INLINE operator bool() const { return m_iter; } + +protected: + EvaluatorType m_eval; + IteratorType m_iter; +private: + // If you get here, then you're not using the right InnerIterator type, e.g.: + // SparseMatrix A; + // SparseMatrix::InnerIterator it(A,0); + template InnerIterator(const EigenBase&,Index outer); +}; + +namespace internal { + +// Generic inner iterator implementation for dense objects +template +class inner_iterator_selector +{ +protected: + typedef evaluator EvaluatorType; + typedef typename traits::Scalar Scalar; + enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; + +public: + EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize) + : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) + {} + + EIGEN_STRONG_INLINE Scalar value() const + { + return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner) + : m_eval.coeff(m_inner, m_outer); + } + + EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } + + EIGEN_STRONG_INLINE Index index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? 
m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } + + EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } + +protected: + const EvaluatorType& m_eval; + Index m_inner; + const Index m_outer; + const Index m_end; +}; + +// For iterator-based evaluator, inner-iterator is already implemented as +// evaluator<>::InnerIterator +template +class inner_iterator_selector + : public evaluator::InnerIterator +{ +protected: + typedef typename evaluator::InnerIterator Base; + typedef evaluator EvaluatorType; + +public: + EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) + : Base(eval, outerId) + {} +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COREITERATORS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseBinaryOp.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseBinaryOp.h new file mode 100644 index 000000000..2202b1cc6 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseBinaryOp.h @@ -0,0 +1,183 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_BINARY_OP_H +#define EIGEN_CWISE_BINARY_OP_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + // we must not inherit from traits since it has + // the potential to cause problems with MSVC + typedef typename remove_all::type Ancestor; + typedef typename traits::XprKind XprKind; + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime + }; + + // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), + // we still want to handle the case when the result type is different. + typedef typename result_of< + BinaryOp( + const typename Lhs::Scalar&, + const typename Rhs::Scalar& + ) + >::type Scalar; + typedef typename cwise_promote_storage_type::StorageKind, + typename traits::StorageKind, + BinaryOp>::ret StorageKind; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + enum { + Flags = cwise_promote_storage_order::StorageKind,typename traits::StorageKind,_LhsNested::Flags & RowMajorBit,_RhsNested::Flags & RowMajorBit>::value + }; +}; +} // end namespace internal + +template +class CwiseBinaryOpImpl; + +/** \class CwiseBinaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions + * + * \tparam BinaryOp template functor implementing the operator + * \tparam LhsType the type of the left-hand side + * \tparam RhsType the type of the right-hand side + * + * This class represents an expression where a coefficient-wise binary operator is applied to two expressions. 
+ * It is the return type of binary operators, by which we mean only those binary operators where + * both the left-hand side and the right-hand side are Eigen expressions. + * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp. + * + * Most of the time, this is the only way that it is used, so you typically don't have to name + * CwiseBinaryOp types explicitly. + * + * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp + */ +template +class CwiseBinaryOp : + public CwiseBinaryOpImpl< + BinaryOp, LhsType, RhsType, + typename internal::cwise_promote_storage_type::StorageKind, + typename internal::traits::StorageKind, + BinaryOp>::ret>, + internal::no_assignment_operator +{ + public: + + typedef typename internal::remove_all::type Functor; + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + + typedef typename CwiseBinaryOpImpl< + BinaryOp, LhsType, RhsType, + typename internal::cwise_promote_storage_type::StorageKind, + typename internal::traits::StorageKind, + BinaryOp>::ret>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) + + typedef typename internal::ref_selector::type LhsNested; + typedef typename internal::ref_selector::type RhsNested; + typedef typename internal::remove_reference::type _LhsNested; + typedef typename internal::remove_reference::type _RhsNested; + +#if EIGEN_COMP_MSVC && EIGEN_HAS_CXX11 + //Required for Visual Studio or the Copy constructor will probably not get inlined! + EIGEN_STRONG_INLINE + CwiseBinaryOp(const CwiseBinaryOp&) = default; +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp()) + : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) + { + EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar); + // require the sizes to match + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) + eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rows() const EIGEN_NOEXCEPT { + // return the fixed size type if available to enable compile time optimizations + return internal::traits::type>::RowsAtCompileTime==Dynamic ? m_rhs.rows() : m_lhs.rows(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index cols() const EIGEN_NOEXCEPT { + // return the fixed size type if available to enable compile time optimizations + return internal::traits::type>::ColsAtCompileTime==Dynamic ? m_rhs.cols() : m_lhs.cols(); + } + + /** \returns the left hand side nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const _LhsNested& lhs() const { return m_lhs; } + /** \returns the right hand side nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const _RhsNested& rhs() const { return m_rhs; } + /** \returns the functor representing the binary operation */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const BinaryOp& functor() const { return m_functor; } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; + const BinaryOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseBinaryOpImpl + : public internal::generic_xpr_base >::type +{ +public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +/** replaces \c *this by \c *this - \a other. 
+ * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +MatrixBase::operator-=(const MatrixBase &other) +{ + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & +MatrixBase::operator+=(const MatrixBase& other) +{ + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_CWISE_BINARY_OP_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseNullaryOp.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseNullaryOp.h new file mode 100644 index 000000000..289ec510a --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseNullaryOp.h @@ -0,0 +1,1001 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_NULLARY_OP_H +#define EIGEN_CWISE_NULLARY_OP_H + +namespace Eigen { + +namespace internal { +template +struct traits > : traits +{ + enum { + Flags = traits::Flags & RowMajorBit + }; +}; + +} // namespace internal + +/** \class CwiseNullaryOp + * \ingroup Core_Module + * + * \brief Generic expression of a matrix where all coefficients are defined by a functor + * + * \tparam NullaryOp template functor implementing the operator + * \tparam PlainObjectType the underlying plain matrix/array type + * + * This class represents an expression of a generic nullary operator. + * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, + * and most of the time this is the only way it is used. + * + * However, if you want to write a function returning such an expression, you + * will need to use this class. + * + * The functor NullaryOp must expose one of the following method: + + + + +
+ *  - \c operator()()                 if the procedural generation does not depend on the coefficient entries (e.g., random numbers)
+ *  - \c operator()(Index i)          if the procedural generation makes sense for vectors only and depends on the coefficient index \c i (e.g., linspace)
+ *  - \c operator()(Index i,Index j)  if the procedural generation depends on the matrix coordinates \c i, \c j (e.g., to generate a checkerboard with 0 and 1)
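+ *
+ * For instance, a functor for the coordinate-based form could look like this
+ * (a hypothetical sketch; \c checkerboard_op is our name, not part of Eigen):
+ * \code
+ * struct checkerboard_op {
+ *   double operator()(Eigen::Index i, Eigen::Index j) const { return double((i + j) % 2); }
+ * };
+ * Eigen::MatrixXd board = Eigen::MatrixXd::NullaryExpr(8, 8, checkerboard_op());
+ * \endcode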
+ * It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized for vectors. + * + * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding + * C++11 random number generators. + * + * A nullary expression can also be used to implement custom sophisticated matrix manipulations + * that cannot be covered by the existing set of natively supported matrix manipulations. + * See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations + * on the behavior of CwiseNullaryOp. + * + * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr + */ +template +class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp >::type, internal::no_assignment_operator +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) + + EIGEN_DEVICE_FUNC + CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) + : m_rows(rows), m_cols(cols), m_functor(func) + { + eigen_assert(rows >= 0 + && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols >= 0 + && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rows() const { return m_rows.value(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index cols() const { return m_cols.value(); } + + /** \returns the functor representing the nullary operation */ + EIGEN_DEVICE_FUNC + const NullaryOp& functor() const { return m_functor; } + + protected: + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; + const NullaryOp m_functor; +}; + + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN +const CwiseNullaryOp::PlainObject> +#else +const CwiseNullaryOp +#endif +DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) +{ + return CwiseNullaryOp(rows, cols, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. 
+ * + * Here is an example with C++11 random generators: \include random_cpp11.cpp + * Output: \verbinclude random_cpp11.out + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN +const CwiseNullaryOp::PlainObject> +#else +const CwiseNullaryOp +#endif +DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); + else return CwiseNullaryOp(size, 1, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN +const CwiseNullaryOp::PlainObject> +#else +const CwiseNullaryOp +#endif +DenseBase::NullaryExpr(const CustomNullaryOp& func) +{ + return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this DenseBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index rows, Index cols, const Scalar& value) +{ + return DenseBase::NullaryExpr(rows, cols, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this DenseBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index size, const Scalar& value) +{ + return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(const Scalar& value) +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op(value)); +} + +/** \deprecated because of accuracy loss. 
In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced_seq_deprecated.cpp
+ * Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out
+ *
+ * \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&)
+ */
+template<typename Derived>
+EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
+}
+
+/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
+ *
+ * \sa LinSpaced(const Scalar&, const Scalar&)
+ */
+template<typename Derived>
+EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar>(low,high,Derived::SizeAtCompileTime));
+}
+
+/**
+ * \brief Sets a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ * When size is set to 1, a vector of length 1 containing 'high' is returned.
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced.cpp
+ * Output: \verbinclude DenseBase_LinSpaced.out
+ *
+ * For integer scalar types, an even spacing is possible if and only if the length of the range,
+ * i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the
+ * number of values \c high-low+1 (meaning each value can be repeated the same number of times).
+ * If neither of these two conditions is satisfied, then \c high is lowered to the largest value
+ * satisfying one of these constraints.
+ * Here are some examples:
+ *
+ * Example: \include DenseBase_LinSpacedInt.cpp
+ * Output: \verbinclude DenseBase_LinSpacedInt.out
+ *
+ * \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
+}
+
+/**
+ * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
+ * Special version for fixed-size types, which does not require the size parameter.
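+ *
+ * A minimal usage sketch (our example, not from the Eigen docs):
+ * \code
+ * Eigen::Vector4f v = Eigen::Vector4f::LinSpaced(0.f, 1.f); // 0, 1/3, 2/3, 1
+ * \endcode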
+ */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); +} + +/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isApproxToConstant +(const Scalar& val, const RealScalar& prec) const +{ + typename internal::nested_eval::type self(derived()); + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if(!internal::isApprox(self.coeff(i, j), val, prec)) + return false; + return true; +} + +/** This is just an alias for isApproxToConstant(). + * + * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isConstant +(const Scalar& val, const RealScalar& prec) const +{ + return isApproxToConstant(val, prec); +} + +/** Alias for setConstant(): sets all coefficients in this expression to \a val. + * + * \sa setConstant(), Constant(), class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& val) +{ + setConstant(val); +} + +/** Sets all coefficients in this expression to value \a val. + * + * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& val) +{ + return derived() = Constant(rows(), cols(), val); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val. + * + * \only_for_vectors + * + * Example: \include Matrix_setConstant_int.cpp + * Output: \verbinclude Matrix_setConstant_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(Index size, const Scalar& val) +{ + resize(size); + return setConstant(val); +} + +/** Resizes to the given size, and sets all coefficients in this expression to the given value \a val. + * + * \param rows the new number of rows + * \param cols the new number of columns + * \param val the value to which all coefficients are set + * + * Example: \include Matrix_setConstant_int_int.cpp + * Output: \verbinclude Matrix_setConstant_int_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& val) +{ + resize(rows, cols); + return setConstant(val); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to the given value \a val. For the parameter + * of type NoChange_t, just pass the special value \c NoChange. 
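+ *
+ * A short sketch of this overload (our example; assumes a dynamic-size matrix):
+ * \code
+ * Eigen::MatrixXd m(2, 3);
+ * m.setConstant(Eigen::NoChange, 5, 0.25); // keep 2 rows, resize to 5 columns, fill with 0.25
+ * \endcode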
+ * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(NoChange_t, Index cols, const Scalar& val) +{ + return setConstant(rows(), cols, val); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to the given value \a val. For the parameter + * of type NoChange_t, just pass the special value \c NoChange. + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(Index rows, NoChange_t, const Scalar& val) +{ + return setConstant(rows, cols(), val); +} + + +/** + * \brief Sets a linearly spaced vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_setLinSpaced.cpp + * Output: \verbinclude DenseBase_setLinSpaced.out + * + * For integer scalar types, do not miss the explanations on the definition + * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. + * + * \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op(low,high,newSize)); +} + +/** + * \brief Sets a linearly spaced vector. + * + * The function fills \c *this with equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * For integer scalar types, do not miss the explanations on the definition + * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. + * + * \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return setLinSpaced(size(), low, high); +} + +// zero: + +/** \returns an expression of a zero matrix. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int_int.cpp + * Output: \verbinclude MatrixBase_zero_int_int.out + * + * \sa Zero(), Zero(Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero(Index rows, Index cols) +{ + return Constant(rows, cols, Scalar(0)); +} + +/** \returns an expression of a zero vector. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. 
For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int.cpp + * Output: \verbinclude MatrixBase_zero_int.out + * + * \sa Zero(), Zero(Index,Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero(Index size) +{ + return Constant(size, Scalar(0)); +} + +/** \returns an expression of a fixed-size zero matrix or vector. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_zero.cpp + * Output: \verbinclude MatrixBase_zero.out + * + * \sa Zero(Index), Zero(Index,Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero() +{ + return Constant(Scalar(0)); +} + +/** \returns true if *this is approximately equal to the zero matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isZero.cpp + * Output: \verbinclude MatrixBase_isZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isZero(const RealScalar& prec) const +{ + typename internal::nested_eval::type self(derived()); + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) + return false; + return true; +} + +/** Sets all coefficients in this expression to zero. + * + * Example: \include MatrixBase_setZero.cpp + * Output: \verbinclude MatrixBase_setZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setZero() +{ + return setConstant(Scalar(0)); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to zero. + * + * \only_for_vectors + * + * Example: \include Matrix_setZero_int.cpp + * Output: \verbinclude Matrix_setZero_int.out + * + * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(Index newSize) +{ + resize(newSize); + return setConstant(Scalar(0)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to zero. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setZero_int_int.cpp + * Output: \verbinclude Matrix_setZero_int_int.out + * + * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(Index rows, Index cols) +{ + resize(rows, cols); + return setConstant(Scalar(0)); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to zero. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(NoChange_t, Index cols) +{ + return setZero(rows(), cols); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to zero. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. 
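+ *
+ * For instance (our sketch; assumes a dynamic-size matrix):
+ * \code
+ * Eigen::MatrixXd m(2, 3);
+ * m.setZero(4, Eigen::NoChange); // grow to 4 rows, keep 3 columns, zero everything
+ * \endcode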
+ * + * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(Index rows, NoChange_t) +{ + return setZero(rows, cols()); +} + +// ones: + +/** \returns an expression of a matrix where all coefficients equal one. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int_int.cpp + * Output: \verbinclude MatrixBase_ones_int_int.out + * + * \sa Ones(), Ones(Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones(Index rows, Index cols) +{ + return Constant(rows, cols, Scalar(1)); +} + +/** \returns an expression of a vector where all coefficients equal one. + * + * The parameter \a newSize is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int.cpp + * Output: \verbinclude MatrixBase_ones_int.out + * + * \sa Ones(), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones(Index newSize) +{ + return Constant(newSize, Scalar(1)); +} + +/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_ones.cpp + * Output: \verbinclude MatrixBase_ones.out + * + * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones() +{ + return Constant(Scalar(1)); +} + +/** \returns true if *this is approximately equal to the matrix where all coefficients + * are equal to 1, within the precision given by \a prec. + * + * Example: \include MatrixBase_isOnes.cpp + * Output: \verbinclude MatrixBase_isOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isOnes +(const RealScalar& prec) const +{ + return isApproxToConstant(Scalar(1), prec); +} + +/** Sets all coefficients in this expression to one. + * + * Example: \include MatrixBase_setOnes.cpp + * Output: \verbinclude MatrixBase_setOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() +{ + return setConstant(Scalar(1)); +} + +/** Resizes to the given \a newSize, and sets all coefficients in this expression to one. 
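+ * For instance (a sketch with a hypothetical size):
+ * \code
+ * Eigen::VectorXd v;
+ * v.setOnes(4); // v is now a 4-vector filled with 1
+ * \endcode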
+ * + * \only_for_vectors + * + * Example: \include Matrix_setOnes_int.cpp + * Output: \verbinclude Matrix_setOnes_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(Index newSize) +{ + resize(newSize); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to one. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setOnes_int_int.cpp + * Output: \verbinclude Matrix_setOnes_int_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(Index rows, Index cols) +{ + resize(rows, cols); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to one. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(Index rows, NoChange_t) +{ + return setOnes(rows, cols()); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to one. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(NoChange_t, Index cols) +{ + return setOnes(rows(), cols); +} + +// Identity: + +/** \returns an expression of the identity matrix (not necessarily square). + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used + * instead. + * + * Example: \include MatrixBase_identity_int_int.cpp + * Output: \verbinclude MatrixBase_identity_int_int.out + * + * \sa Identity(), setIdentity(), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity(Index rows, Index cols) +{ + return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); +} + +/** \returns an expression of the identity matrix (not necessarily square). + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variant taking size arguments. 
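+ * A quick sketch of both variants (hypothetical sizes):
+ * \code
+ * Eigen::Matrix3d a = Eigen::Matrix3d::Identity();     // fixed-size, 3x3
+ * Eigen::MatrixXd b = Eigen::MatrixXd::Identity(3, 4); // dynamic-size, not necessarily square
+ * \endcode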
+ * + * Example: \include MatrixBase_identity.cpp + * Output: \verbinclude MatrixBase_identity.out + * + * \sa Identity(Index,Index), setIdentity(), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity() +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); +} + +/** \returns true if *this is approximately equal to the identity matrix + * (not necessarily square), + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isIdentity.cpp + * Output: \verbinclude MatrixBase_isIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() + */ +template +bool MatrixBase::isIdentity +(const RealScalar& prec) const +{ + typename internal::nested_eval::type self(derived()); + for(Index j = 0; j < cols(); ++j) + { + for(Index i = 0; i < rows(); ++i) + { + if(i == j) + { + if(!internal::isApprox(self.coeff(i, j), static_cast(1), prec)) + return false; + } + else + { + if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) + return false; + } + } + } + return true; +} + +namespace internal { + +template=16)> +struct setIdentity_impl +{ + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + return m = Derived::Identity(m.rows(), m.cols()); + } +}; + +template +struct setIdentity_impl +{ + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + m.setZero(); + const Index size = numext::mini(m.rows(), m.cols()); + for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); + return m; + } +}; + +} // end namespace internal + +/** Writes the identity expression (not necessarily square) into *this. + * + * Example: \include MatrixBase_setIdentity.cpp + * Output: \verbinclude MatrixBase_setIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() +{ + return internal::setIdentity_impl::run(derived()); +} + +/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setIdentity_int_int.cpp + * Output: \verbinclude Matrix_setIdentity_int_int.out + * + * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) +{ + derived().resize(rows, cols); + return setIdentity(); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index newSize, Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * This variant is for fixed-size vector only. 
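+ * For instance (sketch):
+ * \code
+ * Eigen::Vector4d u = Eigen::Vector4d::Unit(2); // (0, 0, 1, 0)
+ * \endcode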
+ * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(),i); +} + +/** \returns an expression of the X axis unit vector (1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() +{ return Derived::Unit(0); } + +/** \returns an expression of the Y axis unit vector (0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() +{ return Derived::Unit(1); } + +/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() +{ return Derived::Unit(2); } + +/** \returns an expression of the W axis unit vector (0,0,0,1) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() +{ return Derived::Unit(3); } + +/** \brief Set the coefficients of \c *this to the i-th unit (basis) vector + * + * \param i index of the unique coefficient to be set to 1 + * + * \only_for_vectors + * + * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setUnit(Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + eigen_assert(i +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setUnit(Index newSize, Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + eigen_assert(i +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2016 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_TERNARY_OP_H +#define EIGEN_CWISE_TERNARY_OP_H + +namespace Eigen { + +namespace internal { +template +struct traits > { + // we must not inherit from traits since it has + // the potential to cause problems with MSVC + typedef typename remove_all::type Ancestor; + typedef typename traits::XprKind XprKind; + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime + }; + + // even though we require Arg1, Arg2, and Arg3 to have the same scalar type + // (see CwiseTernaryOp constructor), + // we still want to handle the case when the result type is different. 
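+  // (e.g. a ternary functor taking three double arguments but returning bool
+  // yields a bool-valued expression)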
+ typedef typename result_of::type Scalar; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + + typedef typename Arg1::Nested Arg1Nested; + typedef typename Arg2::Nested Arg2Nested; + typedef typename Arg3::Nested Arg3Nested; + typedef typename remove_reference::type _Arg1Nested; + typedef typename remove_reference::type _Arg2Nested; + typedef typename remove_reference::type _Arg3Nested; + enum { Flags = _Arg1Nested::Flags & RowMajorBit }; +}; +} // end namespace internal + +template +class CwiseTernaryOpImpl; + +/** \class CwiseTernaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise ternary operator is + * applied to two expressions + * + * \tparam TernaryOp template functor implementing the operator + * \tparam Arg1Type the type of the first argument + * \tparam Arg2Type the type of the second argument + * \tparam Arg3Type the type of the third argument + * + * This class represents an expression where a coefficient-wise ternary + * operator is applied to three expressions. + * It is the return type of ternary operators, by which we mean only those + * ternary operators where + * all three arguments are Eigen expressions. + * For example, the return type of betainc(matrix1, matrix2, matrix3) is a + * CwiseTernaryOp. + * + * Most of the time, this is the only way that it is used, so you typically + * don't have to name + * CwiseTernaryOp types explicitly. + * + * \sa MatrixBase::ternaryExpr(const MatrixBase &, const + * MatrixBase &, const CustomTernaryOp &) const, class CwiseBinaryOp, + * class CwiseUnaryOp, class CwiseNullaryOp + */ +template +class CwiseTernaryOp : public CwiseTernaryOpImpl< + TernaryOp, Arg1Type, Arg2Type, Arg3Type, + typename internal::traits::StorageKind>, + internal::no_assignment_operator +{ + public: + typedef typename internal::remove_all::type Arg1; + typedef typename internal::remove_all::type Arg2; + typedef typename internal::remove_all::type Arg3; + + typedef typename CwiseTernaryOpImpl< + TernaryOp, Arg1Type, Arg2Type, Arg3Type, + typename internal::traits::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp) + + typedef typename internal::ref_selector::type Arg1Nested; + typedef typename internal::ref_selector::type Arg2Nested; + typedef typename internal::ref_selector::type Arg3Nested; + typedef typename internal::remove_reference::type _Arg1Nested; + typedef typename internal::remove_reference::type _Arg2Nested; + typedef typename internal::remove_reference::type _Arg3Nested; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, + const Arg3& a3, + const TernaryOp& func = TernaryOp()) + : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) { + // require the sizes to match + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3) + + // The index types should match + EIGEN_STATIC_ASSERT((internal::is_same< + typename internal::traits::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same< + typename internal::traits::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + + eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && + a1.rows() == a3.rows() && a1.cols() == a3.cols()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rows() const { + // return the fixed size type if available to enable compile time + 
// optimizations + if (internal::traits::type>:: + RowsAtCompileTime == Dynamic && + internal::traits::type>:: + RowsAtCompileTime == Dynamic) + return m_arg3.rows(); + else if (internal::traits::type>:: + RowsAtCompileTime == Dynamic && + internal::traits::type>:: + RowsAtCompileTime == Dynamic) + return m_arg2.rows(); + else + return m_arg1.rows(); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index cols() const { + // return the fixed size type if available to enable compile time + // optimizations + if (internal::traits::type>:: + ColsAtCompileTime == Dynamic && + internal::traits::type>:: + ColsAtCompileTime == Dynamic) + return m_arg3.cols(); + else if (internal::traits::type>:: + ColsAtCompileTime == Dynamic && + internal::traits::type>:: + ColsAtCompileTime == Dynamic) + return m_arg2.cols(); + else + return m_arg1.cols(); + } + + /** \returns the first argument nested expression */ + EIGEN_DEVICE_FUNC + const _Arg1Nested& arg1() const { return m_arg1; } + /** \returns the first argument nested expression */ + EIGEN_DEVICE_FUNC + const _Arg2Nested& arg2() const { return m_arg2; } + /** \returns the third argument nested expression */ + EIGEN_DEVICE_FUNC + const _Arg3Nested& arg3() const { return m_arg3; } + /** \returns the functor representing the ternary operation */ + EIGEN_DEVICE_FUNC + const TernaryOp& functor() const { return m_functor; } + + protected: + Arg1Nested m_arg1; + Arg2Nested m_arg2; + Arg3Nested m_arg3; + const TernaryOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseTernaryOpImpl + : public internal::generic_xpr_base< + CwiseTernaryOp >::type { + public: + typedef typename internal::generic_xpr_base< + CwiseTernaryOp >::type Base; +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_TERNARY_OP_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryOp.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryOp.h new file mode 100644 index 000000000..e68c4f748 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryOp.h @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_UNARY_OP_H +#define EIGEN_CWISE_UNARY_OP_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits +{ + typedef typename result_of< + UnaryOp(const typename XprType::Scalar&) + >::type Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + enum { + Flags = _XprTypeNested::Flags & RowMajorBit + }; +}; +} + +template +class CwiseUnaryOpImpl; + +/** \class CwiseUnaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise unary operator is applied to an expression + * + * \tparam UnaryOp template functor implementing the operator + * \tparam XprType the type of the expression to which we are applying the unary operator + * + * This class represents an expression where a unary operator is applied to an expression. + * It is the return type of all operations taking exactly 1 input expression, regardless of the + * presence of other inputs such as scalars. 
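+ * Such an expression can also be created explicitly with unaryExpr and a custom
+ * functor, e.g. (a minimal sketch using a C++11 lambda):
+ * \code
+ * Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(3, 0.0, 2.0);
+ * auto squared = a.unaryExpr([](double x) { return x * x; }); // a CwiseUnaryOp expression
+ * \endcode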
For example, the operator* in the expression 3*matrix + * is considered unary, because only the right-hand side is an expression, and its + * return type is a specialization of CwiseUnaryOp. + * + * Most of the time, this is the only way that it is used, so you typically don't have to name + * CwiseUnaryOp types explicitly. + * + * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp + */ +template +class CwiseUnaryOp : public CwiseUnaryOpImpl::StorageKind>, internal::no_assignment_operator +{ + public: + + typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) + typedef typename internal::ref_selector::type XprTypeNested; + typedef typename internal::remove_all::type NestedExpression; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); } + + /** \returns the functor representing the unary operation */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::remove_all::type& + nestedExpression() { return m_xpr; } + + protected: + XprTypeNested m_xpr; + const UnaryOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseUnaryOpImpl + : public internal::generic_xpr_base >::type +{ +public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_UNARY_OP_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryView.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryView.h new file mode 100644 index 000000000..a06d7621e --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/CwiseUnaryView.h @@ -0,0 +1,132 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_UNARY_VIEW_H +#define EIGEN_CWISE_UNARY_VIEW_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits +{ + typedef typename result_of< + ViewOp(const typename traits::Scalar&) + >::type Scalar; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename remove_all::type _MatrixTypeNested; + enum { + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions + MatrixTypeInnerStride = inner_stride_at_compile_time::ret, + // need to cast the sizeof's from size_t to int explicitly, otherwise: + // "error: no integral type can represent all of the enumerator values + InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic + ? 
int(Dynamic) + : int(MatrixTypeInnerStride) * int(sizeof(typename traits::Scalar) / sizeof(Scalar)), + OuterStrideAtCompileTime = outer_stride_at_compile_time::ret == Dynamic + ? int(Dynamic) + : outer_stride_at_compile_time::ret * int(sizeof(typename traits::Scalar) / sizeof(Scalar)) + }; +}; +} + +template +class CwiseUnaryViewImpl; + +/** \class CwiseUnaryView + * \ingroup Core_Module + * + * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector + * + * \tparam ViewOp template functor implementing the view + * \tparam MatrixType the type of the matrix we are applying the unary operator + * + * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. + * It is the return type of real() and imag(), and most of the time this is the only way it is used. + * + * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp + */ +template +class CwiseUnaryView : public CwiseUnaryViewImpl::StorageKind> +{ + public: + + typedef typename CwiseUnaryViewImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef typename internal::remove_all::type NestedExpression; + + explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + /** \returns the functor representing unary operation */ + EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC const typename internal::remove_all::type& + nestedExpression() const { return m_matrix; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC typename internal::remove_reference::type& + nestedExpression() { return m_matrix; } + + protected: + MatrixTypeNested m_matrix; + ViewOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseUnaryViewImpl + : public internal::generic_xpr_base >::type +{ +public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +template +class CwiseUnaryViewImpl + : public internal::dense_xpr_base< CwiseUnaryView >::type +{ + public: + + typedef CwiseUnaryView Derived; + typedef typename internal::dense_xpr_base< CwiseUnaryView >::type Base; + + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) + + EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); } + EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const + { + return derived().nestedExpression().innerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const + { + return derived().nestedExpression().outerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); + } + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl) +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_UNARY_VIEW_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseBase.h 
b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseBase.h new file mode 100644 index 000000000..9b16db68d --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseBase.h @@ -0,0 +1,701 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSEBASE_H +#define EIGEN_DENSEBASE_H + +namespace Eigen { + +namespace internal { + +// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type. +// This dummy function simply aims at checking that at compile time. +static inline void check_DenseIndex_is_signed() { + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE) +} + +} // end namespace internal + +/** \class DenseBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and arrays + * + * This class is the base that is inherited by all dense objects (matrix, vector, arrays, + * and related expression types). The common Eigen API for dense objects is contained in this class. + * + * \tparam Derived is the derived type, e.g., a matrix type or an expression. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. + * + * \sa \blank \ref TopicClassHierarchy + */ +template class DenseBase +#ifndef EIGEN_PARSED_BY_DOXYGEN + : public DenseCoeffsBase::value> +#else + : public DenseCoeffsBase +#endif // not EIGEN_PARSED_BY_DOXYGEN +{ + public: + + /** Inner iterator type to iterate over the coefficients of a row or column. + * \sa class InnerIterator + */ + typedef Eigen::InnerIterator InnerIterator; + + typedef typename internal::traits::StorageKind StorageKind; + + /** + * \brief The type used to store indices + * \details This typedef is relevant for types that store multiple indices such as + * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index + * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. + */ + typedef typename internal::traits::StorageIndex StorageIndex; + + /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. */ + typedef typename internal::traits::Scalar Scalar; + + /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. + * + * It is an alias for the Scalar type */ + typedef Scalar value_type; + + typedef typename NumTraits::Real RealScalar; + typedef DenseCoeffsBase::value> Base; + + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::rowIndexByOuterInner; + using Base::colIndexByOuterInner; + using Base::coeff; + using Base::coeffByOuterInner; + using Base::operator(); + using Base::operator[]; + using Base::x; + using Base::y; + using Base::z; + using Base::w; + using Base::stride; + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + /**< The number of rows at compile-time. 
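+ * (e.g. 3 for Eigen::Matrix3f, or the special value \a Dynamic for Eigen::MatrixXf)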
This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + /**< This value is equal to the maximum possible number of rows that this expression + * might have. If this expression might have an arbitrarily high number of rows, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + /**< This value is equal to the maximum possible number of columns that this expression + * might have. If this expression might have an arbitrarily high number of columns, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime>::ret), + /**< This value is equal to the maximum possible number of coefficients that this expression + * might have. If this expression might have an arbitrarily high number of coefficients, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime + */ + + IsVectorAtCompileTime = internal::traits::RowsAtCompileTime == 1 + || internal::traits::ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2, + /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors, + * and 2 for matrices. + */ + + Flags = internal::traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ + + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? 
int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + + InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret + }; + + typedef typename internal::find_best_packet::type PacketScalar; + + enum { IsPlainObjectBase = 0 }; + + /** The plain matrix type corresponding to this expression. + * \sa PlainObject */ + typedef Matrix::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime + > PlainMatrix; + + /** The plain array type corresponding to this expression. + * \sa PlainObject */ + typedef Array::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime + > PlainArray; + + /** \brief The plain matrix or array type corresponding to this expression. + * + * This is not necessarily exactly the return type of eval(). In the case of plain matrices, + * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed + * that the return type of eval() is either PlainObject or const PlainObject&. + */ + typedef typename internal::conditional::XprKind,MatrixXpr >::value, + PlainMatrix, PlainArray>::type PlainObject; + + /** \returns the number of nonzero coefficients which is in practice the number + * of stored coefficients. */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index nonZeros() const { return size(); } + + /** \returns the outer size. + * + * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a + * column-major matrix, and the number of rows for a row-major matrix. */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + Index outerSize() const + { + return IsVectorAtCompileTime ? 1 + : int(IsRowMajor) ? this->rows() : this->cols(); + } + + /** \returns the inner size. + * + * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a + * column-major matrix, and the number of columns for a row-major matrix. */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + Index innerSize() const + { + return IsVectorAtCompileTime ? this->size() + : int(IsRowMajor) ? this->cols() : this->rows(); + } + + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does + * nothing else. + */ + EIGEN_DEVICE_FUNC + void resize(Index newSize) + { + EIGEN_ONLY_USED_FOR_DEBUG(newSize); + eigen_assert(newSize == this->size() + && "DenseBase::resize() does not actually allow to resize."); + } + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does + * nothing else. 
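+ * For instance (a sketch with hypothetical sizes):
+ * \code
+ * Eigen::MatrixXd m(2, 3);
+ * m.transpose().resize(3, 2); // ok: the transpose is already 3x2, so this is a no-op
+ * \endcode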
+ */ + EIGEN_DEVICE_FUNC + void resize(Index rows, Index cols) + { + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + eigen_assert(rows == this->rows() && cols == this->cols() + && "DenseBase::resize() does not actually allow to resize."); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,PlainObject> ConstantReturnType; + /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */ + EIGEN_DEPRECATED typedef CwiseNullaryOp,PlainObject> SequentialLinSpacedReturnType; + /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ + typedef CwiseNullaryOp,PlainObject> RandomAccessLinSpacedReturnType; + /** \internal the return type of MatrixBase::eigenvalues() */ + typedef Matrix::Scalar>::Real, internal::traits::ColsAtCompileTime, 1> EigenvaluesReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** Copies \a other into *this. \returns a reference to *this. */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const DenseBase& other); + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const DenseBase& other); + + template + EIGEN_DEVICE_FUNC + Derived& operator=(const EigenBase &other); + + template + EIGEN_DEVICE_FUNC + Derived& operator+=(const EigenBase &other); + + template + EIGEN_DEVICE_FUNC + Derived& operator-=(const EigenBase &other); + + template + EIGEN_DEVICE_FUNC + Derived& operator=(const ReturnByValue& func); + + /** \internal + * Copies \a other into *this without evaluating other. \returns a reference to *this. 
*/ + template + /** \deprecated */ + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC + Derived& lazyAssign(const DenseBase& other); + + EIGEN_DEVICE_FUNC + CommaInitializer operator<< (const Scalar& s); + + template + /** \deprecated it now returns \c *this */ + EIGEN_DEPRECATED + const Derived& flagged() const + { return derived(); } + + template + EIGEN_DEVICE_FUNC + CommaInitializer operator<< (const DenseBase& other); + + typedef Transpose TransposeReturnType; + EIGEN_DEVICE_FUNC + TransposeReturnType transpose(); + typedef typename internal::add_const >::type ConstTransposeReturnType; + EIGEN_DEVICE_FUNC + ConstTransposeReturnType transpose() const; + EIGEN_DEVICE_FUNC + void transposeInPlace(); + + EIGEN_DEVICE_FUNC static const ConstantReturnType + Constant(Index rows, Index cols, const Scalar& value); + EIGEN_DEVICE_FUNC static const ConstantReturnType + Constant(Index size, const Scalar& value); + EIGEN_DEVICE_FUNC static const ConstantReturnType + Constant(const Scalar& value); + + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType + LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType + LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); + + EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType + LinSpaced(Index size, const Scalar& low, const Scalar& high); + EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType + LinSpaced(const Scalar& low, const Scalar& high); + + template EIGEN_DEVICE_FUNC + static const CwiseNullaryOp + NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); + template EIGEN_DEVICE_FUNC + static const CwiseNullaryOp + NullaryExpr(Index size, const CustomNullaryOp& func); + template EIGEN_DEVICE_FUNC + static const CwiseNullaryOp + NullaryExpr(const CustomNullaryOp& func); + + EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size); + EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(); + + EIGEN_DEVICE_FUNC void fill(const Scalar& value); + EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value); + EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); + EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high); + EIGEN_DEVICE_FUNC Derived& setZero(); + EIGEN_DEVICE_FUNC Derived& setOnes(); + EIGEN_DEVICE_FUNC Derived& setRandom(); + + template EIGEN_DEVICE_FUNC + bool isApprox(const DenseBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC + bool isMuchSmallerThan(const RealScalar& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + template EIGEN_DEVICE_FUNC + bool isMuchSmallerThan(const DenseBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + + EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = 
NumTraits::dummy_precision()) const; + + inline bool hasNaN() const; + inline bool allFinite() const; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator*=(const Scalar& other); + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator/=(const Scalar& other); + + typedef typename internal::add_const_on_value_type::type>::type EvalReturnType; + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. + * + * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE EvalReturnType eval() const + { + // Even though MSVC does not honor strong inlining when the return type + // is a dynamic matrix, we desperately need strong inlining for fixed + // size types on MSVC. + return typename internal::eval::type(derived()); + } + + /** swaps *this with the expression \a other. + * + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void swap(const DenseBase& other) + { + EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + eigen_assert(rows()==other.rows() && cols()==other.cols()); + call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); + } + + /** swaps *this with the matrix or array \a other. + * + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void swap(PlainObjectBase& other) + { + eigen_assert(rows()==other.rows() && cols()==other.cols()); + call_assignment(derived(), other.derived(), internal::swap_assign_op()); + } + + EIGEN_DEVICE_FUNC inline const NestByValue nestByValue() const; + EIGEN_DEVICE_FUNC inline const ForceAlignedAccess forceAlignedAccess() const; + EIGEN_DEVICE_FUNC inline ForceAlignedAccess forceAlignedAccess(); + template EIGEN_DEVICE_FUNC + inline const typename internal::conditional,Derived&>::type forceAlignedAccessIf() const; + template EIGEN_DEVICE_FUNC + inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); + + EIGEN_DEVICE_FUNC Scalar sum() const; + EIGEN_DEVICE_FUNC Scalar mean() const; + EIGEN_DEVICE_FUNC Scalar trace() const; + + EIGEN_DEVICE_FUNC Scalar prod() const; + + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff() const; + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff() const; + + + // By default, the fastest version with undefined NaN propagation semantics is + // used. + // TODO(rmlarsen): Replace with default template argument when we move to + // c++11 or beyond. + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff() const { + return minCoeff(); + } + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff() const { + return maxCoeff(); + } + + template + EIGEN_DEVICE_FUNC + typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const; + template + EIGEN_DEVICE_FUNC + typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; + template + EIGEN_DEVICE_FUNC + typename internal::traits::Scalar minCoeff(IndexType* index) const; + template + EIGEN_DEVICE_FUNC + typename internal::traits::Scalar maxCoeff(IndexType* index) const; + + // TODO(rmlarsen): Replace these methods with a default template argument. 
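+  // For instance (sketch): m.minCoeff<Eigen::PropagateNaN>() returns NaN if any
+  // coefficient is NaN, while m.minCoeff<Eigen::PropagateNumbers>() ignores NaNs.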
+ template + EIGEN_DEVICE_FUNC inline + typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const { + return minCoeff(row, col); + } + template + EIGEN_DEVICE_FUNC inline + typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const { + return maxCoeff(row, col); + } + template + EIGEN_DEVICE_FUNC inline + typename internal::traits::Scalar minCoeff(IndexType* index) const { + return minCoeff(index); + } + template + EIGEN_DEVICE_FUNC inline + typename internal::traits::Scalar maxCoeff(IndexType* index) const { + return maxCoeff(index); + } + + template + EIGEN_DEVICE_FUNC + Scalar redux(const BinaryOp& func) const; + + template + EIGEN_DEVICE_FUNC + void visit(Visitor& func) const; + + /** \returns a WithFormat proxy object allowing to print a matrix the with given + * format \a fmt. + * + * See class IOFormat for some examples. + * + * \sa class IOFormat, class WithFormat + */ + inline const WithFormat format(const IOFormat& fmt) const + { + return WithFormat(derived(), fmt); + } + + /** \returns the unique coefficient of a 1x1 expression */ + EIGEN_DEVICE_FUNC + CoeffReturnType value() const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(0,0); + } + + EIGEN_DEVICE_FUNC bool all() const; + EIGEN_DEVICE_FUNC bool any() const; + EIGEN_DEVICE_FUNC Index count() const; + + typedef VectorwiseOp RowwiseReturnType; + typedef const VectorwiseOp ConstRowwiseReturnType; + typedef VectorwiseOp ColwiseReturnType; + typedef const VectorwiseOp ConstColwiseReturnType; + + /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions + * + * Example: \include MatrixBase_rowwise.cpp + * Output: \verbinclude MatrixBase_rowwise.out + * + * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ + //Code moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { + return ConstRowwiseReturnType(derived()); + } + EIGEN_DEVICE_FUNC RowwiseReturnType rowwise(); + + /** \returns a VectorwiseOp wrapper of *this broadcasting and partial reductions + * + * Example: \include MatrixBase_colwise.cpp + * Output: \verbinclude MatrixBase_colwise.out + * + * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ + EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { + return ConstColwiseReturnType(derived()); + } + EIGEN_DEVICE_FUNC ColwiseReturnType colwise(); + + typedef CwiseNullaryOp,PlainObject> RandomReturnType; + static const RandomReturnType Random(Index rows, Index cols); + static const RandomReturnType Random(Index size); + static const RandomReturnType Random(); + + template + inline EIGEN_DEVICE_FUNC const Select + select(const DenseBase& thenMatrix, + const DenseBase& elseMatrix) const; + + template + inline EIGEN_DEVICE_FUNC const Select + select(const DenseBase& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const; + + template + inline EIGEN_DEVICE_FUNC const Select + select(const typename ElseDerived::Scalar& thenScalar, const DenseBase& elseMatrix) const; + + template RealScalar lpNorm() const; + + template + EIGEN_DEVICE_FUNC + const Replicate replicate() const; + /** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate_int_int.cpp + * Output: \verbinclude MatrixBase_replicate_int_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ + //Code 
moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC + const Replicate replicate(Index rowFactor, Index colFactor) const + { + return Replicate(derived(), rowFactor, colFactor); + } + + typedef Reverse ReverseReturnType; + typedef const Reverse ConstReverseReturnType; + EIGEN_DEVICE_FUNC ReverseReturnType reverse(); + /** This is the const version of reverse(). */ + //Code moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const + { + return ConstReverseReturnType(derived()); + } + EIGEN_DEVICE_FUNC void reverseInPlace(); + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** STL-like RandomAccessIterator + * iterator type as returned by the begin() and end() methods. + */ + typedef random_access_iterator_type iterator; + /** This is the const version of iterator (aka read-only) */ + typedef random_access_iterator_type const_iterator; + #else + typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit, + internal::pointer_based_stl_iterator, + internal::generic_randaccess_stl_iterator + >::type iterator_type; + + typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit, + internal::pointer_based_stl_iterator, + internal::generic_randaccess_stl_iterator + >::type const_iterator_type; + + // Stl-style iterators are supported only for vectors. + + typedef typename internal::conditional< IsVectorAtCompileTime, + iterator_type, + void + >::type iterator; + + typedef typename internal::conditional< IsVectorAtCompileTime, + const_iterator_type, + void + >::type const_iterator; + #endif + + inline iterator begin(); + inline const_iterator begin() const; + inline const_iterator cbegin() const; + inline iterator end(); + inline const_iterator end() const; + inline const_iterator cend() const; + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase +#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) +#define EIGEN_DOC_UNARY_ADDONS(X,Y) +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/BlockMethods.h" +# include "../plugins/IndexedViewMethods.h" +# include "../plugins/ReshapedMethods.h" +# ifdef EIGEN_DENSEBASE_PLUGIN +# include EIGEN_DENSEBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF +#undef EIGEN_DOC_UNARY_ADDONS + + // disable the use of evalTo for dense objects with a nice compilation error + template + EIGEN_DEVICE_FUNC + inline void evalTo(Dest& ) const + { + EIGEN_STATIC_ASSERT((internal::is_same::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); + } + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase) + /** Default constructor. Do nothing. */ + EIGEN_DEVICE_FUNC DenseBase() + { + /* Just checks for self-consistency of the flags. 
+ * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down + */ +#ifdef EIGEN_INTERNAL_DEBUGGING + EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) + && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), + INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) +#endif + } + + private: + EIGEN_DEVICE_FUNC explicit DenseBase(int); + EIGEN_DEVICE_FUNC DenseBase(int,int); + template EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase&); +}; + +} // end namespace Eigen + +#endif // EIGEN_DENSEBASE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseCoeffsBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseCoeffsBase.h new file mode 100644 index 000000000..37fcdb591 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseCoeffsBase.h @@ -0,0 +1,685 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSECOEFFSBASE_H +#define EIGEN_DENSECOEFFSBASE_H + +namespace Eigen { + +namespace internal { +template struct add_const_on_value_type_if_arithmetic +{ + typedef typename conditional::value, T, typename add_const_on_value_type::type>::type type; +}; +} + +/** \brief Base class providing read-only coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #ReadOnlyAccessors Constant indicating read-only access + * + * This class defines the \c operator() \c const function and friends, which can be used to read specific + * entries of a matrix or array. + * + * \sa DenseCoeffsBase, DenseCoeffsBase, + * \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public EigenBase +{ + public: + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + + // Explanation for this CoeffReturnType typedef. + // - This is the return type of the coeff() method. + // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references + // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). + // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems + // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is + // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. + typedef typename internal::conditional::Flags&LvalueBit), + const Scalar&, + typename internal::conditional::value, Scalar, const Scalar>::type + >::type CoeffReturnType; + + typedef typename internal::add_const_on_value_type_if_arithmetic< + typename internal::packet_traits::type + >::type PacketReturnType; + + typedef EigenBase Base; + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const + { + return int(Derived::RowsAtCompileTime) == 1 ? 
0 + : int(Derived::ColsAtCompileTime) == 1 ? inner + : int(Derived::Flags)&RowMajorBit ? outer + : inner; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const + { + return int(Derived::ColsAtCompileTime) == 1 ? 0 + : int(Derived::RowsAtCompileTime) == 1 ? inner + : int(Derived::Flags)&RowMajorBit ? inner + : outer; + } + + /** Short version: don't use this function, use + * \link operator()(Index,Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) const \endlink. + * + * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return internal::evaluator(derived()).coeff(row,col); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const + { + return coeff(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \returns the coefficient at given the given row and column. + * + * \sa operator()(Index,Index), operator[](Index) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const + { + eigen_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return coeff(row, col); + } + + /** Short version: don't use this function, use + * \link operator[](Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameter \a index is in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) const \endlink. + * + * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + coeff(Index index) const + { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).coeff(index); + } + + + /** \returns the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + operator[](Index index) const + { + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + eigen_assert(index >= 0 && index < size()); + return coeff(index); + } + + /** \returns the coefficient at given index. + * + * This is synonymous to operator[](Index) const. 
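+ * For instance (sketch):
+ * \code
+ * Eigen::Vector3d v(1.0, 2.0, 3.0);
+ * double y = v(1); // 2.0, same as v[1]
+ * \endcode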
+ * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + operator()(Index index) const + { + eigen_assert(index >= 0 && index < size()); + return coeff(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + x() const { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + y() const + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS); + return (*this)[1]; + } + + /** equivalent to operator[](2). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + z() const + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS); + return (*this)[2]; + } + + /** equivalent to operator[](3). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE CoeffReturnType + w() const + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS); + return (*this)[3]; + } + + /** \internal + * \returns the packet of coefficients starting at the given row and column. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const + { + typedef typename internal::packet_traits::type DefaultPacketType; + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return internal::evaluator(derived()).template packet(row,col); + } + + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const + { + return packet(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \internal + * \returns the packet of coefficients starting at the given index. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + typedef typename internal::packet_traits::type DefaultPacketType; + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).template packet(index); + } + + protected: + // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. + // But some methods are only available in the DirectAccess case. 
+ // So we add dummy methods here with these names, so that "using... " doesn't fail. + // It's not private so that the child class DenseBase can access them, and it's not public + // either since it's an implementation detail, so has to be protected. + void coeffRef(); + void coeffRefByOuterInner(); + void writePacket(); + void writePacketByOuterInner(); + void copyCoeff(); + void copyCoeffByOuterInner(); + void copyPacket(); + void copyPacketByOuterInner(); + void stride(); + void innerStride(); + void outerStride(); + void rowStride(); + void colStride(); +}; + +/** \brief Base class providing read/write coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #WriteAccessors Constant indicating read/write access + * + * This class defines the non-const \c operator() function and friends, which can be used to write specific + * entries of a matrix or array. This class inherits DenseCoeffsBase which + * defines the const variant for reading specific entries. + * + * \sa DenseCoeffsBase, \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + using Base::coeff; + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + using Base::rowIndexByOuterInner; + using Base::colIndexByOuterInner; + using Base::operator[]; + using Base::operator(); + using Base::x; + using Base::y; + using Base::z; + using Base::w; + + /** Short version: don't use this function, use + * \link operator()(Index,Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) \endlink. + * + * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return internal::evaluator(derived()).coeffRef(row,col); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + coeffRefByOuterInner(Index outer, Index inner) + { + return coeffRef(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \returns a reference to the coefficient at given the given row and column. + * + * \sa operator[](Index) + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + operator()(Index row, Index col) + { + eigen_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return coeffRef(row, col); + } + + + /** Short version: don't use this function, use + * \link operator[](Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. 
Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) \endlink. + * + * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + coeffRef(Index index) + { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + operator[](Index index) + { + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. + * + * This is synonymous to operator[](Index). + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + x() { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + y() + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS); + return (*this)[1]; + } + + /** equivalent to operator[](2). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + z() + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS); + return (*this)[2]; + } + + /** equivalent to operator[](3). */ + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& + w() + { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS); + return (*this)[3]; + } +}; + +/** \brief Base class providing direct read-only coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #DirectAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read-only using + * \c operator() . + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. 
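+     *
+     * For instance (an illustrative sketch, assuming the default column-major storage):
+     * \code
+     * Eigen::MatrixXd m(4, 3);
+     * // elements within one column are contiguous; consecutive columns are 4 scalars apart
+     * assert(m.innerStride() == 1 && m.outerStride() == 4);
+     * \endcode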
+ * + * \sa outerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const + { + return derived().innerStride(); + } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const + { + return derived().outerStride(); + } + + // FIXME shall we remove it ? + EIGEN_CONSTEXPR inline Index stride() const + { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. + * + * \sa innerStride(), outerStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rowStride() const + { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index colStride() const + { + return Derived::IsRowMajor ? innerStride() : outerStride(); + } +}; + +/** \brief Base class providing direct read/write coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #DirectWriteAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read/write using + * \c operator(). + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase + : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT + { + return derived().innerStride(); + } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT + { + return derived().outerStride(); + } + + // FIXME shall we remove it ? + EIGEN_CONSTEXPR inline Index stride() const EIGEN_NOEXCEPT + { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. + * + * \sa innerStride(), outerStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rowStride() const EIGEN_NOEXCEPT + { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index colStride() const EIGEN_NOEXCEPT + { + return Derived::IsRowMajor ? 
innerStride() : outerStride(); + } +}; + +namespace internal { + +template +struct first_aligned_impl +{ + static EIGEN_CONSTEXPR inline Index run(const Derived&) EIGEN_NOEXCEPT + { return 0; } +}; + +template +struct first_aligned_impl +{ + static inline Index run(const Derived& m) + { + return internal::first_aligned(m.data(), m.size()); + } +}; + +/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect to \a Alignment for vectorization. + * + * \tparam Alignment requested alignment in Bytes. + * + * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more + * documentation. + */ +template +static inline Index first_aligned(const DenseBase& m) +{ + enum { ReturnZero = (int(evaluator::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) }; + return first_aligned_impl::run(m.derived()); +} + +template +static inline Index first_default_aligned(const DenseBase& m) +{ + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type DefaultPacketType; + return internal::first_aligned::alignment),Derived>(m); +} + +template::ret> +struct inner_stride_at_compile_time +{ + enum { ret = traits::InnerStrideAtCompileTime }; +}; + +template +struct inner_stride_at_compile_time +{ + enum { ret = 0 }; +}; + +template::ret> +struct outer_stride_at_compile_time +{ + enum { ret = traits::OuterStrideAtCompileTime }; +}; + +template +struct outer_stride_at_compile_time +{ + enum { ret = 0 }; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DENSECOEFFSBASE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseStorage.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseStorage.h new file mode 100644 index 000000000..08ef6c530 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DenseStorage.h @@ -0,0 +1,652 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2010-2013 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXSTORAGE_H +#define EIGEN_MATRIXSTORAGE_H + +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN; +#else + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) +#endif + +namespace Eigen { + +namespace internal { + +struct constructor_without_unaligned_array_assert {}; + +template +EIGEN_DEVICE_FUNC +void check_static_allocation_size() +{ + // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit + #if EIGEN_STACK_ALLOCATION_LIMIT + EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + #endif +} + +/** \internal + * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: + * to 16 bytes boundary if the total size is a multiple of 16 bytes. 
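+ *
+ * For example (an illustrative note, assuming a 16-byte maximum static alignment): an array of
+ * four floats (16 bytes) selects the 16-byte-aligned specialization below and can be loaded as a
+ * single SIMD packet, while an array of three floats (12 bytes) keeps the default, unaligned layout.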
+ */ +template ::value > +struct plain_array +{ + T array[Size]; + + EIGEN_DEVICE_FUNC + plain_array() + { + check_static_allocation_size(); + } + + EIGEN_DEVICE_FUNC + plain_array(constructor_without_unaligned_array_assert) + { + check_static_allocation_size(); + } +}; + +#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) +#elif EIGEN_GNUC_AT_LEAST(4,7) + // GCC 4.7 is too aggressive in its optimizations and remove the alignment test based on the fact the array is declared to be aligned. + // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900 + // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined: + template + EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ + eigen_assert((internal::UIntPtr(eigen_unaligned_array_assert_workaround_gcc47(array)) & (sizemask)) == 0 \ + && "this assertion is explained here: " \ + "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ + " **** READ THIS WEB PAGE !!! ****"); +#else + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ + eigen_assert((internal::UIntPtr(array) & (sizemask)) == 0 \ + && "this assertion is explained here: " \ + "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ + " **** READ THIS WEB PAGE !!! ****"); +#endif + +template +struct plain_array +{ + EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size]; + + EIGEN_DEVICE_FUNC + plain_array() + { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7); + check_static_allocation_size(); + } + + EIGEN_DEVICE_FUNC + plain_array(constructor_without_unaligned_array_assert) + { + check_static_allocation_size(); + } +}; + +template +struct plain_array +{ + EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size]; + + EIGEN_DEVICE_FUNC + plain_array() + { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15); + check_static_allocation_size(); + } + + EIGEN_DEVICE_FUNC + plain_array(constructor_without_unaligned_array_assert) + { + check_static_allocation_size(); + } +}; + +template +struct plain_array +{ + EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size]; + + EIGEN_DEVICE_FUNC + plain_array() + { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31); + check_static_allocation_size(); + } + + EIGEN_DEVICE_FUNC + plain_array(constructor_without_unaligned_array_assert) + { + check_static_allocation_size(); + } +}; + +template +struct plain_array +{ + EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size]; + + EIGEN_DEVICE_FUNC + plain_array() + { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63); + check_static_allocation_size(); + } + + EIGEN_DEVICE_FUNC + plain_array(constructor_without_unaligned_array_assert) + { + check_static_allocation_size(); + } +}; + +template +struct plain_array +{ + T array[1]; + EIGEN_DEVICE_FUNC plain_array() {} + EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {} +}; + +struct plain_array_helper { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static void copy(const plain_array& src, const Eigen::Index size, + plain_array& dst) { + smart_copy(src.array, src.array + size, dst.array); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static void swap(plain_array& a, const Eigen::Index a_size, + plain_array& b, const Eigen::Index b_size) { + if (a_size < b_size) { + std::swap_ranges(b.array, b.array + a_size, a.array); + smart_move(b.array + a_size, b.array + b_size, a.array + a_size); + } else if (a_size > b_size) { + 
std::swap_ranges(a.array, a.array + b_size, b.array); + smart_move(a.array + b_size, a.array + a_size, b.array + b_size); + } else { + std::swap_ranges(a.array, a.array + a_size, b.array); + } + } +}; + +} // end namespace internal + +/** \internal + * + * \class DenseStorage + * \ingroup Core_Module + * + * \brief Stores the data of a matrix + * + * This class stores the data of fixed-size, dynamic-size or mixed matrices + * in a way as compact as possible. + * + * \sa Matrix + */ +template class DenseStorage; + +// purely fixed-size matrix +template class DenseStorage +{ + internal::plain_array m_data; + public: + EIGEN_DEVICE_FUNC DenseStorage() { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) + } + EIGEN_DEVICE_FUNC + explicit DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()) {} +#if !EIGEN_HAS_CXX11 || defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN) + EIGEN_DEVICE_FUNC + DenseStorage(const DenseStorage& other) : m_data(other.m_data) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) + } +#else + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) = default; +#endif +#if !EIGEN_HAS_CXX11 + EIGEN_DEVICE_FUNC + DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) m_data = other.m_data; + return *this; + } +#else + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) = default; +#endif +#if EIGEN_HAS_RVALUE_REFERENCES +#if !EIGEN_HAS_CXX11 + EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT + : m_data(std::move(other.m_data)) + { + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT + { + if (this != &other) + m_data = std::move(other.m_data); + return *this; + } +#else + EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&&) = default; + EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&&) = default; +#endif +#endif + EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols); + EIGEN_UNUSED_VARIABLE(size); + EIGEN_UNUSED_VARIABLE(rows); + EIGEN_UNUSED_VARIABLE(cols); + } + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { + numext::swap(m_data, other.m_data); + } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;} + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return _Cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} + EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} + EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } + EIGEN_DEVICE_FUNC T *data() { return m_data.array; } +}; + +// null matrix +template class DenseStorage +{ + public: + EIGEN_DEVICE_FUNC DenseStorage() {} + EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {} + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {} + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; } + EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {} + EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {} + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;} + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return _Cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} + EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} + EIGEN_DEVICE_FUNC const T *data() const { return 0; } + 
EIGEN_DEVICE_FUNC T *data() { return 0; } +}; + +// more specializations for null matrices; these are necessary to resolve ambiguities +template class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + +// dynamic-size matrix with fixed-size storage +template class DenseStorage +{ + internal::plain_array m_data; + Index m_rows; + Index m_cols; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {} + EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows), m_cols(other.m_cols) + { + internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data); + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + m_rows = other.m_rows; + m_cols = other.m_cols; + internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data); + } + return *this; + } + EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {} + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) + { + internal::plain_array_helper::swap(m_data, m_rows * m_cols, other.m_data, other.m_rows * other.m_cols); + numext::swap(m_rows,other.m_rows); + numext::swap(m_cols,other.m_cols); + } + EIGEN_DEVICE_FUNC Index rows() const {return m_rows;} + EIGEN_DEVICE_FUNC Index cols() const {return m_cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } + EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } + EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } + EIGEN_DEVICE_FUNC T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed width +template class DenseStorage +{ + internal::plain_array m_data; + Index m_rows; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {} + EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows) + { + internal::plain_array_helper::copy(other.m_data, m_rows * _Cols, m_data); + } + + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + m_rows = other.m_rows; + internal::plain_array_helper::copy(other.m_data, m_rows * _Cols, m_data); + } + return *this; + } + EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {} + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) + { + internal::plain_array_helper::swap(m_data, m_rows * _Cols, other.m_data, other.m_rows * _Cols); + numext::swap(m_rows, other.m_rows); + } + EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;} + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols(void) const EIGEN_NOEXCEPT {return _Cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; } + EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; } + EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } + EIGEN_DEVICE_FUNC T *data() { return 
m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed height +template class DenseStorage +{ + internal::plain_array m_data; + Index m_cols; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {} + EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(other.m_cols) + { + internal::plain_array_helper::copy(other.m_data, _Rows * m_cols, m_data); + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + m_cols = other.m_cols; + internal::plain_array_helper::copy(other.m_data, _Rows * m_cols, m_data); + } + return *this; + } + EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {} + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { + internal::plain_array_helper::swap(m_data, _Rows * m_cols, other.m_data, _Rows * other.m_cols); + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows(void) const EIGEN_NOEXCEPT {return _Rows;} + EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index, Index, Index cols) { m_cols = cols; } + EIGEN_DEVICE_FUNC void resize(Index, Index, Index cols) { m_cols = cols; } + EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } + EIGEN_DEVICE_FUNC T *data() { return m_data.array; } +}; + +// purely dynamic matrix. +template class DenseStorage +{ + T *m_data; + Index m_rows; + Index m_cols; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} + EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(0), m_rows(0), m_cols(0) {} + EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) + : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows), m_cols(cols) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0); + } + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::conditional_aligned_new_auto(other.m_rows*other.m_cols)) + , m_rows(other.m_rows) + , m_cols(other.m_cols) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols) + internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data); + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + DenseStorage tmp(other); + this->swap(tmp); + } + return *this; + } +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT + : m_data(std::move(other.m_data)) + , m_rows(std::move(other.m_rows)) + , m_cols(std::move(other.m_cols)) + { + other.m_data = nullptr; + other.m_rows = 0; + other.m_cols = 0; + } + EIGEN_DEVICE_FUNC + DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT + { + numext::swap(m_data, other.m_data); + numext::swap(m_rows, other.m_rows); + numext::swap(m_cols, other.m_cols); + return *this; + } +#endif + EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) + { + numext::swap(m_data,other.m_data); + numext::swap(m_rows,other.m_rows); + numext::swap(m_cols,other.m_cols); + } + EIGEN_DEVICE_FUNC Index rows(void) const 
EIGEN_NOEXCEPT {return m_rows;} + EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} + void conservativeResize(Index size, Index rows, Index cols) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols) + { + if(size != m_rows*m_cols) + { + internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); + if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC const T *data() const { return m_data; } + EIGEN_DEVICE_FUNC T *data() { return m_data; } +}; + +// matrix with dynamic width and fixed height (so that matrix has dynamic size). +template class DenseStorage +{ + T *m_data; + Index m_cols; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {} + explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} + EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(cols) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + eigen_internal_assert(size==rows*cols && rows==_Rows && cols >=0); + EIGEN_UNUSED_VARIABLE(rows); + } + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::conditional_aligned_new_auto(_Rows*other.m_cols)) + , m_cols(other.m_cols) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows) + internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data); + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + DenseStorage tmp(other); + this->swap(tmp); + } + return *this; + } +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT + : m_data(std::move(other.m_data)) + , m_cols(std::move(other.m_cols)) + { + other.m_data = nullptr; + other.m_cols = 0; + } + EIGEN_DEVICE_FUNC + DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT + { + numext::swap(m_data, other.m_data); + numext::swap(m_cols, other.m_cols); + return *this; + } +#endif + EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { + numext::swap(m_data,other.m_data); + numext::swap(m_cols,other.m_cols); + } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;} + EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols) + { + if(size != _Rows*m_cols) + { + internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); + if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_cols = cols; + } + EIGEN_DEVICE_FUNC const T *data() const { return m_data; } + EIGEN_DEVICE_FUNC T *data() { return m_data; } +}; + +// matrix with dynamic height and fixed width (so that 
matrix has dynamic size). +template class DenseStorage +{ + T *m_data; + Index m_rows; + public: + EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {} + explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} + EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + eigen_internal_assert(size==rows*cols && rows>=0 && cols == _Cols); + EIGEN_UNUSED_VARIABLE(cols); + } + EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) + : m_data(internal::conditional_aligned_new_auto(other.m_rows*_Cols)) + , m_rows(other.m_rows) + { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols) + internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data); + } + EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + DenseStorage tmp(other); + this->swap(tmp); + } + return *this; + } +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT + : m_data(std::move(other.m_data)) + , m_rows(std::move(other.m_rows)) + { + other.m_data = nullptr; + other.m_rows = 0; + } + EIGEN_DEVICE_FUNC + DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT + { + numext::swap(m_data, other.m_data); + numext::swap(m_rows, other.m_rows); + return *this; + } +#endif + EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } + EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { + numext::swap(m_data,other.m_data); + numext::swap(m_rows,other.m_rows); + } + EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;} + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) {return _Cols;} + void conservativeResize(Index size, Index rows, Index) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index) + { + if(size != m_rows*_Cols) + { + internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); + if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_rows = rows; + } + EIGEN_DEVICE_FUNC const T *data() const { return m_data; } + EIGEN_DEVICE_FUNC T *data() { return m_data; } +}; + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Diagonal.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Diagonal.h new file mode 100644 index 000000000..3112d2c16 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Diagonal.h @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2009 Benoit Jacob +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
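+
+// Illustrative usage sketch: the Diagonal expression defined below is normally
+// obtained through MatrixBase::diagonal(), e.g.
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
+//   Eigen::Vector3d d = m.diagonal();   // main diagonal, here {1, 1, 1}
+//   m.diagonal<1>().setZero();          // first super diagonal, writable in place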
+ +#ifndef EIGEN_DIAGONAL_H +#define EIGEN_DIAGONAL_H + +namespace Eigen { + +/** \class Diagonal + * \ingroup Core_Module + * + * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix + * + * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal + * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. + * A positive value means a superdiagonal, a negative value means a subdiagonal. + * You can also use DynamicIndex so the index can be set at runtime. + * + * The matrix is not required to be square. + * + * This class represents an expression of the main diagonal, or any sub/super diagonal + * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the + * time this is the only way it is used. + * + * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename ref_selector::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + typedef typename MatrixType::StorageKind StorageKind; + enum { + RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic + : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), + MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic + : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, + MatrixType::MaxColsAtCompileTime) + : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), + MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), + MaxColsAtCompileTime = 1, + MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions + MatrixTypeOuterStride = outer_stride_at_compile_time::ret, + InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, + OuterStrideAtCompileTime = 0 + }; +}; +} + +template class Diagonal + : public internal::dense_xpr_base< Diagonal >::type +{ + public: + + enum { DiagIndex = _DiagIndex }; + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) + + EIGEN_DEVICE_FUNC + explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) + { + eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() ); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) + + EIGEN_DEVICE_FUNC + inline Index rows() const + { + return m_index.value()<0 ? 
numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) + : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return 1; } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT { + return m_matrix.outerStride() + 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return 0; } + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } + EIGEN_DEVICE_FUNC + inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index row, Index) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index row, Index) const + { + return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline CoeffReturnType coeff(Index row, Index) const + { + return m_matrix.coeff(row+rowOffset(), row+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index idx) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index idx) const + { + return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline CoeffReturnType coeff(Index idx) const + { + return m_matrix.coeff(idx+rowOffset(), idx+colOffset()); + } + + EIGEN_DEVICE_FUNC + inline const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + + EIGEN_DEVICE_FUNC + inline Index index() const + { + return m_index.value(); + } + + protected: + typename internal::ref_selector::non_const_type m_matrix; + const internal::variable_if_dynamicindex m_index; + + private: + // some compilers may fail to optimize std::max etc in case of compile-time constants... + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index absDiagIndex() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rowOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index colOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : 0; } + // trigger a compile-time error if someone try to call packet + template typename MatrixType::PacketReturnType packet(Index) const; + template typename MatrixType::PacketReturnType packet(Index,Index) const; +}; + +/** \returns an expression of the main diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * Example: \include MatrixBase_diagonal.cpp + * Output: \verbinclude MatrixBase_diagonal.out + * + * \sa class Diagonal */ +template +EIGEN_DEVICE_FUNC inline typename MatrixBase::DiagonalReturnType +MatrixBase::diagonal() +{ + return DiagonalReturnType(derived()); +} + +/** This is the const version of diagonal(). 
 */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalReturnType
+MatrixBase<Derived>::diagonal() const
+{
+  return ConstDiagonalReturnType(derived());
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
+MatrixBase<Derived>::diagonal(Index index)
+{
+  return DiagonalDynamicIndexReturnType(derived(), index);
+}
+
+/** This is the const version of diagonal(Index). */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
+MatrixBase<Derived>::diagonal(Index index) const
+{
+  return ConstDiagonalDynamicIndexReturnType(derived(), index);
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+  * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_template_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_template_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+template<int Index_>
+EIGEN_DEVICE_FUNC
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
+MatrixBase<Derived>::diagonal()
+{
+  return typename DiagonalIndexReturnType<Index_>::Type(derived());
+}
+
+/** This is the const version of diagonal(). */
+template<typename Derived>
+template<int Index_>
+EIGEN_DEVICE_FUNC
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
+MatrixBase<Derived>::diagonal() const
+{
+  return typename ConstDiagonalIndexReturnType<Index_>::Type(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONAL_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalMatrix.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalMatrix.h
new file mode 100644
index 000000000..542685c65
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalMatrix.h
@@ -0,0 +1,391 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#ifndef EIGEN_DIAGONALMATRIX_H +#define EIGEN_DIAGONALMATRIX_H + +namespace Eigen { + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +class DiagonalBase : public EigenBase +{ + public: + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; + typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::RealScalar RealScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + + enum { + RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + IsVectorAtCompileTime = 0, + Flags = NoPreferredStorageOrderBit + }; + + typedef Matrix DenseMatrixType; + typedef DenseMatrixType DenseType; + typedef DiagonalMatrix PlainObject; + + EIGEN_DEVICE_FUNC + inline const Derived& derived() const { return *static_cast(this); } + EIGEN_DEVICE_FUNC + inline Derived& derived() { return *static_cast(this); } + + EIGEN_DEVICE_FUNC + DenseMatrixType toDenseMatrix() const { return derived(); } + + EIGEN_DEVICE_FUNC + inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } + EIGEN_DEVICE_FUNC + inline DiagonalVectorType& diagonal() { return derived().diagonal(); } + + EIGEN_DEVICE_FUNC + inline Index rows() const { return diagonal().size(); } + EIGEN_DEVICE_FUNC + inline Index cols() const { return diagonal().size(); } + + template + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase &matrix) const + { + return Product(derived(),matrix.derived()); + } + + typedef DiagonalWrapper, const DiagonalVectorType> > InverseReturnType; + EIGEN_DEVICE_FUNC + inline const InverseReturnType + inverse() const + { + return InverseReturnType(diagonal().cwiseInverse()); + } + + EIGEN_DEVICE_FUNC + inline const DiagonalWrapper + operator*(const Scalar& scalar) const + { + return DiagonalWrapper(diagonal() * scalar); + } + EIGEN_DEVICE_FUNC + friend inline const DiagonalWrapper + operator*(const Scalar& scalar, const DiagonalBase& other) + { + return DiagonalWrapper(scalar * other.diagonal()); + } + + template + EIGEN_DEVICE_FUNC + #ifdef EIGEN_PARSED_BY_DOXYGEN + inline unspecified_expression_type + #else + inline const DiagonalWrapper + #endif + operator+(const DiagonalBase& other) const + { + return (diagonal() + other.diagonal()).asDiagonal(); + } + + template + EIGEN_DEVICE_FUNC + #ifdef EIGEN_PARSED_BY_DOXYGEN + inline unspecified_expression_type + #else + inline const DiagonalWrapper + #endif + operator-(const DiagonalBase& other) const + { + return (diagonal() - other.diagonal()).asDiagonal(); + } +}; + +#endif + +/** \class DiagonalMatrix + * \ingroup Core_Module + * + * \brief Represents a diagonal matrix with its storage + * + * \param _Scalar the type of coefficients + * \param SizeAtCompileTime the dimension of the matrix, or Dynamic + * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults + * to SizeAtCompileTime. Most of the time, you do not need to specify it. 
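+  *
+  * A short example (an illustrative sketch):
+  * \code
+  * DiagonalMatrix<double, 3> D(1.0, 2.0, 3.0);
+  * Matrix3d B = D * Matrix3d::Random(); // scales row i by D.diagonal()[i]; O(n^2) rather than O(n^3)
+  * \endcode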
+ * + * \sa class DiagonalWrapper + */ + +namespace internal { +template +struct traits > + : traits > +{ + typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; + typedef DiagonalShape StorageKind; + enum { + Flags = LvalueBit | NoPreferredStorageOrderBit + }; +}; +} +template +class DiagonalMatrix + : public DiagonalBase > +{ + public: + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; + typedef const DiagonalMatrix& Nested; + typedef _Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + #endif + + protected: + + DiagonalVectorType m_diagonal; + + public: + + /** const version of diagonal(). */ + EIGEN_DEVICE_FUNC + inline const DiagonalVectorType& diagonal() const { return m_diagonal; } + /** \returns a reference to the stored vector of diagonal coefficients. */ + EIGEN_DEVICE_FUNC + inline DiagonalVectorType& diagonal() { return m_diagonal; } + + /** Default constructor without initialization */ + EIGEN_DEVICE_FUNC + inline DiagonalMatrix() {} + + /** Constructs a diagonal matrix with given dimension */ + EIGEN_DEVICE_FUNC + explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} + + /** 2D constructor. */ + EIGEN_DEVICE_FUNC + inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} + + /** 3D constructor. */ + EIGEN_DEVICE_FUNC + inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {} + + #if EIGEN_HAS_CXX11 + /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients. \cpp11 + * + * There exists C++98 anologue constructors for fixed-size diagonal matrices having 2 or 3 coefficients. + * + * \warning To construct a diagonal matrix of fixed size, the number of values passed to this + * constructor must match the fixed dimension of \c *this. + * + * \sa DiagonalMatrix(const Scalar&, const Scalar&) + * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const ArgTypes&... args) + : m_diagonal(a0, a1, a2, args...) {} + + /** \brief Constructs a DiagonalMatrix and initializes it by elements given by an initializer list of initializer + * lists \cpp11 + */ + EIGEN_DEVICE_FUNC + explicit EIGEN_STRONG_INLINE DiagonalMatrix(const std::initializer_list>& list) + : m_diagonal(list) {} + #endif // EIGEN_HAS_CXX11 + + /** Copy constructor. */ + template + EIGEN_DEVICE_FUNC + inline DiagonalMatrix(const DiagonalBase& other) : m_diagonal(other.diagonal()) {} + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */ + inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {} + #endif + + /** generic constructor from expression of the diagonal coefficients */ + template + EIGEN_DEVICE_FUNC + explicit inline DiagonalMatrix(const MatrixBase& other) : m_diagonal(other) + {} + + /** Copy operator. */ + template + EIGEN_DEVICE_FUNC + DiagonalMatrix& operator=(const DiagonalBase& other) + { + m_diagonal = other.diagonal(); + return *this; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. 
+ */ + EIGEN_DEVICE_FUNC + DiagonalMatrix& operator=(const DiagonalMatrix& other) + { + m_diagonal = other.diagonal(); + return *this; + } + #endif + + /** Resizes to given size. */ + EIGEN_DEVICE_FUNC + inline void resize(Index size) { m_diagonal.resize(size); } + /** Sets all coefficients to zero. */ + EIGEN_DEVICE_FUNC + inline void setZero() { m_diagonal.setZero(); } + /** Resizes and sets all coefficients to zero. */ + EIGEN_DEVICE_FUNC + inline void setZero(Index size) { m_diagonal.setZero(size); } + /** Sets this matrix to be the identity matrix of the current size. */ + EIGEN_DEVICE_FUNC + inline void setIdentity() { m_diagonal.setOnes(); } + /** Sets this matrix to be the identity matrix of the given size. */ + EIGEN_DEVICE_FUNC + inline void setIdentity(Index size) { m_diagonal.setOnes(size); } +}; + +/** \class DiagonalWrapper + * \ingroup Core_Module + * + * \brief Expression of a diagonal matrix + * + * \param _DiagonalVectorType the type of the vector of diagonal coefficients + * + * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients, + * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal() + * and most of the time this is the only way that it is used. + * + * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal() + */ + +namespace internal { +template +struct traits > +{ + typedef _DiagonalVectorType DiagonalVectorType; + typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::StorageIndex StorageIndex; + typedef DiagonalShape StorageKind; + typedef typename traits::XprKind XprKind; + enum { + RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + Flags = (traits::Flags & LvalueBit) | NoPreferredStorageOrderBit + }; +}; +} + +template +class DiagonalWrapper + : public DiagonalBase >, internal::no_assignment_operator +{ + public: + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef _DiagonalVectorType DiagonalVectorType; + typedef DiagonalWrapper Nested; + #endif + + /** Constructor from expression of diagonal coefficients to wrap. */ + EIGEN_DEVICE_FUNC + explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {} + + /** \returns a const reference to the wrapped expression of diagonal coefficients. */ + EIGEN_DEVICE_FUNC + const DiagonalVectorType& diagonal() const { return m_diagonal; } + + protected: + typename DiagonalVectorType::Nested m_diagonal; +}; + +/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients + * + * \only_for_vectors + * + * Example: \include MatrixBase_asDiagonal.cpp + * Output: \verbinclude MatrixBase_asDiagonal.out + * + * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() + **/ +template +EIGEN_DEVICE_FUNC inline const DiagonalWrapper +MatrixBase::asDiagonal() const +{ + return DiagonalWrapper(derived()); +} + +/** \returns true if *this is approximately equal to a diagonal matrix, + * within the precision given by \a prec. 
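+  *
+  * Off-diagonal entries are compared against the largest absolute value found on the
+  * diagonal, so the test is invariant under a global rescaling of the matrix.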
+ * + * Example: \include MatrixBase_isDiagonal.cpp + * Output: \verbinclude MatrixBase_isDiagonal.out + * + * \sa asDiagonal() + */ +template +bool MatrixBase::isDiagonal(const RealScalar& prec) const +{ + if(cols() != rows()) return false; + RealScalar maxAbsOnDiagonal = static_cast(-1); + for(Index j = 0; j < cols(); ++j) + { + RealScalar absOnDiagonal = numext::abs(coeff(j,j)); + if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; + } + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < j; ++i) + { + if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; + if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; + } + return true; +} + +namespace internal { + +template<> struct storage_kind_to_shape { typedef DiagonalShape Shape; }; + +struct Diagonal2Dense {}; + +template<> struct AssignmentKind { typedef Diagonal2Dense Kind; }; + +// Diagonal matrix to Dense assignment +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + dst.setZero(); + dst.diagonal() = src.diagonal(); + } + + static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &/*func*/) + { dst.diagonal() += src.diagonal(); } + + static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &/*func*/) + { dst.diagonal() -= src.diagonal(); } +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DIAGONALMATRIX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalProduct.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalProduct.h new file mode 100644 index 000000000..7911d1cd1 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/DiagonalProduct.h @@ -0,0 +1,28 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2007-2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DIAGONALPRODUCT_H +#define EIGEN_DIAGONALPRODUCT_H + +namespace Eigen { + +/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. + */ +template +template +EIGEN_DEVICE_FUNC inline const Product +MatrixBase::operator*(const DiagonalBase &a_diagonal) const +{ + return Product(derived(),a_diagonal.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_DIAGONALPRODUCT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Dot.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Dot.h new file mode 100644 index 000000000..5c3441b92 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Dot.h @@ -0,0 +1,318 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008, 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
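// Editorial addition, not part of the Eigen sources: the operator* defined in
// DiagonalProduct.h above multiplies a dense expression by a diagonal expression
// without ever materializing the dense diagonal matrix, e.g.
//
//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
//   Eigen::Vector3d s(1.0, 2.0, 3.0);
//   Eigen::Matrix3d scaled = m * s.asDiagonal();   // column j of m scaled by s(j)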
+
+#ifndef EIGEN_DOT_H
+#define EIGEN_DOT_H
+
+namespace Eigen {
+
+namespace internal {
+
+// helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot
+// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
+// looking at the static assertions. Thus this is a trick to get better compile errors.
+template<typename T, typename U,
+// the NeedToTranspose condition here is taken straight from Assign.h
+         bool NeedToTranspose = T::IsVectorAtCompileTime
+                && U::IsVectorAtCompileTime
+                && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
+                      |  // FIXME | instead of || to please GCC 4.4.0 stupid warning
+                    (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
+>
+struct dot_nocheck
+{
+  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
+  typedef typename conj_prod::result_type ResScalar;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE
+  static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.template binaryExpr<conj_prod>(b).sum();
+  }
+};
+
+template<typename T, typename U>
+struct dot_nocheck<T, U, true>
+{
+  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
+  typedef typename conj_prod::result_type ResScalar;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE
+  static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.transpose().template binaryExpr<conj_prod>(b).sum();
+  }
+};
+
+} // end namespace internal
+
+/** \fn MatrixBase::dot
+  * \returns the dot product of *this with other.
+  *
+  * \only_for_vectors
+  *
+  * \note If the scalar type is complex, this function returns the hermitian
+  * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
+  * second variable.
+  *
+  * \sa squaredNorm(), norm()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE
+typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
+  typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
+  EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
+#endif
+
+  eigen_assert(size() == other.size());
+
+  return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
+}
+
+//---------- implementation of L2 norm and related functions ----------
+
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
+  * In both cases, it is the sum of the squares of all the matrix entries.
+  * For vectors, this also equals the dot product of \c *this with itself.
+  *
+  * \sa dot(), norm(), lpNorm()
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+{
+  return numext::real((*this).cwiseAbs2().sum());
+}
+
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it is the square root of the sum of the squares of all the matrix entries.
+  * For vectors, this also equals the square root of the dot product of \c *this with itself.
+  *
+  * \sa lpNorm(), dot(), squaredNorm()
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+{
+  return numext::sqrt(squaredNorm());
+}
+
+/** \returns an expression of the quotient of \c *this by its own norm.
+  *
+  * \warning If the input vector is too small (i.e., this->norm()==0),
+  * then this function returns a copy of the input.
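 *
 * (Editorial addition, not upstream Eigen documentation: a sketch tying together the
 * dot(), squaredNorm() and norm() definitions above; values are illustrative.)
 * \code
 * Eigen::Vector2cd a(std::complex<double>(1.0, 1.0), std::complex<double>(0.0, 1.0));
 * std::complex<double> d = a.dot(a);   // conjugate-linear in the first argument: (3, 0)
 * double n2 = a.squaredNorm();         // 3, equals numext::real(a.dot(a))
 * double n  = a.norm();                // sqrt(3)
 * \endcode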
+ *
+ * \only_for_vectors
+ *
+ * \sa norm(), normalize()
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::normalized() const
+{
+  typedef typename internal::nested_eval<Derived,2>::type _Nested;
+  _Nested n(derived());
+  RealScalar z = n.squaredNorm();
+  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
+  if(z>RealScalar(0))
+    return n / numext::sqrt(z);
+  else
+    return n;
+}
+
+/** Normalizes the vector, i.e. divides it by its own norm.
+  *
+  * \only_for_vectors
+  *
+  * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
+  *
+  * \sa norm(), normalized()
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
+{
+  RealScalar z = squaredNorm();
+  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
+  if(z>RealScalar(0))
+    derived() /= numext::sqrt(z);
+}
+
+/** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow.
+  *
+  * \only_for_vectors
+  *
+  * This method is analogous to the normalized() method, but it reduces the risk of
+  * underflow and overflow when computing the norm.
+  *
+  * \warning If the input vector is too small (i.e., this->norm()==0),
+  * then this function returns a copy of the input.
+  *
+  * \sa stableNorm(), stableNormalize(), normalized()
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::stableNormalized() const
+{
+  typedef typename internal::nested_eval<Derived,3>::type _Nested;
+  _Nested n(derived());
+  RealScalar w = n.cwiseAbs().maxCoeff();
+  RealScalar z = (n/w).squaredNorm();
+  if(z>RealScalar(0))
+    return n / (numext::sqrt(z)*w);
+  else
+    return n;
+}
+
+/** Normalizes the vector while avoiding underflow and overflow.
+  *
+  * \only_for_vectors
+  *
+  * This method is analogous to the normalize() method, but it reduces the risk of
+  * underflow and overflow when computing the norm.
+  *
+  * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
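 *
 * (Editorial addition, not upstream Eigen documentation: contrasting normalize() with
 * stableNormalize(); the extreme magnitudes are illustrative.)
 * \code
 * Eigen::Vector2d v(3e200, 4e200);
 * Eigen::Vector2d a = v.normalized();  // squaredNorm() overflows to inf, result is (0, 0)
 * v.stableNormalize();                 // rescales by the max coefficient first: (0.6, 0.8)
 * \endcode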
+ * + * \sa stableNorm(), stableNormalized(), normalize() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase::stableNormalize() +{ + RealScalar w = cwiseAbs().maxCoeff(); + RealScalar z = (derived()/w).squaredNorm(); + if(z>RealScalar(0)) + derived() /= numext::sqrt(z)*w; +} + +//---------- implementation of other norms ---------- + +namespace internal { + +template +struct lpNorm_selector +{ + typedef typename NumTraits::Scalar>::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const MatrixBase& m) + { + EIGEN_USING_STD(pow) + return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); + } +}; + +template +struct lpNorm_selector +{ + EIGEN_DEVICE_FUNC + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.cwiseAbs().sum(); + } +}; + +template +struct lpNorm_selector +{ + EIGEN_DEVICE_FUNC + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.norm(); + } +}; + +template +struct lpNorm_selector +{ + typedef typename NumTraits::Scalar>::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const MatrixBase& m) + { + if(Derived::SizeAtCompileTime==0 || (Derived::SizeAtCompileTime==Dynamic && m.size()==0)) + return RealScalar(0); + return m.cwiseAbs().maxCoeff(); + } +}; + +} // end namespace internal + +/** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values + * of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ + * norm, that is the maximum of the absolute values of the coefficients of \c *this. + * + * In all cases, if \c *this is empty, then the value 0 is returned. + * + * \note For matrices, this function does not compute the operator-norm. That is, if \c *this is a matrix, then its coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink. + * + * \sa norm() + */ +template +template +#ifndef EIGEN_PARSED_BY_DOXYGEN +EIGEN_DEVICE_FUNC inline typename NumTraits::Scalar>::Real +#else +EIGEN_DEVICE_FUNC MatrixBase::RealScalar +#endif +MatrixBase::lpNorm() const +{ + return internal::lpNorm_selector::run(*this); +} + +//---------- implementation of isOrthogonal / isUnitary ---------- + +/** \returns true if *this is approximately orthogonal to \a other, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isOrthogonal.cpp + * Output: \verbinclude MatrixBase_isOrthogonal.out + */ +template +template +bool MatrixBase::isOrthogonal +(const MatrixBase& other, const RealScalar& prec) const +{ + typename internal::nested_eval::type nested(derived()); + typename internal::nested_eval::type otherNested(other.derived()); + return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); +} + +/** \returns true if *this is approximately an unitary matrix, + * within the precision given by \a prec. In the case where the \a Scalar + * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name. + * + * \note This can be used to check whether a family of vectors forms an orthonormal basis. + * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an + * orthonormal basis. 
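 *
 * (Editorial addition, not upstream Eigen documentation: a sketch of the norms and
 * checks defined above.)
 * \code
 * Eigen::Vector3d v(1.0, -2.0, 2.0);
 * double l1   = v.lpNorm<1>();                        // 5, sum of absolute values
 * double linf = v.lpNorm<Eigen::Infinity>();          // 2, largest absolute value
 * bool u = Eigen::Matrix3d::Identity().isUnitary();   // true: orthonormal columns
 * \endcode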
+ * + * Example: \include MatrixBase_isUnitary.cpp + * Output: \verbinclude MatrixBase_isUnitary.out + */ +template +bool MatrixBase::isUnitary(const RealScalar& prec) const +{ + typename internal::nested_eval::type self(derived()); + for(Index i = 0; i < cols(); ++i) + { + if(!internal::isApprox(self.col(i).squaredNorm(), static_cast(1), prec)) + return false; + for(Index j = 0; j < i; ++j) + if(!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast(1), prec)) + return false; + } + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_DOT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/EigenBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/EigenBase.h new file mode 100644 index 000000000..6b3c7d374 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/EigenBase.h @@ -0,0 +1,160 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EIGENBASE_H +#define EIGEN_EIGENBASE_H + +namespace Eigen { + +/** \class EigenBase + * \ingroup Core_Module + * + * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). + * + * In other words, an EigenBase object is an object that can be copied into a MatrixBase. + * + * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc. + * + * Notice that this class is trivial, it is only used to disambiguate overloaded functions. + * + * \sa \blank \ref TopicClassHierarchy + */ +template struct EigenBase +{ +// typedef typename internal::plain_matrix_type::type PlainObject; + + /** \brief The interface type of indices + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa StorageIndex, \ref TopicPreprocessorDirectives. + * DEPRECATED: Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead. + * Deprecation is not marked with a doxygen comment because there are too many existing usages to add the deprecation attribute. + */ + typedef Eigen::Index Index; + + // FIXME is it needed? + typedef typename internal::traits::StorageKind StorageKind; + + /** \returns a reference to the derived object */ + EIGEN_DEVICE_FUNC + Derived& derived() { return *static_cast(this); } + /** \returns a const reference to the derived object */ + EIGEN_DEVICE_FUNC + const Derived& derived() const { return *static_cast(this); } + + EIGEN_DEVICE_FUNC + inline Derived& const_cast_derived() const + { return *static_cast(const_cast(this)); } + EIGEN_DEVICE_FUNC + inline const Derived& const_derived() const + { return *static_cast(this); } + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return derived().rows(); } + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return derived().cols(); } + /** \returns the number of coefficients, which is rows()*cols(). + * \sa rows(), cols(), SizeAtCompileTime. 
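 *
 * (Editorial addition, not upstream Eigen documentation: EigenBase is the widest
 * argument type a generic function can accept; numel() below is a hypothetical helper.)
 * \code
 * template<typename Derived>
 * Eigen::Index numel(const Eigen::EigenBase<Derived>& x)
 * { return x.rows() * x.cols(); }   // works for dense and special matrices alike
 * \endcode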
*/ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index size() const EIGEN_NOEXCEPT { return rows() * cols(); } + + /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ + template + EIGEN_DEVICE_FUNC + inline void evalTo(Dest& dst) const + { derived().evalTo(dst); } + + /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ + template + EIGEN_DEVICE_FUNC + inline void addTo(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(),cols()); + evalTo(res); + dst += res; + } + + /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ + template + EIGEN_DEVICE_FUNC + inline void subTo(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(),cols()); + evalTo(res); + dst -= res; + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ + template + EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = dst * this->derived(); + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ + template + EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = this->derived() * dst; + } + +}; + +/*************************************************************************** +* Implementation of matrix base methods +***************************************************************************/ + +/** \brief Copies the generic expression \a other into *this. + * + * \details The expression must provide a (templated) evalTo(Derived& dst) const + * function which does the actual job. In practice, this allows any user to write + * its own special matrix without having to modify MatrixBase + * + * \returns a reference to *this. + */ +template +template +EIGEN_DEVICE_FUNC +Derived& DenseBase::operator=(const EigenBase &other) +{ + call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +Derived& DenseBase::operator+=(const EigenBase &other) +{ + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC +Derived& DenseBase::operator-=(const EigenBase &other) +{ + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_EIGENBASE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ForceAlignedAccess.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ForceAlignedAccess.h new file mode 100644 index 000000000..817a43afc --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ForceAlignedAccess.h @@ -0,0 +1,150 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
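// Editorial addition, not part of the Eigen sources: the DenseBase::operator= / += / -=
// overloads taking an EigenBase (defined in EigenBase.h above) are what let dense
// objects interoperate with special matrices such as diagonal ones, e.g.
//
//   Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
//   Eigen::Vector3d d(1.0, 2.0, 3.0);
//   m += d.asDiagonal();   // routed through operator+=(const EigenBase<...>&)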
+ +#ifndef EIGEN_FORCEALIGNEDACCESS_H +#define EIGEN_FORCEALIGNEDACCESS_H + +namespace Eigen { + +/** \class ForceAlignedAccess + * \ingroup Core_Module + * + * \brief Enforce aligned packet loads and stores regardless of what is requested + * + * \param ExpressionType the type of the object of which we are forcing aligned packet access + * + * This class is the return type of MatrixBase::forceAlignedAccess() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::forceAlignedAccess() + */ + +namespace internal { +template +struct traits > : public traits +{}; +} + +template class ForceAlignedAccess + : public internal::dense_xpr_base< ForceAlignedAccess >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) + + EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const + { + return m_expression.coeff(row, col); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const + { + return m_expression.coeff(index); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } + + protected: + const ExpressionType& m_expression; + + private: + ForceAlignedAccess& operator=(const ForceAlignedAccess&); +}; + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(),class ForceAlignedAccess + */ +template +inline const ForceAlignedAccess +MatrixBase::forceAlignedAccess() const +{ + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(), class ForceAlignedAccess + */ +template +inline ForceAlignedAccess +MatrixBase::forceAlignedAccess() +{ + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. 
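 *
 * (Editorial addition, not upstream Eigen documentation: a sketch with a hypothetical
 * buffer 'data' that is known to be suitably aligned.)
 * \code
 * Eigen::Map<Eigen::VectorXf, Eigen::Unaligned> v(data, 8);
 * float s = v.forceAlignedAccess().sum();   // packet loads may now assume alignment
 * \endcode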
+ * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline typename internal::add_const_on_value_type,Derived&>::type>::type +MatrixBase::forceAlignedAccessIf() const +{ + return derived(); // FIXME This should not work but apparently is never used +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. + * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline typename internal::conditional,Derived&>::type +MatrixBase::forceAlignedAccessIf() +{ + return derived(); // FIXME This should not work but apparently is never used +} + +} // end namespace Eigen + +#endif // EIGEN_FORCEALIGNEDACCESS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Fuzzy.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Fuzzy.h new file mode 100644 index 000000000..43aa49b2b --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Fuzzy.h @@ -0,0 +1,155 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FUZZY_H +#define EIGEN_FUZZY_H + +namespace Eigen { + +namespace internal +{ + +template::IsInteger> +struct isApprox_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) + { + typename internal::nested_eval::type nested(x); + typename internal::nested_eval::type otherNested(y); + return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + } +}; + +template +struct isApprox_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) + { + return x.matrix() == y.matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_object_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) + { + return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum(); + } +}; + +template +struct isMuchSmallerThan_object_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_scalar_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec) + { + return x.cwiseAbs2().sum() <= numext::abs2(prec * y); + } +}; + +template +struct isMuchSmallerThan_scalar_selector +{ + EIGEN_DEVICE_FUNC + static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +} // end namespace internal + + +/** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ + * are considered to be approximately equal within precision \f$ p \f$ if + * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). 
\f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm + * L2 norm). + * + * \note Because of the multiplicativeness of this comparison, one can't use this function + * to check whether \c *this is approximately equal to the zero matrix or vector. + * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix + * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const + * RealScalar&, RealScalar) instead. + * + * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +EIGEN_DEVICE_FUNC bool DenseBase::isApprox( + const DenseBase& other, + const RealScalar& prec +) const +{ + return internal::isApprox_selector::run(derived(), other.derived(), prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] + * + * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, + * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm + * of a reference matrix of same dimensions. + * + * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan( + const typename NumTraits::Real& other, + const RealScalar& prec +) const +{ + return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm. + * + * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan( + const DenseBase& other, + const RealScalar& prec +) const +{ + return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); +} + +} // end namespace Eigen + +#endif // EIGEN_FUZZY_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GeneralProduct.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GeneralProduct.h new file mode 100644 index 000000000..6906aa75d --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GeneralProduct.h @@ -0,0 +1,465 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERAL_PRODUCT_H +#define EIGEN_GENERAL_PRODUCT_H + +namespace Eigen { + +enum { + Large = 2, + Small = 3 +}; + +// Define the threshold value to fallback from the generic matrix-matrix product +// implementation (heavy) to the lightweight coeff-based product one. +// See generic_product_impl +// in products/GeneralMatrixMatrix.h for more details. 
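// Editorial addition, not part of the Eigen sources: because the macro below is guarded
// by #ifndef, a project can tune the fallback point before including Eigen, e.g.
//
//   #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 32   // hypothetical tuning value
//   #include <Eigen/Dense>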
+// TODO This threshold should also be used in the compile-time selector below. +#ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD +// This default value has been obtained on a Haswell architecture. +#define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20 +#endif + +namespace internal { + +template struct product_type_selector; + +template struct product_size_category +{ + enum { + #ifndef EIGEN_GPU_COMPILE_PHASE + is_large = MaxSize == Dynamic || + Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD || + (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD), + #else + is_large = 0, + #endif + value = is_large ? Large + : Size == 1 ? 1 + : Small + }; +}; + +template struct product_type +{ + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; + enum { + MaxRows = traits<_Lhs>::MaxRowsAtCompileTime, + Rows = traits<_Lhs>::RowsAtCompileTime, + MaxCols = traits<_Rhs>::MaxColsAtCompileTime, + Cols = traits<_Rhs>::ColsAtCompileTime, + MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime, + traits<_Rhs>::MaxRowsAtCompileTime), + Depth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime, + traits<_Rhs>::RowsAtCompileTime) + }; + + // the splitting into different lines of code here, introducing the _select enums and the typedef below, + // is to work around an internal compiler error with gcc 4.1 and 4.2. +private: + enum { + rows_select = product_size_category::value, + cols_select = product_size_category::value, + depth_select = product_size_category::value + }; + typedef product_type_selector selector; + +public: + enum { + value = selector::ret, + ret = selector::ret + }; +#ifdef EIGEN_DEBUG_PRODUCT + static void debug() + { + EIGEN_DEBUG_VAR(Rows); + EIGEN_DEBUG_VAR(Cols); + EIGEN_DEBUG_VAR(Depth); + EIGEN_DEBUG_VAR(rows_select); + EIGEN_DEBUG_VAR(cols_select); + EIGEN_DEBUG_VAR(depth_select); + EIGEN_DEBUG_VAR(value); + } +#endif +}; + +/* The following allows to select the kind of product at compile time + * based on the three dimensions of the product. + * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ +// FIXME I'm not sure the current mapping is the ideal one. 
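// Editorial addition, not part of the Eigen sources: with EIGEN_DEBUG_PRODUCT defined
// before including Eigen, the selected product kind can be inspected like so:
//
//   Eigen::internal::product_type<Eigen::MatrixXf, Eigen::VectorXf>::debug();
//   // prints Rows/Cols/Depth and the rows/cols/depth selector values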
+template struct product_type_selector { enum { ret = OuterProduct }; }; +template struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template struct product_type_selector<1, N, 1> { enum { ret = LazyCoeffBasedProductMode }; }; +template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; +template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemvProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; + +} // end namespace internal + +/*********************************************************************** +* Implementation of Inner Vector Vector Product +***********************************************************************/ + +// FIXME : maybe the "inner product" could return a Scalar +// instead of a 1x1 matrix ?? +// Pro: more natural for the user +// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix +// product ends up to a row-vector times col-vector product... To tackle this use +// case, we could have a specialization for Block with: operator=(Scalar x); + +/*********************************************************************** +* Implementation of Outer Vector Vector Product +***********************************************************************/ + +/*********************************************************************** +* Implementation of General Matrix Vector Product +***********************************************************************/ + +/* According to the shape/flags of the matrix we have to distinghish 3 different cases: + * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine + * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine + * 3 - all other cases are handled using a simple loop along the outer-storage direction. + * Therefore we need a lower level meta selector. + * Furthermore, if the matrix is the rhs, then the product has to be transposed. 
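 *
 * (Editorial illustration, not from the sources:)
 *   Eigen::MatrixXf A(1000, 1000);        // column-major by default
 *   Eigen::VectorXf x(1000), y(1000);
 *   y.noalias() = A * x;                  // large col-major matrix: case 1
 *   y.noalias() = A.transpose() * x;      // traversed as row-major: case 2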
+ */ +namespace internal { + +template +struct gemv_dense_selector; + +} // end namespace internal + +namespace internal { + +template struct gemv_static_vector_if; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } +}; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; } +}; + +template +struct gemv_static_vector_if +{ + enum { + ForceAlignment = internal::packet_traits::Vectorizable, + PacketSize = internal::packet_traits::size + }; + #if EIGEN_MAX_STATIC_ALIGN_BYTES!=0 + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } + #else + // Some architectures cannot align on the stack, + // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { + return ForceAlignment + ? reinterpret_cast((internal::UIntPtr(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES) + : m_data.array; + } + #endif +}; + +// The vector is on the left => transposition +template +struct gemv_dense_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + Transpose destT(dest); + enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; + gemv_dense_selector + ::run(rhs.transpose(), lhs.transpose(), destT, alpha); + } +}; + +template<> struct gemv_dense_selector +{ + template + static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + typedef typename Dest::RealScalar RealScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + + typedef Map, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits::size)> MappedDest; + + ActualLhsType actualLhs = LhsBlasTraits::extract(lhs); + ActualRhsType actualRhs = RhsBlasTraits::extract(rhs); + + ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); + + // make sure Dest is a compile-time vector type (bug 1166) + typedef typename conditional::type ActualDest; + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... 
+ EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1), + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime!=0) + }; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + if(!MightCannotUseDest) + { + // shortcut if we are sure to be able to use dest directly, + // this ease the compiler to generate cleaner and more optimzized code for most common cases + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + LhsMapper(actualLhs.data(), actualLhs.outerStride()), + RhsMapper(actualRhs.data(), actualRhs.innerStride()), + dest.data(), 1, + compatibleAlpha); + } + else + { + gemv_static_vector_if static_dest; + + const bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); + const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), + evalToDest ? dest.data() : static_dest.data()); + + if(!evalToDest) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if(!alphaIsCompatible) + { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } + else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + LhsMapper(actualLhs.data(), actualLhs.outerStride()), + RhsMapper(actualRhs.data(), actualRhs.innerStride()), + actualDestPtr, 1, + compatibleAlpha); + + if (!evalToDest) + { + if(!alphaIsCompatible) + dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + } + } + } +}; + +template<> struct gemv_dense_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + typename add_const::type actualLhs = LhsBlasTraits::extract(lhs); + typename add_const::type actualRhs = RhsBlasTraits::extract(rhs); + + ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime==0 + }; + + gemv_static_vector_if static_rhs; + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), + DirectlyUseRhs ? 
const_cast(actualRhs.data()) : static_rhs.data()); + + if(!DirectlyUseRhs) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + LhsMapper(actualLhs.data(), actualLhs.outerStride()), + RhsMapper(actualRhsPtr, 1), + dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166) + actualAlpha); + } +}; + +template<> struct gemv_dense_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + EIGEN_STATIC_ASSERT((!nested_eval::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); + // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory, otherwise use a temp + typename nested_eval::type actual_rhs(rhs); + const Index size = rhs.rows(); + for(Index k=0; k struct gemv_dense_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + EIGEN_STATIC_ASSERT((!nested_eval::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); + typename nested_eval::type actual_rhs(rhs); + const Index rows = dest.rows(); + for(Index i=0; i +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +const Product +MatrixBase::operator*(const MatrixBase &other) const +{ + // A note regarding the function declaration: In MSVC, this function will sometimes + // not be inlined since DenseStorage is an unwindable object for dynamic + // matrices and product types are holding a member to store the result. + // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) +#ifdef EIGEN_DEBUG_PRODUCT + internal::product_type::debug(); +#endif + + return Product(derived(), other.derived()); +} + +/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. + * + * The returned product will behave like any other expressions: the coefficients of the product will be + * computed once at a time as requested. This might be useful in some extremely rare cases when only + * a small and no coherent fraction of the result's coefficients have to be computed. + * + * \warning This version of the matrix product can be much much slower. 
So use it only if you know + * what you are doing and that you measured a true speed improvement. + * + * \sa operator*(const MatrixBase&) + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +const Product +MatrixBase::lazyProduct(const MatrixBase &other) const +{ + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + + return Product(derived(), other.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GenericPacketMath.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GenericPacketMath.h new file mode 100644 index 000000000..cf677a190 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GenericPacketMath.h @@ -0,0 +1,1040 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERIC_PACKET_MATH_H +#define EIGEN_GENERIC_PACKET_MATH_H + +namespace Eigen { + +namespace internal { + +/** \internal + * \file GenericPacketMath.h + * + * Default implementation for types not supported by the vectorization. + * In practice these functions are provided to make easier the writing + * of generic vectorized code. + */ + +#ifndef EIGEN_DEBUG_ALIGNED_LOAD +#define EIGEN_DEBUG_ALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_LOAD +#define EIGEN_DEBUG_UNALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_ALIGNED_STORE +#define EIGEN_DEBUG_ALIGNED_STORE +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_STORE +#define EIGEN_DEBUG_UNALIGNED_STORE +#endif + +struct default_packet_traits +{ + enum { + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 0, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 1, + HasBlend = 0, + // This flag is used to indicate whether packet comparison is supported. + // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true. 
+ HasCmp = 0, + + HasDiv = 0, + HasSqrt = 0, + HasRsqrt = 0, + HasExp = 0, + HasExpm1 = 0, + HasLog = 0, + HasLog1p = 0, + HasLog10 = 0, + HasPow = 0, + + HasSin = 0, + HasCos = 0, + HasTan = 0, + HasASin = 0, + HasACos = 0, + HasATan = 0, + HasSinh = 0, + HasCosh = 0, + HasTanh = 0, + HasLGamma = 0, + HasDiGamma = 0, + HasZeta = 0, + HasPolygamma = 0, + HasErf = 0, + HasErfc = 0, + HasNdtri = 0, + HasBessel = 0, + HasIGamma = 0, + HasIGammaDerA = 0, + HasGammaSampleDerAlpha = 0, + HasIGammac = 0, + HasBetaInc = 0, + + HasRound = 0, + HasRint = 0, + HasFloor = 0, + HasCeil = 0, + HasSign = 0 + }; +}; + +template struct packet_traits : default_packet_traits +{ + typedef T type; + typedef T half; + enum { + Vectorizable = 0, + size = 1, + AlignedOnScalar = 0, + HasHalfPacket = 0 + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; +}; + +template struct packet_traits : packet_traits { }; + +template struct unpacket_traits +{ + typedef T type; + typedef T half; + enum + { + size = 1, + alignment = 1, + vectorizable = false, + masked_load_available=false, + masked_store_available=false + }; +}; + +template struct unpacket_traits : unpacket_traits { }; + +template struct type_casting_traits { + enum { + VectorizedCast = 0, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +/** \internal Wrapper to ensure that multiple packet types can map to the same + same underlying vector type. */ +template +struct eigen_packet_wrapper +{ + EIGEN_ALWAYS_INLINE operator T&() { return m_val; } + EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; } + EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {} + EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {} + EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) { + m_val = v; + return *this; + } + + T m_val; +}; + + +/** \internal A convenience utility for determining if the type is a scalar. + * This is used to enable some generic packet implementations. + */ +template +struct is_scalar { + typedef typename unpacket_traits::type Scalar; + enum { + value = internal::is_same::value + }; +}; + +/** \internal \returns static_cast(a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline TgtPacket +pcast(const SrcPacket& a) { + return static_cast(a); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket +pcast(const SrcPacket& a, const SrcPacket& /*b*/) { + return static_cast(a); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket +pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) { + return static_cast(a); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket +pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/, + const SrcPacket& /*e*/, const SrcPacket& /*f*/, const SrcPacket& /*g*/, const SrcPacket& /*h*/) { + return static_cast(a); +} + +/** \internal \returns reinterpret_cast(a) */ +template +EIGEN_DEVICE_FUNC inline Target +preinterpret(const Packet& a); /* { return reinterpret_cast(a); } */ + +/** \internal \returns a + b (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +padd(const Packet& a, const Packet& b) { return a+b; } +// Avoid compiler warning for boolean algebra. 
+template<> EIGEN_DEVICE_FUNC inline bool +padd(const bool& a, const bool& b) { return a || b; } + +/** \internal \returns a - b (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +psub(const Packet& a, const Packet& b) { return a-b; } + +/** \internal \returns -a (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pnegate(const Packet& a) { return -a; } + +template<> EIGEN_DEVICE_FUNC inline bool +pnegate(const bool& a) { return !a; } + +/** \internal \returns conj(a) (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pconj(const Packet& a) { return numext::conj(a); } + +/** \internal \returns a * b (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pmul(const Packet& a, const Packet& b) { return a*b; } +// Avoid compiler warning for boolean algebra. +template<> EIGEN_DEVICE_FUNC inline bool +pmul(const bool& a, const bool& b) { return a && b; } + +/** \internal \returns a / b (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pdiv(const Packet& a, const Packet& b) { return a/b; } + +// In the generic case, memset to all one bits. +template +struct ptrue_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/){ + Packet b; + memset(static_cast(&b), 0xff, sizeof(Packet)); + return b; + } +}; + +// For non-trivial scalars, set to Scalar(1) (i.e. a non-zero value). +// Although this is technically not a valid bitmask, the scalar path for pselect +// uses a comparison to zero, so this should still work in most cases. We don't +// have another option, since the scalar type requires initialization. +template +struct ptrue_impl::value && NumTraits::RequireInitialization>::type > { + static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/){ + return T(1); + } +}; + +/** \internal \returns one bits. */ +template EIGEN_DEVICE_FUNC inline Packet +ptrue(const Packet& a) { + return ptrue_impl::run(a); +} + +// In the general case, memset to zero. +template +struct pzero_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) { + Packet b; + memset(static_cast(&b), 0x00, sizeof(Packet)); + return b; + } +}; + +// For scalars, explicitly set to Scalar(0), since the underlying representation +// for zero may not consist of all-zero bits. +template +struct pzero_impl::value>::type> { + static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { + return T(0); + } +}; + +/** \internal \returns packet of zeros */ +template EIGEN_DEVICE_FUNC inline Packet +pzero(const Packet& a) { + return pzero_impl::run(a); +} + +/** \internal \returns a <= b as a bit mask */ +template EIGEN_DEVICE_FUNC inline Packet +pcmp_le(const Packet& a, const Packet& b) { return a<=b ? ptrue(a) : pzero(a); } + +/** \internal \returns a < b as a bit mask */ +template EIGEN_DEVICE_FUNC inline Packet +pcmp_lt(const Packet& a, const Packet& b) { return a EIGEN_DEVICE_FUNC inline Packet +pcmp_eq(const Packet& a, const Packet& b) { return a==b ? ptrue(a) : pzero(a); } + +/** \internal \returns a < b or a==NaN or b==NaN as a bit mask */ +template EIGEN_DEVICE_FUNC inline Packet +pcmp_lt_or_nan(const Packet& a, const Packet& b) { return a>=b ? 
pzero(a) : ptrue(a); } + +template +struct bit_and { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { + return a & b; + } +}; + +template +struct bit_or { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { + return a | b; + } +}; + +template +struct bit_xor { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { + return a ^ b; + } +}; + +template +struct bit_not { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a) const { + return ~a; + } +}; + +// Use operators &, |, ^, ~. +template +struct operator_bitwise_helper { + EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return bit_and()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return bit_or()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return bit_xor()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return bit_not()(a); } +}; + +// Apply binary operations byte-by-byte +template +struct bytewise_bitwise_helper { + EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { + return binary(a, b, bit_and()); + } + EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { + return binary(a, b, bit_or()); + } + EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { + return binary(a, b, bit_xor()); + } + EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { + return unary(a,bit_not()); + } + + private: + template + EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) { + const unsigned char* a_ptr = reinterpret_cast(&a); + T c; + unsigned char* c_ptr = reinterpret_cast(&c); + for (size_t i = 0; i < sizeof(T); ++i) { + *c_ptr++ = op(*a_ptr++); + } + return c; + } + + template + EIGEN_DEVICE_FUNC static inline T binary(const T& a, const T& b, Op op) { + const unsigned char* a_ptr = reinterpret_cast(&a); + const unsigned char* b_ptr = reinterpret_cast(&b); + T c; + unsigned char* c_ptr = reinterpret_cast(&c); + for (size_t i = 0; i < sizeof(T); ++i) { + *c_ptr++ = op(*a_ptr++, *b_ptr++); + } + return c; + } +}; + +// In the general case, use byte-by-byte manipulation. +template +struct bitwise_helper : public bytewise_bitwise_helper {}; + +// For integers or non-trivial scalars, use binary operators. +template +struct bitwise_helper::value && (NumTraits::IsInteger || NumTraits::RequireInitialization)>::type + > : public operator_bitwise_helper {}; + +/** \internal \returns the bitwise and of \a a and \a b */ +template EIGEN_DEVICE_FUNC inline Packet +pand(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_and(a, b); +} + +/** \internal \returns the bitwise or of \a a and \a b */ +template EIGEN_DEVICE_FUNC inline Packet +por(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_or(a, b); +} + +/** \internal \returns the bitwise xor of \a a and \a b */ +template EIGEN_DEVICE_FUNC inline Packet +pxor(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_xor(a, b); +} + +/** \internal \returns the bitwise not of \a a */ +template EIGEN_DEVICE_FUNC inline Packet +pnot(const Packet& a) { + return bitwise_helper::bitwise_not(a); +} + +/** \internal \returns the bitwise and of \a a and not \a b */ +template EIGEN_DEVICE_FUNC inline Packet +pandnot(const Packet& a, const Packet& b) { return pand(a, pnot(b)); } + +// In the general case, use bitwise select. 
+template +struct pselect_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { + return por(pand(a,mask),pandnot(b,mask)); + } +}; + +// For scalars, use ternary select. +template +struct pselect_impl::value>::type > { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { + return numext::equal_strict(mask, Packet(0)) ? b : a; + } +}; + +/** \internal \returns \a or \b for each field in packet according to \mask */ +template EIGEN_DEVICE_FUNC inline Packet +pselect(const Packet& mask, const Packet& a, const Packet& b) { + return pselect_impl::run(mask, a, b); +} + +template<> EIGEN_DEVICE_FUNC inline bool pselect( + const bool& cond, const bool& a, const bool& b) { + return cond ? a : b; +} + +/** \internal \returns the min or of \a a and \a b (coeff-wise) + If either \a a or \a b are NaN, the result is implementation defined. */ +template +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + return op(a,b); + } +}; + +/** \internal \returns the min or max of \a a and \a b (coeff-wise) + If either \a a or \a b are NaN, NaN is returned. */ +template<> +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet not_nan_mask_b = pcmp_eq(b, b); + return pselect(not_nan_mask_a, + pselect(not_nan_mask_b, op(a, b), b), + a); + } +}; + +/** \internal \returns the min or max of \a a and \a b (coeff-wise) + If both \a a and \a b are NaN, NaN is returned. + Equivalent to std::fmin(a, b). */ +template<> +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet not_nan_mask_b = pcmp_eq(b, b); + return pselect(not_nan_mask_a, + pselect(not_nan_mask_b, op(a, b), a), + b); + } +}; + + +#ifndef SYCL_DEVICE_ONLY +#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) Func +#else +#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) \ +[](const Type& a, const Type& b) { \ + return Func(a, b);} +#endif + +/** \internal \returns the min of \a a and \a b (coeff-wise). + If \a a or \b b is NaN, the return value is implementation defined. */ +template EIGEN_DEVICE_FUNC inline Packet +pmin(const Packet& a, const Packet& b) { return numext::mini(a,b); } + +/** \internal \returns the min of \a a and \a b (coeff-wise). + NaNPropagation determines the NaN propagation semantics. */ +template +EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { + return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin))); +} + +/** \internal \returns the max of \a a and \a b (coeff-wise) + If \a a or \b b is NaN, the return value is implementation defined. */ +template EIGEN_DEVICE_FUNC inline Packet +pmax(const Packet& a, const Packet& b) { return numext::maxi(a, b); } + +/** \internal \returns the max of \a a and \a b (coeff-wise). + NaNPropagation determines the NaN propagation semantics. 
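+
+    For example, with float scalars (illustrative, assuming <limits>):
+    \code
+    const float nan = std::numeric_limits<float>::quiet_NaN();
+    pmax<PropagateNaN>(1.f, nan);     // NaN: a NaN operand always propagates
+    pmax<PropagateNumbers>(1.f, nan); // 1.f: the non-NaN operand is returned
+    \endcode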
*/ +template +EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { + return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet,(pmax))); +} + +/** \internal \returns the absolute value of \a a */ +template EIGEN_DEVICE_FUNC inline Packet +pabs(const Packet& a) { return numext::abs(a); } +template<> EIGEN_DEVICE_FUNC inline unsigned int +pabs(const unsigned int& a) { return a; } +template<> EIGEN_DEVICE_FUNC inline unsigned long +pabs(const unsigned long& a) { return a; } +template<> EIGEN_DEVICE_FUNC inline unsigned long long +pabs(const unsigned long long& a) { return a; } + +/** \internal \returns the addsub value of \a a,b */ +template EIGEN_DEVICE_FUNC inline Packet +paddsub(const Packet& a, const Packet& b) { + return pselect(peven_mask(a), padd(a, b), psub(a, b)); + } + +/** \internal \returns the phase angle of \a a */ +template EIGEN_DEVICE_FUNC inline Packet +parg(const Packet& a) { using numext::arg; return arg(a); } + + +/** \internal \returns \a a logically shifted by N bits to the right */ +template EIGEN_DEVICE_FUNC inline int +parithmetic_shift_right(const int& a) { return a >> N; } +template EIGEN_DEVICE_FUNC inline long int +parithmetic_shift_right(const long int& a) { return a >> N; } + +/** \internal \returns \a a arithmetically shifted by N bits to the right */ +template EIGEN_DEVICE_FUNC inline int +plogical_shift_right(const int& a) { return static_cast(static_cast(a) >> N); } +template EIGEN_DEVICE_FUNC inline long int +plogical_shift_right(const long int& a) { return static_cast(static_cast(a) >> N); } + +/** \internal \returns \a a shifted by N bits to the left */ +template EIGEN_DEVICE_FUNC inline int +plogical_shift_left(const int& a) { return a << N; } +template EIGEN_DEVICE_FUNC inline long int +plogical_shift_left(const long int& a) { return a << N; } + +/** \internal \returns the significant and exponent of the underlying floating point numbers + * See https://en.cppreference.com/w/cpp/numeric/math/frexp + */ +template +EIGEN_DEVICE_FUNC inline Packet pfrexp(const Packet& a, Packet& exponent) { + int exp; + EIGEN_USING_STD(frexp); + Packet result = static_cast(frexp(a, &exp)); + exponent = static_cast(exp); + return result; +} + +/** \internal \returns a * 2^((int)exponent) + * See https://en.cppreference.com/w/cpp/numeric/math/ldexp + */ +template EIGEN_DEVICE_FUNC inline Packet +pldexp(const Packet &a, const Packet &exponent) { + EIGEN_USING_STD(ldexp) + return static_cast(ldexp(a, static_cast(exponent))); +} + +/** \internal \returns the min of \a a and \a b (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pabsdiff(const Packet& a, const Packet& b) { return pselect(pcmp_lt(a, b), psub(b, a), psub(a, b)); } + +/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ +template EIGEN_DEVICE_FUNC inline Packet +pload(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet version of \a *from, (un-aligned load) */ +template EIGEN_DEVICE_FUNC inline Packet +ploadu(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet version of \a *from, (un-aligned masked load) + * There is no generic implementation. We only have implementations for specialized + * cases. Generic case should not be called. 
+ */ +template EIGEN_DEVICE_FUNC inline +typename enable_if::masked_load_available, Packet>::type +ploadu(const typename unpacket_traits::type* from, typename unpacket_traits::mask_t umask); + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ +template EIGEN_DEVICE_FUNC inline Packet +pset1(const typename unpacket_traits::type& a) { return a; } + +/** \internal \returns a packet with constant coefficients set from bits */ +template EIGEN_DEVICE_FUNC inline Packet +pset1frombits(BitsType a); + +/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */ +template EIGEN_DEVICE_FUNC inline Packet +pload1(const typename unpacket_traits::type *a) { return pset1(*a); } + +/** \internal \returns a packet with elements of \a *from duplicated. + * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and + * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} + * Currently, this function is only used for scalar * complex products. + */ +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet +ploaddup(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet with elements of \a *from quadrupled. + * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and + * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} + * Currently, this function is only used in matrix products. + * For packet-size smaller or equal to 4, this function is equivalent to pload1 + */ +template EIGEN_DEVICE_FUNC inline Packet +ploadquad(const typename unpacket_traits::type* from) +{ return pload1(from); } + +/** \internal equivalent to + * \code + * a0 = pload1(a+0); + * a1 = pload1(a+1); + * a2 = pload1(a+2); + * a3 = pload1(a+3); + * \endcode + * \sa pset1, pload1, ploaddup, pbroadcast2 + */ +template EIGEN_DEVICE_FUNC +inline void pbroadcast4(const typename unpacket_traits::type *a, + Packet& a0, Packet& a1, Packet& a2, Packet& a3) +{ + a0 = pload1(a+0); + a1 = pload1(a+1); + a2 = pload1(a+2); + a3 = pload1(a+3); +} + +/** \internal equivalent to + * \code + * a0 = pload1(a+0); + * a1 = pload1(a+1); + * \endcode + * \sa pset1, pload1, ploaddup, pbroadcast4 + */ +template EIGEN_DEVICE_FUNC +inline void pbroadcast2(const typename unpacket_traits::type *a, + Packet& a0, Packet& a1) +{ + a0 = pload1(a+0); + a1 = pload1(a+1); +} + +/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet +plset(const typename unpacket_traits::type& a) { return a; } + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (x, 0, x, 0), + where x is the value of all 1-bits. */ +template EIGEN_DEVICE_FUNC inline Packet +peven_mask(const Packet& /*a*/) { + typedef typename unpacket_traits::type Scalar; + const size_t n = unpacket_traits::size; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n]; + for(size_t i = 0; i < n; ++i) { + memset(elements+i, ((i & 1) == 0 ? 
0xff : 0), sizeof(Scalar)); + } + return ploadu(elements); +} + + +/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ +template EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal copy the packet \a from to \a *to, (un-aligned store) */ +template EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal copy the packet \a from to \a *to, (un-aligned store with a mask) + * There is no generic implementation. We only have implementations for specialized + * cases. Generic case should not be called. + */ +template +EIGEN_DEVICE_FUNC inline +typename enable_if::masked_store_available, void>::type +pstoreu(Scalar* to, const Packet& from, typename unpacket_traits::mask_t umask); + + template EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) + { return ploadu(from); } + + template EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) + { pstore(to, from); } + +/** \internal tries to do cache prefetching of \a addr */ +template EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr) +{ +#if defined(EIGEN_HIP_DEVICE_COMPILE) + // do nothing +#elif defined(EIGEN_CUDA_ARCH) +#if defined(__LP64__) || EIGEN_OS_WIN64 + // 64-bit pointer operand constraint for inlined asm + asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr)); +#else + // 32-bit pointer operand constraint for inlined asm + asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr)); +#endif +#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC) + __builtin_prefetch(addr); +#endif +} + +/** \internal \returns the reversed elements of \a a*/ +template EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a) +{ return a; } + +/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ +template EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a) +{ + return Packet(numext::imag(a),numext::real(a)); +} + +/************************** +* Special math functions +***************************/ + +/** \internal \returns the sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet psin(const Packet& a) { EIGEN_USING_STD(sin); return sin(a); } + +/** \internal \returns the cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pcos(const Packet& a) { EIGEN_USING_STD(cos); return cos(a); } + +/** \internal \returns the tan of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet ptan(const Packet& a) { EIGEN_USING_STD(tan); return tan(a); } + +/** \internal \returns the arc sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pasin(const Packet& a) { EIGEN_USING_STD(asin); return asin(a); } + +/** \internal \returns the arc cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pacos(const Packet& a) { EIGEN_USING_STD(acos); return acos(a); } + +/** \internal \returns the arc tangent of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet patan(const Packet& a) { EIGEN_USING_STD(atan); return atan(a); } + +/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet psinh(const Packet& a) { EIGEN_USING_STD(sinh); return sinh(a); } + +/** \internal \returns the 
hyperbolic cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pcosh(const Packet& a) { EIGEN_USING_STD(cosh); return cosh(a); } + +/** \internal \returns the hyperbolic tan of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet ptanh(const Packet& a) { EIGEN_USING_STD(tanh); return tanh(a); } + +/** \internal \returns the exp of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pexp(const Packet& a) { EIGEN_USING_STD(exp); return exp(a); } + +/** \internal \returns the expm1 of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pexpm1(const Packet& a) { return numext::expm1(a); } + +/** \internal \returns the log of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plog(const Packet& a) { EIGEN_USING_STD(log); return log(a); } + +/** \internal \returns the log1p of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plog1p(const Packet& a) { return numext::log1p(a); } + +/** \internal \returns the log10 of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plog10(const Packet& a) { EIGEN_USING_STD(log10); return log10(a); } + +/** \internal \returns the log10 of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plog2(const Packet& a) { + typedef typename internal::unpacket_traits::type Scalar; + return pmul(pset1(Scalar(EIGEN_LOG2E)), plog(a)); +} + +/** \internal \returns the square-root of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet psqrt(const Packet& a) { return numext::sqrt(a); } + +/** \internal \returns the reciprocal square-root of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet prsqrt(const Packet& a) { + typedef typename internal::unpacket_traits::type Scalar; + return pdiv(pset1(Scalar(1)), psqrt(a)); +} + +/** \internal \returns the rounded value of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pround(const Packet& a) { using numext::round; return round(a); } + +/** \internal \returns the floor of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pfloor(const Packet& a) { using numext::floor; return floor(a); } + +/** \internal \returns the rounded value of \a a (coeff-wise) with current + * rounding mode */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet print(const Packet& a) { using numext::rint; return rint(a); } + +/** \internal \returns the ceil of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); } + +/** \internal \returns the first element of a packet */ +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type +pfirst(const Packet& a) +{ return a; } + +/** \internal \returns the sum of the elements of upper and lower half of \a a if \a a is larger than 4. + * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7} + * For packet-size smaller or equal to 4, this boils down to a noop. 
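+
+    e.g. an 8-float packet {1,2,3,4,5,6,7,8} reduces to the half packet
+    {1+5, 2+6, 3+7, 4+8} == {6,8,10,12} (illustrative).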
+ */ +template +EIGEN_DEVICE_FUNC inline typename conditional<(unpacket_traits::size%8)==0,typename unpacket_traits::half,Packet>::type +predux_half_dowto4(const Packet& a) +{ return a; } + +// Slow generic implementation of Packet reduction. +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type +predux_helper(const Packet& a, Op op) { + typedef typename unpacket_traits::type Scalar; + const size_t n = unpacket_traits::size; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n]; + pstoreu(elements, a); + for(size_t k = n / 2; k > 0; k /= 2) { + for(size_t i = 0; i < k; ++i) { + elements[i] = op(elements[i], elements[i + k]); + } + } + return elements[0]; +} + +/** \internal \returns the sum of the elements of \a a*/ +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type +predux(const Packet& a) +{ + return a; +} + +/** \internal \returns the product of the elements of \a a */ +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_mul( + const Packet& a) { + typedef typename unpacket_traits::type Scalar; + return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul))); +} + +/** \internal \returns the min of the elements of \a a */ +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_min( + const Packet &a) { + typedef typename unpacket_traits::type Scalar; + return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin))); +} + +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_min( + const Packet& a) { + typedef typename unpacket_traits::type Scalar; + return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin))); +} + +/** \internal \returns the min of the elements of \a a */ +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_max( + const Packet &a) { + typedef typename unpacket_traits::type Scalar; + return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax))); +} + +template +EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_max( + const Packet& a) { + typedef typename unpacket_traits::type Scalar; + return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax))); +} + +#undef EIGEN_BINARY_OP_NAN_PROPAGATION + +/** \internal \returns true if all coeffs of \a a means "true" + * It is supposed to be called on values returned by pcmp_*. + */ +// not needed yet +// template EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a) +// { return bool(a); } + +/** \internal \returns true if any coeffs of \a a means "true" + * It is supposed to be called on values returned by pcmp_*. + */ +template EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a) +{ + // Dirty but generic implementation where "true" is assumed to be non 0 and all the sames. + // It is expected that "true" is either: + // - Scalar(1) + // - bits full of ones (NaN for floats), + // - or first bit equals to 1 (1 for ints, smallest denormal for floats). + // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars. + typedef typename unpacket_traits::type Scalar; + return numext::not_equal_strict(predux(a), Scalar(0)); +} + +/*************************************************************************** +* The following functions might not have to be overwritten for vectorized types +***************************************************************************/ + +/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. 
\a to must be 16 bytes aligned */ +// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) +template +inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) +{ + pstore(to, pset1(a)); +} + +/** \internal \returns a * b + c (coeff-wise) */ +template EIGEN_DEVICE_FUNC inline Packet +pmadd(const Packet& a, + const Packet& b, + const Packet& c) +{ return padd(pmul(a, b),c); } + +/** \internal \returns a packet version of \a *from. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits::type* from) +{ + if(Alignment >= unpacket_traits::alignment) + return pload(from); + else + return ploadu(from); +} + +/** \internal copy the packet \a from to \a *to. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from) +{ + if(Alignment >= unpacket_traits::alignment) + pstore(to, from); + else + pstoreu(to, from); +} + +/** \internal \returns a packet version of \a *from. + * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the + * hardware if available to speedup the loading of data that won't be modified + * by the current computation. + */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits::type* from) +{ + return ploadt(from); +} + +/*************************************************************************** +* Fast complex products (GCC generates a function call which is very slow) +***************************************************************************/ + +// Eigen+CUDA does not support complexes. +#if !defined(EIGEN_GPUCC) + +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } + +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } + +#endif + + +/*************************************************************************** + * PacketBlock, that is a collection of N packets where the number of words + * in the packet is a multiple of N. +***************************************************************************/ +template ::size> struct PacketBlock { + Packet packet[N]; +}; + +template EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& /*kernel*/) { + // Nothing to do in the scalar case, i.e. a 1x1 matrix. +} + +/*************************************************************************** + * Selector, i.e. vector of N boolean values used to select (i.e. blend) + * words from 2 packets. +***************************************************************************/ +template struct Selector { + bool select[N]; +}; + +template EIGEN_DEVICE_FUNC inline Packet +pblend(const Selector::size>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) { + return ifPacket.select[0] ? 
thenPacket : elsePacket; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERIC_PACKET_MATH_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GlobalFunctions.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GlobalFunctions.h new file mode 100644 index 000000000..629af94b9 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/GlobalFunctions.h @@ -0,0 +1,194 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2016 Gael Guennebaud +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GLOBAL_FUNCTIONS_H +#define EIGEN_GLOBAL_FUNCTIONS_H + +#ifdef EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ + /** \returns an expression of the coefficient-wise DOC_OP of \a x + + DOC_DETAILS + + \sa Math functions, class CwiseUnaryOp + */ \ + template \ + inline const Eigen::CwiseUnaryOp, const Derived> \ + NAME(const Eigen::ArrayBase& x); + +#else + +#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ + template \ + inline const Eigen::CwiseUnaryOp, const Derived> \ + (NAME)(const Eigen::ArrayBase& x) { \ + return Eigen::CwiseUnaryOp, const Derived>(x.derived()); \ + } + +#endif // EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \ + \ + template \ + struct NAME##_retval > \ + { \ + typedef const Eigen::CwiseUnaryOp, const Derived> type; \ + }; \ + template \ + struct NAME##_impl > \ + { \ + static inline typename NAME##_retval >::type run(const Eigen::ArrayBase& x) \ + { \ + return typename NAME##_retval >::type(x.derived()); \ + } \ + }; + +namespace Eigen +{ + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op,real part,\sa ArrayBase::real) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op,imaginary part,\sa ArrayBase::imag) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op,complex conjugate,\sa ArrayBase::conjugate) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse,scalar_inverse_op,inverse,\sa ArrayBase::inverse) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op,sine,\sa ArrayBase::sin) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op,cosine,\sa ArrayBase::cos) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op,tangent,\sa ArrayBase::tan) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan,scalar_atan_op,arc-tangent,\sa ArrayBase::atan) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op,arc-sine,\sa ArrayBase::asin) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op,arc-consine,\sa ArrayBase::acos) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh) +#if EIGEN_HAS_CXX11_MATH + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh,scalar_asinh_op,inverse hyperbolic sine,\sa ArrayBase::asinh) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh,scalar_acosh_op,inverse hyperbolic cosine,\sa ArrayBase::acosh) + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh,scalar_atanh_op,inverse hyperbolic tangent,\sa ArrayBase::atanh) +#endif + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic,scalar_logistic_op,logistic function,\sa ArrayBase::logistic) + 
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\sa ArrayBase::erf)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc,scalar_erfc_op,complementary error function,\sa ArrayBase::erfc)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri,scalar_ndtri_op,inverse normal distribution function,\sa ArrayBase::ndtri)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op,exponential,\sa ArrayBase::exp)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1,scalar_expm1_op,exponential of a value minus 1,\sa ArrayBase::expm1)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op,natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p,scalar_log1p_op,natural logarithm of 1 plus the value,\sa ArrayBase::log1p)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2,scalar_log2_op,base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op,absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2,scalar_abs2_op,squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op,square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt,scalar_rsqrt_op,reciprocal square root,\sa ArrayBase::rsqrt)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square,scalar_square_op,square (power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube,scalar_cube_op,cube (power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint,scalar_rint_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round,scalar_round_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(floor,scalar_floor_op,nearest integer not greater than the given value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ceil,scalar_ceil_op,nearest integer not less than the given value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isnan,scalar_isnan_op,not-a-number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\sa ArrayBase::sign)
+
+  /** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent.
+    *
+    * \tparam ScalarExponent is the scalar type of \a exponent. It must be
+    * compatible with the scalar type of the given expression (\c Derived::Scalar).
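+    *
+    * For example (illustrative):
+    * \code
+    * Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(4, 1.f, 4.f);
+    * Eigen::ArrayXf b = Eigen::pow(a, 2.f); // {1, 4, 9, 16}
+    * \endcode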
+ * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN + template + inline const CwiseBinaryOp,Derived,Constant > + pow(const Eigen::ArrayBase& x, const ScalarExponent& exponent); +#else + template + EIGEN_DEVICE_FUNC inline + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE( + const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg::type,pow)) + pow(const Eigen::ArrayBase& x, const ScalarExponent& exponent) + { + typedef typename internal::promote_scalar_arg::type PromotedExponent; + return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedExponent,pow)(x.derived(), + typename internal::plain_constant_type::type(x.derived().rows(), x.derived().cols(), internal::scalar_constant_op(exponent))); + } +#endif + + /** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents. + * + * This function computes the coefficient-wise power. + * + * Example: \include Cwise_array_power_array.cpp + * Output: \verbinclude Cwise_array_power_array.out + * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ + template + inline const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> + pow(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) + { + return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( + x.derived(), + exponents.derived() + ); + } + + /** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents. + * + * This function computes the coefficient-wise power between a scalar and an array of exponents. + * + * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression (\c Derived::Scalar). + * + * Example: \include Cwise_scalar_power_array.cpp + * Output: \verbinclude Cwise_scalar_power_array.out + * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN + template + inline const CwiseBinaryOp,Constant,Derived> + pow(const Scalar& x,const Eigen::ArrayBase& x); +#else + template + EIGEN_DEVICE_FUNC inline + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE( + const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg::type,Derived,pow)) + pow(const Scalar& x, const Eigen::ArrayBase& exponents) { + typedef typename internal::promote_scalar_arg::type PromotedScalar; + return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar,Derived,pow)( + typename internal::plain_constant_type::type(exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op(x)), exponents.derived()); + } +#endif + + + namespace internal + { + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) + } +} + +// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...) + +#endif // EIGEN_GLOBAL_FUNCTIONS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IO.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IO.h new file mode 100644 index 000000000..e81c31521 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IO.h @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_IO_H +#define EIGEN_IO_H + +namespace Eigen { + +enum { DontAlignCols = 1 }; +enum { StreamPrecision = -1, + FullPrecision = -2 }; + +namespace internal { +template +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt); +} + +/** \class IOFormat + * \ingroup Core_Module + * + * \brief Stores a set of parameters controlling the way matrices are printed + * + * List of available parameters: + * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision. + * The default is the special value \c StreamPrecision which means to use the + * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value + * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point + * type. + * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c DontAlignCols which + * allows to disable the alignment of columns, resulting in faster code. + * - \b coeffSeparator string printed between two coefficients of the same row + * - \b rowSeparator string printed between two rows + * - \b rowPrefix string printed at the beginning of each row + * - \b rowSuffix string printed at the end of each row + * - \b matPrefix string printed at the beginning of the matrix + * - \b matSuffix string printed at the end of the matrix + * - \b fill character printed to fill the empty space in aligned columns + * + * Example: \include IOFormat.cpp + * Output: \verbinclude IOFormat.out + * + * \sa DenseBase::format(), class WithFormat + */ +struct IOFormat +{ + /** Default constructor, see class IOFormat for the meaning of the parameters */ + IOFormat(int _precision = StreamPrecision, int _flags = 0, + const std::string& _coeffSeparator = " ", + const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", + const std::string& _matPrefix="", const std::string& _matSuffix="", const char _fill=' ') + : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator), + rowSpacer(""), coeffSeparator(_coeffSeparator), fill(_fill), precision(_precision), flags(_flags) + { + // TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline + // don't add rowSpacer if columns are not to be aligned + if((flags & DontAlignCols)) + return; + int i = int(matSuffix.length())-1; + while (i>=0 && matSuffix[i]!='\n') + { + rowSpacer += ' '; + i--; + } + } + std::string matPrefix, matSuffix; + std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; + std::string coeffSeparator; + char fill; + int precision; + int flags; +}; + +/** \class WithFormat + * \ingroup Core_Module + * + * \brief Pseudo expression providing matrix output with given format + * + * \tparam ExpressionType the type of the object on which IO stream operations are performed + * + * This class represents an expression with stream operators controlled by a given IOFormat. + * It is the return type of DenseBase::format() + * and most of the time this is the only way it is used. + * + * See class IOFormat for some examples. 
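+ *
+ * A typical use (illustrative):
+ * \code
+ * Eigen::IOFormat fmt(4, 0, ", ", "\n", "[", "]");
+ * Eigen::Matrix2d m; m << 1, 2, 3, 4;
+ * std::cout << m.format(fmt) << "\n";
+ * \endcode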
+ * + * \sa DenseBase::format(), class IOFormat + */ +template +class WithFormat +{ + public: + + WithFormat(const ExpressionType& matrix, const IOFormat& format) + : m_matrix(matrix), m_format(format) + {} + + friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) + { + return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); + } + + protected: + typename ExpressionType::Nested m_matrix; + IOFormat m_format; +}; + +namespace internal { + +// NOTE: This helper is kept for backward compatibility with previous code specializing +// this internal::significant_decimals_impl structure. In the future we should directly +// call digits10() which has been introduced in July 2016 in 3.3. +template +struct significant_decimals_impl +{ + static inline int run() + { + return NumTraits::digits10(); + } +}; + +/** \internal + * print the matrix \a _m to the output stream \a s using the output format \a fmt */ +template +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) +{ + using internal::is_same; + using internal::conditional; + + if(_m.size() == 0) + { + s << fmt.matPrefix << fmt.matSuffix; + return s; + } + + typename Derived::Nested m = _m; + typedef typename Derived::Scalar Scalar; + typedef typename + conditional< + is_same::value || + is_same::value || + is_same::value || + is_same::value, + int, + typename conditional< + is_same >::value || + is_same >::value || + is_same >::value || + is_same >::value, + std::complex, + const Scalar& + >::type + >::type PrintType; + + Index width = 0; + + std::streamsize explicit_precision; + if(fmt.precision == StreamPrecision) + { + explicit_precision = 0; + } + else if(fmt.precision == FullPrecision) + { + if (NumTraits::IsInteger) + { + explicit_precision = 0; + } + else + { + explicit_precision = significant_decimals_impl::run(); + } + } + else + { + explicit_precision = fmt.precision; + } + + std::streamsize old_precision = 0; + if(explicit_precision) old_precision = s.precision(explicit_precision); + + bool align_cols = !(fmt.flags & DontAlignCols); + if(align_cols) + { + // compute the largest width + for(Index j = 0; j < m.cols(); ++j) + for(Index i = 0; i < m.rows(); ++i) + { + std::stringstream sstr; + sstr.copyfmt(s); + sstr << static_cast(m.coeff(i,j)); + width = std::max(width, Index(sstr.str().length())); + } + } + std::streamsize old_width = s.width(); + char old_fill_character = s.fill(); + s << fmt.matPrefix; + for(Index i = 0; i < m.rows(); ++i) + { + if (i) + s << fmt.rowSpacer; + s << fmt.rowPrefix; + if(width) { + s.fill(fmt.fill); + s.width(width); + } + s << static_cast(m.coeff(i, 0)); + for(Index j = 1; j < m.cols(); ++j) + { + s << fmt.coeffSeparator; + if(width) { + s.fill(fmt.fill); + s.width(width); + } + s << static_cast(m.coeff(i, j)); + } + s << fmt.rowSuffix; + if( i < m.rows() - 1) + s << fmt.rowSeparator; + } + s << fmt.matSuffix; + if(explicit_precision) s.precision(old_precision); + if(width) { + s.fill(old_fill_character); + s.width(old_width); + } + return s; +} + +} // end namespace internal + +/** \relates DenseBase + * + * Outputs the matrix, to the given stream. + * + * If you wish to print the matrix with a format different than the default, use DenseBase::format(). + * + * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. + * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters. 
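+ *
+ * For instance, to make full precision the default (illustrative):
+ * \code
+ * #define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(Eigen::FullPrecision)
+ * #include <Eigen/Dense>
+ * \endcode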
+ * + * \sa DenseBase::format() + */ +template +std::ostream & operator << +(std::ostream & s, + const DenseBase & m) +{ + return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); +} + +} // end namespace Eigen + +#endif // EIGEN_IO_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IndexedView.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IndexedView.h new file mode 100644 index 000000000..08476251d --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/IndexedView.h @@ -0,0 +1,237 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INDEXED_VIEW_H +#define EIGEN_INDEXED_VIEW_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : traits +{ + enum { + RowsAtCompileTime = int(array_size::value), + ColsAtCompileTime = int(array_size::value), + MaxRowsAtCompileTime = RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : Dynamic, + MaxColsAtCompileTime = ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : Dynamic, + + XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + : XprTypeIsRowMajor, + + RowIncr = int(get_compile_time_incr::value), + ColIncr = int(get_compile_time_incr::value), + InnerIncr = IsRowMajor ? ColIncr : RowIncr, + OuterIncr = IsRowMajor ? RowIncr : ColIncr, + + HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), + XprInnerStride = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) : int(outer_stride_at_compile_time::ret), + XprOuterstride = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time::ret) : int(inner_stride_at_compile_time::ret), + + InnerSize = XprTypeIsRowMajor ? ColsAtCompileTime : RowsAtCompileTime, + IsBlockAlike = InnerIncr==1 && OuterIncr==1, + IsInnerPannel = HasSameStorageOrderAsXprType && is_same,typename conditional::type>::value, + + InnerStrideAtCompileTime = InnerIncr<0 || InnerIncr==DynamicIndex || XprInnerStride==Dynamic ? Dynamic : XprInnerStride * InnerIncr, + OuterStrideAtCompileTime = OuterIncr<0 || OuterIncr==DynamicIndex || XprOuterstride==Dynamic ? Dynamic : XprOuterstride * OuterIncr, + + ReturnAsScalar = is_same::value && is_same::value, + ReturnAsBlock = (!ReturnAsScalar) && IsBlockAlike, + ReturnAsIndexedView = (!ReturnAsScalar) && (!ReturnAsBlock), + + // FIXME we deal with compile-time strides if and only if we have DirectAccessBit flag, + // but this is too strict regarding negative strides... + DirectAccessMask = (int(InnerIncr)!=UndefinedIncr && int(OuterIncr)!=UndefinedIncr && InnerIncr>=0 && OuterIncr>=0) ? DirectAccessBit : 0, + FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? 
LinearAccessBit : 0, + Flags = (traits::Flags & (HereditaryBits | DirectAccessMask )) | FlagsLvalueBit | FlagsRowMajorBit | FlagsLinearAccessBit + }; + + typedef Block BlockType; +}; + +} + +template +class IndexedViewImpl; + + +/** \class IndexedView + * \ingroup Core_Module + * + * \brief Expression of a non-sequential sub-matrix defined by arbitrary sequences of row and column indices + * + * \tparam XprType the type of the expression in which we are taking the intersections of sub-rows and sub-columns + * \tparam RowIndices the type of the object defining the sequence of row indices + * \tparam ColIndices the type of the object defining the sequence of column indices + * + * This class represents an expression of a sub-matrix (or sub-vector) defined as the intersection + * of sub-sets of rows and columns, that are themself defined by generic sequences of row indices \f$ \{r_0,r_1,..r_{m-1}\} \f$ + * and column indices \f$ \{c_0,c_1,..c_{n-1} \}\f$. Let \f$ A \f$ be the nested matrix, then the resulting matrix \f$ B \f$ has \c m + * rows and \c n columns, and its entries are given by: \f$ B(i,j) = A(r_i,c_j) \f$. + * + * The \c RowIndices and \c ColIndices types must be compatible with the following API: + * \code + * operator[](Index) const; + * Index size() const; + * \endcode + * + * Typical supported types thus include: + * - std::vector + * - std::valarray + * - std::array + * - Plain C arrays: int[N] + * - Eigen::ArrayXi + * - decltype(ArrayXi::LinSpaced(...)) + * - Any view/expressions of the previous types + * - Eigen::ArithmeticSequence + * - Eigen::internal::AllRange (helper for Eigen::all) + * - Eigen::internal::SingleRange (helper for single index) + * - etc. + * + * In typical usages of %Eigen, this class should never be used directly. It is the return type of + * DenseBase::operator()(const RowIndices&, const ColIndices&). 
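+ *
+ * For example (illustrative):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+ * std::vector<int> rows = {0, 2};
+ * Eigen::ArrayXi cols(2); cols << 1, 3;
+ * auto B = A(rows, cols); // 2x2 view with B(i,j) == A(rows[i], cols[j])
+ * \endcode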
+ * + * \sa class Block + */ +template +class IndexedView : public IndexedViewImpl::StorageKind> +{ +public: + typedef typename IndexedViewImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(IndexedView) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedView) + + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef typename internal::remove_all::type NestedExpression; + + template + IndexedView(XprType& xpr, const T0& rowIndices, const T1& colIndices) + : m_xpr(xpr), m_rowIndices(rowIndices), m_colIndices(colIndices) + {} + + /** \returns number of rows */ + Index rows() const { return internal::size(m_rowIndices); } + + /** \returns number of columns */ + Index cols() const { return internal::size(m_colIndices); } + + /** \returns the nested expression */ + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + typename internal::remove_reference::type& + nestedExpression() { return m_xpr; } + + /** \returns a const reference to the object storing/generating the row indices */ + const RowIndices& rowIndices() const { return m_rowIndices; } + + /** \returns a const reference to the object storing/generating the column indices */ + const ColIndices& colIndices() const { return m_colIndices; } + +protected: + MatrixTypeNested m_xpr; + RowIndices m_rowIndices; + ColIndices m_colIndices; +}; + + +// Generic API dispatcher +template +class IndexedViewImpl + : public internal::generic_xpr_base >::type +{ +public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +namespace internal { + + +template +struct unary_evaluator, IndexBased> + : evaluator_base > +{ + typedef IndexedView XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of row/col index */, + + FlagsLinearAccessBit = (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1) ? LinearAccessBit : 0, + + FlagsRowMajorBit = traits::FlagsRowMajorBit, + + Flags = (evaluator::Flags & (HereditaryBits & ~RowMajorBit /*| LinearAccessBit | DirectAccessBit*/)) | FlagsLinearAccessBit | FlagsRowMajorBit, + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeff(Index row, Index col) const + { + return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index row, Index col) + { + return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; + Index col = XprType::RowsAtCompileTime == 1 ? index : 0; + return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Scalar& coeffRef(Index index) const + { + Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; + Index col = XprType::RowsAtCompileTime == 1 ? index : 0; + return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const CoeffReturnType coeff(Index index) const + { + Index row = XprType::RowsAtCompileTime == 1 ? 
0 : index;
+    Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
+    return m_argImpl.coeff( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
+  }
+
+protected:
+
+  evaluator<ArgType> m_argImpl;
+  const XprType& m_xpr;
+
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INDEXED_VIEW_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Inverse.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Inverse.h
new file mode 100644
index 000000000..c514438c4
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Inverse.h
@@ -0,0 +1,117 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INVERSE_H
+#define EIGEN_INVERSE_H
+
+namespace Eigen {
+
+template<typename XprType,typename StorageKind> class InverseImpl;
+
+namespace internal {
+
+template<typename XprType>
+struct traits<Inverse<XprType> >
+  : traits<typename XprType::PlainObject>
+{
+  typedef typename XprType::PlainObject PlainObject;
+  typedef traits<PlainObject> BaseTraits;
+  enum {
+    Flags = BaseTraits::Flags & RowMajorBit
+  };
+};
+
+} // end namespace internal
+
+/** \class Inverse
+  *
+  * \brief Expression of the inverse of another expression
+  *
+  * \tparam XprType the type of the expression of which we are taking the inverse
+  *
+  * This class represents an abstract expression of A.inverse()
+  * and most of the time this is the only way it is used.
+  *
+  */
+template<typename XprType>
+class Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::StorageKind>
+{
+public:
+  typedef typename XprType::StorageIndex StorageIndex;
+  typedef typename XprType::Scalar                            Scalar;
+  typedef typename internal::ref_selector<XprType>::type      XprTypeNested;
+  typedef typename internal::remove_all<XprTypeNested>::type  XprTypeNestedCleaned;
+  typedef typename internal::ref_selector<Inverse>::type Nested;
+  typedef typename internal::remove_all<XprType>::type NestedExpression;
+
+  explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr)
+    : m_xpr(xpr)
+  {}
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
+
+  EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }
+
+protected:
+  XprTypeNested m_xpr;
+};
+
+// Generic API dispatcher
+template<typename XprType, typename StorageKind>
+class InverseImpl
+  : public internal::generic_xpr_base<Inverse<XprType> >::type
+{
+public:
+  typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;
+  typedef typename XprType::Scalar Scalar;
+private:
+
+  Scalar coeff(Index row, Index col) const;
+  Scalar coeff(Index i) const;
+};
+
+namespace internal {
+
+/** \internal
+  * \brief Default evaluator for Inverse expression.
+  *
+  * This default evaluator for Inverse expression simply evaluates the inverse into a temporary
+  * by a call to internal::call_assignment_no_alias.
+  * Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for
+  * their own nested expression.
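+  *
+  * For instance (illustrative):
+  * \code
+  * Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+  * Eigen::Matrix3d B = A.inverse(); // builds an Inverse<> expression, evaluated on assignment
+  * \endcode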
+ * + * \sa class Inverse + */ +template +struct unary_evaluator > + : public evaluator::PlainObject> +{ + typedef Inverse InverseType; + typedef typename InverseType::PlainObject PlainObject; + typedef evaluator Base; + + enum { Flags = Base::Flags | EvalBeforeNestingBit }; + + unary_evaluator(const InverseType& inv_xpr) + : m_result(inv_xpr.rows(), inv_xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + internal::call_assignment_no_alias(m_result, inv_xpr); + } + +protected: + PlainObject m_result; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INVERSE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Map.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Map.h new file mode 100644 index 000000000..218cc157f --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Map.h @@ -0,0 +1,171 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAP_H +#define EIGEN_MAP_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : public traits +{ + typedef traits TraitsBase; + enum { + PlainObjectTypeInnerSize = ((traits::Flags&RowMajorBit)==RowMajorBit) + ? PlainObjectType::ColsAtCompileTime + : PlainObjectType::RowsAtCompileTime, + + InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 + ? int(PlainObjectType::InnerStrideAtCompileTime) + : int(StrideType::InnerStrideAtCompileTime), + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? (InnerStrideAtCompileTime==Dynamic || PlainObjectTypeInnerSize==Dynamic + ? Dynamic + : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize)) + : int(StrideType::OuterStrideAtCompileTime), + Alignment = int(MapOptions)&int(AlignedMask), + Flags0 = TraitsBase::Flags & (~NestByRefBit), + Flags = is_lvalue::value ? int(Flags0) : (int(Flags0) & ~LvalueBit) + }; +private: + enum { Options }; // Expressions don't have Options +}; +} + +/** \class Map + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing array of data. + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. + * The default is \c #Unaligned. + * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout + * of an ordinary, contiguous array. This can be overridden by specifying strides. + * The type passed here must be a specialization of the Stride template, see examples below. + * + * This class represents a matrix or vector expression mapping an existing array of data. + * It can be used to let Eigen interface without any overhead with non-Eigen data structures, + * such as plain C arrays or structures from other libraries. By default, it assumes that the + * data is laid out contiguously in memory. You can however override this by explicitly specifying + * inner and outer strides. 
+ * + * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: + * \include Map_simple.cpp + * Output: \verbinclude Map_simple.out + * + * If you need to map non-contiguous arrays, you can do so by specifying strides: + * + * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer + * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time + * fixed value. + * \include Map_inner_stride.cpp + * Output: \verbinclude Map_inner_stride.out + * + * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping + * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. + * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is + * a short version of \c OuterStride because the default template parameter of OuterStride + * is \c Dynamic + * \include Map_outer_stride.cpp + * Output: \verbinclude Map_outer_stride.out + * + * For more details and for an example of specifying both an inner and an outer stride, see class Stride. + * + * \b Tip: to change the array of data mapped by a Map object, you can use the C++ + * placement new syntax: + * + * Example: \include Map_placement_new.cpp + * Output: \verbinclude Map_placement_new.out + * + * This class is the return type of PlainObjectBase::Map() but can also be used directly. + * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ +template class Map + : public MapBase > +{ + public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Map) + + typedef typename Base::PointerType PointerType; + typedef PointerType PointerArgType; + EIGEN_DEVICE_FUNC + inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const + { + return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const + { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : internal::traits::OuterStrideAtCompileTime != Dynamic ? Index(internal::traits::OuterStrideAtCompileTime) + : IsVectorAtCompileTime ? (this->size() * innerStride()) + : int(Flags)&RowMajorBit ? (this->cols() * innerStride()) + : (this->rows() * innerStride()); + } + + /** Constructor in the fixed-size case. + * + * \param dataPtr pointer to the array to map + * \param stride optional Stride object, passing the strides. + */ + EIGEN_DEVICE_FUNC + explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr)), m_stride(stride) + { + PlainObjectType::Base::_check_template_params(); + } + + /** Constructor in the dynamic-size vector case. + * + * \param dataPtr pointer to the array to map + * \param size the size of the vector expression + * \param stride optional Stride object, passing the strides. + */ + EIGEN_DEVICE_FUNC + inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) + { + PlainObjectType::Base::_check_template_params(); + } + + /** Constructor in the dynamic-size matrix case. 
+      *
+      * \param dataPtr pointer to the array to map
+      * \param rows the number of rows of the matrix expression
+      * \param cols the number of columns of the matrix expression
+      * \param stride optional Stride object, passing the strides.
+      */
+    EIGEN_DEVICE_FUNC
+    inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+
+  protected:
+    StrideType m_stride;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAP_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MapBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MapBase.h
new file mode 100644
index 000000000..d856447f0
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MapBase.h
@@ -0,0 +1,310 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAPBASE_H
+#define EIGEN_MAPBASE_H
+
+#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
+      EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
+                          YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
+
+namespace Eigen {
+
+/** \ingroup Core_Module
+  *
+  * \brief Base class for dense Map and Block expression with direct access
+  *
+  * This base class provides the const low-level accessors (e.g. coeff, coeffRef) of dense
+  * Map and Block objects with direct access.
+  * Typical users do not have to directly deal with this class.
+  *
+  * This class can be extended through the macro plugin \c EIGEN_MAPBASE_PLUGIN.
+  * See \link TopicCustomizing_Plugins customizing Eigen \endlink for details.
+ * + * The \c Derived class has to provide the following two methods describing the memory layout: + * \code Index innerStride() const; \endcode + * \code Index outerStride() const; \endcode + * + * \sa class Map, class Block + */ +template class MapBase + : public internal::dense_xpr_base::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + InnerStrideAtCompileTime = internal::traits::InnerStrideAtCompileTime, + SizeAtCompileTime = Base::SizeAtCompileTime + }; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, + const Scalar *>::type + PointerType; + + using Base::derived; +// using Base::RowsAtCompileTime; +// using Base::ColsAtCompileTime; +// using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + using Base::IsRowMajor; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::eval; + + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + + // bug 217 - compile error on ICC 11.1 + using Base::operator=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + + /** \copydoc DenseBase::rows() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return m_rows.value(); } + /** \copydoc DenseBase::cols() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return m_cols.value(); } + + /** Returns a pointer to the first coefficient of the matrix or vector. + * + * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride(). 
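(Editor's aside: the stride contract from the note above, checked concretely. For a plain column-major MatrixXd, innerStride() is 1 and outerStride() equals rows(); this is the same index arithmetic that coeff() performs. A sketch assuming only the public API.)

    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 5);
      const double* p = m.data();
      // Column-major addressing: element (i,j) lives at
      // p[j * outerStride() + i * innerStride()].
      for (Eigen::Index j = 0; j < m.cols(); ++j)
        for (Eigen::Index i = 0; i < m.rows(); ++i)
          assert(p[j * m.outerStride() + i * m.innerStride()] == m(i, j));
    }
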
+ * + * \sa innerStride(), outerStride() + */ + EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_data; } + + /** \copydoc PlainObjectBase::coeff(Index,Index) const */ + EIGEN_DEVICE_FUNC + inline const Scalar& coeff(Index rowId, Index colId) const + { + return m_data[colId * colStride() + rowId * rowStride()]; + } + + /** \copydoc PlainObjectBase::coeff(Index) const */ + EIGEN_DEVICE_FUNC + inline const Scalar& coeff(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return m_data[index * innerStride()]; + } + + /** \copydoc PlainObjectBase::coeffRef(Index,Index) const */ + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + return this->m_data[colId * colStride() + rowId * rowStride()]; + } + + /** \copydoc PlainObjectBase::coeffRef(Index) const */ + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; + } + + /** \internal */ + template + inline PacketScalar packet(Index rowId, Index colId) const + { + return internal::ploadt + (m_data + (colId * colStride() + rowId * rowStride())); + } + + /** \internal */ + template + inline PacketScalar packet(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return internal::ploadt(m_data + index * innerStride()); + } + + /** \internal Constructor for fixed size matrices or vectors */ + EIGEN_DEVICE_FUNC + explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + { + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + checkSanity(); + } + + /** \internal Constructor for dynamically sized vectors */ + EIGEN_DEVICE_FUNC + inline MapBase(PointerType dataPtr, Index vecSize) + : m_data(dataPtr), + m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)), + m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + eigen_assert(vecSize >= 0); + eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize); + checkSanity(); + } + + /** \internal Constructor for dynamically sized matrices */ + EIGEN_DEVICE_FUNC + inline MapBase(PointerType dataPtr, Index rows, Index cols) + : m_data(dataPtr), m_rows(rows), m_cols(cols) + { + eigen_assert( (dataPtr == 0) + || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); + checkSanity(); + } + + #ifdef EIGEN_MAPBASE_PLUGIN + #include EIGEN_MAPBASE_PLUGIN + #endif + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) + + template + EIGEN_DEVICE_FUNC + void checkSanity(typename internal::enable_if<(internal::traits::Alignment>0),void*>::type = 0) const + { +#if EIGEN_MAX_ALIGN_BYTES>0 + // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value: + const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 
1 : Index(InnerStrideAtCompileTime); + EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride); + eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits::Alignment) == 0) + || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits::Alignment ) && "data is not aligned"); +#endif + } + + template + EIGEN_DEVICE_FUNC + void checkSanity(typename internal::enable_if::Alignment==0,void*>::type = 0) const + {} + + PointerType m_data; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; +}; + +/** \ingroup Core_Module + * + * \brief Base class for non-const dense Map and Block expression with direct access + * + * This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of + * dense Map and Block objects with direct access. + * It inherits MapBase which defines the const variant for reading specific entries. + * + * \sa class Map, class Block + */ +template class MapBase + : public MapBase +{ + typedef MapBase ReadOnlyMapBase; + public: + + typedef MapBase Base; + + typedef typename Base::Scalar Scalar; + typedef typename Base::PacketScalar PacketScalar; + typedef typename Base::StorageIndex StorageIndex; + typedef typename Base::PointerType PointerType; + + using Base::derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + EIGEN_DEVICE_FUNC + inline const Scalar* data() const { return this->m_data; } + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error + + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) + { + return this->m_data[col * colStride() + row * rowStride()]; + } + + EIGEN_DEVICE_FUNC + inline ScalarWithConstIfNotLvalue& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& val) + { + internal::pstoret + (this->m_data + (col * colStride() + row * rowStride()), val); + } + + template + inline void writePacket(Index index, const PacketScalar& val) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + internal::pstoret + (this->m_data + index * innerStride(), val); + } + + EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {} + EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {} + EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {} + + EIGEN_DEVICE_FUNC + Derived& operator=(const MapBase& other) + { + ReadOnlyMapBase::Base::operator=(other); + return derived(); + } + + // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base, + // see bugs 821 and 920. 
+    using ReadOnlyMapBase::Base::operator=;
+  protected:
+    EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
+    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
+};
+
+#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPBASE_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctions.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctions.h
new file mode 100644
index 000000000..61b78f4f2
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctions.h
@@ -0,0 +1,2057 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATHFUNCTIONS_H
+#define EIGEN_MATHFUNCTIONS_H
+
+// TODO this should better be moved to NumTraits
+// Source: WolframAlpha
+#define EIGEN_PI    3.141592653589793238462643383279502884197169399375105820974944592307816406L
+#define EIGEN_LOG2E 1.442695040888963407359924681001892137426645954152985934135449406931109219L
+#define EIGEN_LN2   0.693147180559945309417232121458176568075500134360255254120680009493393621L
+
+namespace Eigen {
+
+// On WINCE, std::abs is defined for int only, so let's define our own overloads:
+// This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too.
+#if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500
+long        abs(long        x) { return (labs(x));  }
+double      abs(double      x) { return (fabs(x));  }
+float       abs(float       x) { return (fabsf(x)); }
+long double abs(long double x) { return (fabsl(x)); }
+#endif
+
+namespace internal {
+
+/** \internal \class global_math_functions_filtering_base
+  *
+  * What it does:
+  * Defines a typedef 'type' as follows:
+  * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then
+  *   global_math_functions_filtering_base<T>::type is a typedef for it.
+  * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.
+  *
+  * How it's used:
+  * To allow to define the global math functions (like sin...) in certain cases, like the Array expressions.
+  * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know
+  * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>.
+  * So we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization
+  * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.
+  *
+  * How it's implemented:
+  * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace
+  * the typename dummy by an integer template parameter, it doesn't work anymore!
+ */ + +template +struct global_math_functions_filtering_base +{ + typedef T type; +}; + +template struct always_void { typedef void type; }; + +template +struct global_math_functions_filtering_base + ::type + > +{ + typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; +}; + +#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl::type> +#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval::type>::type + +/**************************************************************************** +* Implementation of real * +****************************************************************************/ + +template::IsComplex> +struct real_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return x; + } +}; + +template +struct real_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + using std::real; + return real(x); + } +}; + +template struct real_impl : real_default_impl {}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct real_impl > +{ + typedef T RealScalar; + EIGEN_DEVICE_FUNC + static inline T run(const std::complex& x) + { + return x.real(); + } +}; +#endif + +template +struct real_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of imag * +****************************************************************************/ + +template::IsComplex> +struct imag_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar&) + { + return RealScalar(0); + } +}; + +template +struct imag_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + using std::imag; + return imag(x); + } +}; + +template struct imag_impl : imag_default_impl {}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct imag_impl > +{ + typedef T RealScalar; + EIGEN_DEVICE_FUNC + static inline T run(const std::complex& x) + { + return x.imag(); + } +}; +#endif + +template +struct imag_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of real_ref * +****************************************************************************/ + +template +struct real_ref_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar& run(Scalar& x) + { + return reinterpret_cast(&x)[0]; + } + EIGEN_DEVICE_FUNC + static inline const RealScalar& run(const Scalar& x) + { + return reinterpret_cast(&x)[0]; + } +}; + +template +struct real_ref_retval +{ + typedef typename NumTraits::Real & type; +}; + +/**************************************************************************** +* Implementation of imag_ref * +****************************************************************************/ + +template +struct imag_ref_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar& run(Scalar& x) + { + return reinterpret_cast(&x)[1]; + } + EIGEN_DEVICE_FUNC + static inline const RealScalar& run(const Scalar& x) + { + return reinterpret_cast(&x)[1]; + } +}; + +template +struct imag_ref_default_impl +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline Scalar run(Scalar&) + { + return 
Scalar(0); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline const Scalar run(const Scalar&) + { + return Scalar(0); + } +}; + +template +struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; + +template +struct imag_ref_retval +{ + typedef typename NumTraits::Real & type; +}; + +/**************************************************************************** +* Implementation of conj * +****************************************************************************/ + +template::IsComplex> +struct conj_default_impl +{ + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + return x; + } +}; + +template +struct conj_default_impl +{ + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + using std::conj; + return conj(x); + } +}; + +template::IsComplex> +struct conj_impl : conj_default_impl {}; + +template +struct conj_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of abs2 * +****************************************************************************/ + +template +struct abs2_impl_default +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return x*x; + } +}; + +template +struct abs2_impl_default // IsComplex +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return x.real()*x.real() + x.imag()*x.imag(); + } +}; + +template +struct abs2_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return abs2_impl_default::IsComplex>::run(x); + } +}; + +template +struct abs2_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of sqrt/rsqrt * +****************************************************************************/ + +template +struct sqrt_impl +{ + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE Scalar run(const Scalar& x) + { + EIGEN_USING_STD(sqrt); + return sqrt(x); + } +}; + +// Complex sqrt defined in MathFunctionsImpl.h. +template EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& a_x); + +// Custom implementation is faster than `std::sqrt`, works on +// GPU, and correctly handles special cases (unlike MSVC). +template +struct sqrt_impl > +{ + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) + { + return complex_sqrt(x); + } +}; + +template +struct sqrt_retval +{ + typedef Scalar type; +}; + +// Default implementation relies on numext::sqrt, at bottom of file. +template +struct rsqrt_impl; + +// Complex rsqrt defined in MathFunctionsImpl.h. 
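(Editor's aside: every function in this file follows one dispatch shape: a public entry point forwards to a func##_impl class template, per-type specializations override behavior, and func##_retval names the result type. Below is a miniature of that pattern with hypothetical names (my_sqrt, my_sqrt_impl); it is a sketch of the idea, not Eigen's code. The complex_rsqrt forward declaration that follows in the header is such a hook, with MathFunctionsImpl.h supplying the definition.)

    #include <cmath>
    #include <complex>
    #include <iostream>

    template <typename Scalar>
    struct my_sqrt_impl {  // generic case: defer to std / ADL
      static Scalar run(const Scalar& x) { using std::sqrt; return sqrt(x); }
    };

    template <typename T>
    struct my_sqrt_impl<std::complex<T> > {  // type-specific override
      static std::complex<T> run(const std::complex<T>& x) { return std::sqrt(x); }
    };

    template <typename Scalar>
    Scalar my_sqrt(const Scalar& x) {  // public entry point only dispatches
      return my_sqrt_impl<Scalar>::run(x);
    }

    int main() {
      std::cout << my_sqrt(2.0) << " "
                << my_sqrt(std::complex<double>(-1.0, 0.0)) << "\n";
    }
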
+template EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& a_x); + +template +struct rsqrt_impl > +{ + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) + { + return complex_rsqrt(x); + } +}; + +template +struct rsqrt_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of norm1 * +****************************************************************************/ + +template +struct norm1_default_impl; + +template +struct norm1_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + EIGEN_USING_STD(abs); + return abs(x.real()) + abs(x.imag()); + } +}; + +template +struct norm1_default_impl +{ + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + EIGEN_USING_STD(abs); + return abs(x); + } +}; + +template +struct norm1_impl : norm1_default_impl::IsComplex> {}; + +template +struct norm1_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of hypot * +****************************************************************************/ + +template struct hypot_impl; + +template +struct hypot_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of cast * +****************************************************************************/ + +template +struct cast_impl +{ + EIGEN_DEVICE_FUNC + static inline NewType run(const OldType& x) + { + return static_cast(x); + } +}; + +// Casting from S -> Complex leads to an implicit conversion from S to T, +// generating warnings on clang. Here we explicitly cast the real component. +template +struct cast_impl::IsComplex && NumTraits::IsComplex + >::type> +{ + EIGEN_DEVICE_FUNC + static inline NewType run(const OldType& x) + { + typedef typename NumTraits::Real NewReal; + return static_cast(static_cast(x)); + } +}; + +// here, for once, we're plainly returning NewType: we don't want cast to do weird things. + +template +EIGEN_DEVICE_FUNC +inline NewType cast(const OldType& x) +{ + return cast_impl::run(x); +} + +/**************************************************************************** +* Implementation of round * +****************************************************************************/ + +template +struct round_impl +{ + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) +#if EIGEN_HAS_CXX11_MATH + EIGEN_USING_STD(round); +#endif + return Scalar(round(x)); + } +}; + +#if !EIGEN_HAS_CXX11_MATH +#if EIGEN_HAS_C99_MATH +// Use ::roundf for float. +template<> +struct round_impl { + EIGEN_DEVICE_FUNC + static inline float run(const float& x) + { + return ::roundf(x); + } +}; +#else +template +struct round_using_floor_ceil_impl +{ + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) + // Without C99 round/roundf, resort to floor/ceil. + EIGEN_USING_STD(floor); + EIGEN_USING_STD(ceil); + // If not enough precision to resolve a decimal at all, return the input. + // Otherwise, adding 0.5 can trigger an increment by 1. + const Scalar limit = Scalar(1ull << (NumTraits::digits() - 1)); + if (x >= limit || x <= -limit) { + return x; + } + return (x > Scalar(0)) ? 
Scalar(floor(x + Scalar(0.5))) : Scalar(ceil(x - Scalar(0.5))); + } +}; + +template<> +struct round_impl : round_using_floor_ceil_impl {}; + +template<> +struct round_impl : round_using_floor_ceil_impl {}; +#endif // EIGEN_HAS_C99_MATH +#endif // !EIGEN_HAS_CXX11_MATH + +template +struct round_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of rint * +****************************************************************************/ + +template +struct rint_impl { + EIGEN_DEVICE_FUNC + static inline Scalar run(const Scalar& x) + { + EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) +#if EIGEN_HAS_CXX11_MATH + EIGEN_USING_STD(rint); +#endif + return rint(x); + } +}; + +#if !EIGEN_HAS_CXX11_MATH +template<> +struct rint_impl { + EIGEN_DEVICE_FUNC + static inline double run(const double& x) + { + return ::rint(x); + } +}; +template<> +struct rint_impl { + EIGEN_DEVICE_FUNC + static inline float run(const float& x) + { + return ::rintf(x); + } +}; +#endif + +template +struct rint_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of arg * +****************************************************************************/ + +// Visual Studio 2017 has a bug where arg(float) returns 0 for negative inputs. +// This seems to be fixed in VS 2019. +#if EIGEN_HAS_CXX11_MATH && (!EIGEN_COMP_MSVC || EIGEN_COMP_MSVC >= 1920) +// std::arg is only defined for types of std::complex, or integer types or float/double/long double +template::IsComplex || is_integral::value + || is_same::value || is_same::value + || is_same::value > +struct arg_default_impl; + +template +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + #if defined(EIGEN_HIP_DEVICE_COMPILE) + // HIP does not seem to have a native device side implementation for the math routine "arg" + using std::arg; + #else + EIGEN_USING_STD(arg); + #endif + return static_cast(arg(x)); + } +}; + +// Must be non-complex floating-point type (e.g. half/bfloat16). +template +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return (x < Scalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0); + } +}; +#else +template::IsComplex> +struct arg_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + return (x < RealScalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0); + } +}; + +template +struct arg_default_impl +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC + static inline RealScalar run(const Scalar& x) + { + EIGEN_USING_STD(arg); + return arg(x); + } +}; +#endif +template struct arg_impl : arg_default_impl {}; + +template +struct arg_retval +{ + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** +* Implementation of expm1 * +****************************************************************************/ + +// This implementation is based on GSL Math's expm1. +namespace std_fallback { + // fallback expm1 implementation in case there is no expm1(Scalar) function in namespace of Scalar, + // or that there is no suitable std::expm1 function available. Implementation + // attributed to Kahan. 
See: http://www.plunk.org/~hatch/rightway.php. + template + EIGEN_DEVICE_FUNC inline Scalar expm1(const Scalar& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + typedef typename NumTraits::Real RealScalar; + + EIGEN_USING_STD(exp); + Scalar u = exp(x); + if (numext::equal_strict(u, Scalar(1))) { + return x; + } + Scalar um1 = u - RealScalar(1); + if (numext::equal_strict(um1, Scalar(-1))) { + return RealScalar(-1); + } + + EIGEN_USING_STD(log); + Scalar logu = log(u); + return numext::equal_strict(u, logu) ? u : (u - RealScalar(1)) * x / logu; + } +} + +template +struct expm1_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + #if EIGEN_HAS_CXX11_MATH + using std::expm1; + #else + using std_fallback::expm1; + #endif + return expm1(x); + } +}; + +template +struct expm1_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of log * +****************************************************************************/ + +// Complex log defined in MathFunctionsImpl.h. +template EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z); + +template +struct log_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) + { + EIGEN_USING_STD(log); + return static_cast(log(x)); + } +}; + +template +struct log_impl > { + EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& z) + { + return complex_log(z); + } +}; + +/**************************************************************************** +* Implementation of log1p * +****************************************************************************/ + +namespace std_fallback { + // fallback log1p implementation in case there is no log1p(Scalar) function in namespace of Scalar, + // or that there is no suitable std::log1p function available + template + EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + typedef typename NumTraits::Real RealScalar; + EIGEN_USING_STD(log); + Scalar x1p = RealScalar(1) + x; + Scalar log_1p = log_impl::run(x1p); + const bool is_small = numext::equal_strict(x1p, Scalar(1)); + const bool is_inf = numext::equal_strict(x1p, log_1p); + return (is_small || is_inf) ? x : x * (log_1p / (x1p - RealScalar(1))); + } +} + +template +struct log1p_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + #if EIGEN_HAS_CXX11_MATH + using std::log1p; + #else + using std_fallback::log1p; + #endif + return log1p(x); + } +}; + +// Specialization for complex types that are not supported by std::log1p. 
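(Editor's aside: why std_fallback::log1p above scales by x / ((1+x) - 1) instead of returning log(1+x) directly. For tiny x, the sum 1+x rounds to exactly 1 and the naive form loses all significant bits; the correction factor recovers them. A standalone comparison, illustrative only.)

    #include <cmath>
    #include <cstdio>

    // Same shape as the Eigen fallback above; the exact == mirrors equal_strict.
    double fallback_log1p(double x) {
      double x1p = 1.0 + x;
      double l = std::log(x1p);
      return (x1p == 1.0) ? x : x * (l / (x1p - 1.0));
    }

    int main() {
      double x = 1e-17;
      std::printf("naive:    %.17g\n", std::log(1.0 + x));  // 0 -- bits lost
      std::printf("fallback: %.17g\n", fallback_log1p(x));  // 1e-17
      std::printf("std:      %.17g\n", std::log1p(x));      // 1e-17
    }
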
+template +struct log1p_impl > { + EIGEN_DEVICE_FUNC static inline std::complex run( + const std::complex& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) + return std_fallback::log1p(x); + } +}; + +template +struct log1p_retval +{ + typedef Scalar type; +}; + +/**************************************************************************** +* Implementation of pow * +****************************************************************************/ + +template::IsInteger&&NumTraits::IsInteger> +struct pow_impl +{ + //typedef Scalar retval; + typedef typename ScalarBinaryOpTraits >::ReturnType result_type; + static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y) + { + EIGEN_USING_STD(pow); + return pow(x, y); + } +}; + +template +struct pow_impl +{ + typedef ScalarX result_type; + static EIGEN_DEVICE_FUNC inline ScalarX run(ScalarX x, ScalarY y) + { + ScalarX res(1); + eigen_assert(!NumTraits::IsSigned || y >= 0); + if(y & 1) res *= x; + y >>= 1; + while(y) + { + x *= x; + if(y&1) res *= x; + y >>= 1; + } + return res; + } +}; + +/**************************************************************************** +* Implementation of random * +****************************************************************************/ + +template +struct random_default_impl {}; + +template +struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template +struct random_retval +{ + typedef Scalar type; +}; + +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); + +template +struct random_default_impl +{ + static inline Scalar run(const Scalar& x, const Scalar& y) + { + return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); + } + static inline Scalar run() + { + return run(Scalar(NumTraits::IsSigned ? -1 : 0), Scalar(1)); + } +}; + +enum { + meta_floor_log2_terminate, + meta_floor_log2_move_up, + meta_floor_log2_move_down, + meta_floor_log2_bogus +}; + +template struct meta_floor_log2_selector +{ + enum { middle = (lower + upper) / 2, + value = (upper <= lower + 1) ? int(meta_floor_log2_terminate) + : (n < (1 << middle)) ? int(meta_floor_log2_move_down) + : (n==0) ? int(meta_floor_log2_bogus) + : int(meta_floor_log2_move_up) + }; +}; + +template::value> +struct meta_floor_log2 {}; + +template +struct meta_floor_log2 +{ + enum { value = meta_floor_log2::middle>::value }; +}; + +template +struct meta_floor_log2 +{ + enum { value = meta_floor_log2::middle, upper>::value }; +}; + +template +struct meta_floor_log2 +{ + enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower }; +}; + +template +struct meta_floor_log2 +{ + // no value, error at compile time +}; + +template +struct random_default_impl +{ + static inline Scalar run(const Scalar& x, const Scalar& y) + { + if (y <= x) + return x; + // ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself. + typedef typename make_unsigned::type ScalarU; + // ScalarX is the widest of ScalarU and unsigned int. + // We'll deal only with ScalarX and unsigned int below thus avoiding signed + // types and arithmetic and signed overflows (which are undefined behavior). + typedef typename conditional<(ScalarU(-1) > unsigned(-1)), ScalarU, unsigned>::type ScalarX; + // The following difference doesn't overflow, provided our integer types are two's + // complement and have the same number of padding bits in signed and unsigned variants. 
+ // This is the case in most modern implementations of C++. + ScalarX range = ScalarX(y) - ScalarX(x); + ScalarX offset = 0; + ScalarX divisor = 1; + ScalarX multiplier = 1; + const unsigned rand_max = RAND_MAX; + if (range <= rand_max) divisor = (rand_max + 1) / (range + 1); + else multiplier = 1 + range / (rand_max + 1); + // Rejection sampling. + do { + offset = (unsigned(std::rand()) * multiplier) / divisor; + } while (offset > range); + return Scalar(ScalarX(x) + offset); + } + + static inline Scalar run() + { +#ifdef EIGEN_MAKING_DOCS + return run(Scalar(NumTraits::IsSigned ? -10 : 0), Scalar(10)); +#else + enum { rand_bits = meta_floor_log2<(unsigned int)(RAND_MAX)+1>::value, + scalar_bits = sizeof(Scalar) * CHAR_BIT, + shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)), + offset = NumTraits::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0 + }; + return Scalar((std::rand() >> shift) - offset); +#endif + } +}; + +template +struct random_default_impl +{ + static inline Scalar run(const Scalar& x, const Scalar& y) + { + return Scalar(random(x.real(), y.real()), + random(x.imag(), y.imag())); + } + static inline Scalar run() + { + typedef typename NumTraits::Real RealScalar; + return Scalar(random(), random()); + } +}; + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); +} + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() +{ + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); +} + +// Implementation of is* functions + +// std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang. +#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG) +#define EIGEN_USE_STD_FPCLASSIFY 1 +#else +#define EIGEN_USE_STD_FPCLASSIFY 0 +#endif + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if::value,bool>::type +isnan_impl(const T&) { return false; } + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if::value,bool>::type +isinf_impl(const T&) { return false; } + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if::value,bool>::type +isfinite_impl(const T&) { return true; } + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type +isfinite_impl(const T& x) +{ + #if defined(EIGEN_GPU_COMPILE_PHASE) + return (::isfinite)(x); + #elif EIGEN_USE_STD_FPCLASSIFY + using std::isfinite; + return isfinite EIGEN_NOT_A_MACRO (x); + #else + return x<=NumTraits::highest() && x>=NumTraits::lowest(); + #endif +} + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type +isinf_impl(const T& x) +{ + #if defined(EIGEN_GPU_COMPILE_PHASE) + return (::isinf)(x); + #elif EIGEN_USE_STD_FPCLASSIFY + using std::isinf; + return isinf EIGEN_NOT_A_MACRO (x); + #else + return x>NumTraits::highest() || x::lowest(); + #endif +} + +template +EIGEN_DEVICE_FUNC +typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type +isnan_impl(const T& x) +{ + #if defined(EIGEN_GPU_COMPILE_PHASE) + return (::isnan)(x); + #elif EIGEN_USE_STD_FPCLASSIFY + using std::isnan; + return isnan EIGEN_NOT_A_MACRO (x); + #else + return x != x; + #endif +} + +#if (!EIGEN_USE_STD_FPCLASSIFY) + +#if EIGEN_COMP_MSVC + +template EIGEN_DEVICE_FUNC bool isinf_msvc_helper(T x) +{ + 
return _fpclass(x)==_FPCLASS_NINF || _fpclass(x)==_FPCLASS_PINF; +} + +//MSVC defines a _isnan builtin function, but for double only +EIGEN_DEVICE_FUNC inline bool isnan_impl(const long double& x) { return _isnan(x)!=0; } +EIGEN_DEVICE_FUNC inline bool isnan_impl(const double& x) { return _isnan(x)!=0; } +EIGEN_DEVICE_FUNC inline bool isnan_impl(const float& x) { return _isnan(x)!=0; } + +EIGEN_DEVICE_FUNC inline bool isinf_impl(const long double& x) { return isinf_msvc_helper(x); } +EIGEN_DEVICE_FUNC inline bool isinf_impl(const double& x) { return isinf_msvc_helper(x); } +EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_msvc_helper(x); } + +#elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC) + +#if EIGEN_GNUC_AT_LEAST(5,0) + #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize("no-finite-math-only"))) +#else + // NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol), + // while the second prevent too aggressive optimizations in fast-math mode: + #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((noinline,optimize("no-finite-math-only"))) +#endif + +template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const long double& x) { return __builtin_isnan(x); } +template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const double& x) { return __builtin_isnan(x); } +template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const float& x) { return __builtin_isnan(x); } +template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const double& x) { return __builtin_isinf(x); } +template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const float& x) { return __builtin_isinf(x); } +template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const long double& x) { return __builtin_isinf(x); } + +#undef EIGEN_TMP_NOOPT_ATTRIB + +#endif + +#endif + +// The following overload are defined at the end of this file +template EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x); +template EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x); +template EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x); + +template T generic_fast_tanh_float(const T& a_x); +} // end namespace internal + +/**************************************************************************** +* Generic math functions * +****************************************************************************/ + +namespace numext { + +#if (!defined(EIGEN_GPUCC) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)) +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) +{ + EIGEN_USING_STD(min) + return min EIGEN_NOT_A_MACRO (x,y); +} + +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) +{ + EIGEN_USING_STD(max) + return max EIGEN_NOT_A_MACRO (x,y); +} +#else +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) +{ + return y < x ? y : x; +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y) +{ + return fminf(x, y); +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y) +{ + return fmin(x, y); +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y) +{ +#if defined(EIGEN_HIPCC) + // no "fminl" on HIP yet + return (x < y) ? x : y; +#else + return fminl(x, y); +#endif +} + +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) +{ + return x < y ? 
y : x; +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y) +{ + return fmaxf(x, y); +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y) +{ + return fmax(x, y); +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y) +{ +#if defined(EIGEN_HIPCC) + // no "fmaxl" on HIP yet + return (x > y) ? x : y; +#else + return fmaxl(x, y); +#endif +} +#endif + +#if defined(SYCL_DEVICE_ONLY) + + +#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_long) +#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_long) +#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) +#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) +#define SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) +#define SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) +#define SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC,cl::sycl::cl_double) +#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC,cl::sycl::cl_double) +#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(NAME, FUNC, RET_TYPE) \ + SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_double) + +#define SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ +template<> \ + EIGEN_DEVICE_FUNC \ + EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE& x) { \ + return cl::sycl::FUNC(x); \ + } + +#define SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, TYPE) \ + SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, TYPE, TYPE) + +#define SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \ + template<> \ + EIGEN_DEVICE_FUNC \ + EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE1& x, const ARG_TYPE2& y) { \ + return cl::sycl::FUNC(x, y); \ + } + +#define SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ + SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE, ARG_TYPE) + +#define SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, TYPE) \ + 
SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, TYPE, TYPE) + +SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(mini, min) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(mini, fmin) +SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(maxi, max) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(maxi, fmax) + +#endif + + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) +{ + return internal::real_ref_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(arg, Scalar) arg(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(arg, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) +{ + return internal::imag_ref_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); +} + +EIGEN_DEVICE_FUNC +inline bool abs2(bool x) { return x; } + +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE T absdiff(const T& x, const T& y) +{ + return x > y ? x - y : y - x; +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE float absdiff(const float& x, const float& y) +{ + return fabsf(x - y); +} +template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE double absdiff(const double& x, const double& y) +{ + return fabs(x - y); +} + +#if !defined(EIGEN_GPUCC) +// HIP and CUDA do not support long double. 
+template<> +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) { + return fabsl(x - y); +} +#endif + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); +} + +#if defined(SYCL_DEVICE_ONLY) + SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(hypot, hypot) +#endif + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log1p, log1p) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float log1p(const float &x) { return ::log1pf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double log1p(const double &x) { return ::log1p(x); } +#endif + +template +EIGEN_DEVICE_FUNC +inline typename internal::pow_impl::result_type pow(const ScalarX& x, const ScalarY& y) +{ + return internal::pow_impl::run(x, y); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(pow, pow) +#endif + +template EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); } +template EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); } +template EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); } + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isnan, isnan, bool) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isinf, isinf, bool) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isfinite, isfinite, bool) +#endif + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(rint, Scalar) rint(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(rint, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(round, round) +#endif + +template +EIGEN_DEVICE_FUNC +T (floor)(const T& x) +{ + EIGEN_USING_STD(floor) + return floor(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(floor, floor) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float floor(const float &x) { return ::floorf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double floor(const double &x) { return ::floor(x); } +#endif + +template +EIGEN_DEVICE_FUNC +T (ceil)(const T& x) +{ + EIGEN_USING_STD(ceil); + return ceil(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(ceil, ceil) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float ceil(const float &x) { return ::ceilf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double ceil(const double &x) { return ::ceil(x); } +#endif + + +/** Log base 2 for 32 bits positive integers. + * Conveniently returns 0 for x==0. 
*/ +inline int log2(int x) +{ + eigen_assert(x>=0); + unsigned int v(x); + static const int table[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return table[(v * 0x07C4ACDDU) >> 27]; +} + +/** \returns the square root of \a x. + * + * It is essentially equivalent to + * \code using std::sqrt; return sqrt(x); \endcode + * but slightly faster for float/double and some compilers (e.g., gcc), thanks to + * specializations when SSE is enabled. + * + * It's usage is justified in performance critical functions, like norm/normalize. + */ +template +EIGEN_DEVICE_FUNC +EIGEN_ALWAYS_INLINE EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); +} + +// Boolean specialization, avoids implicit float to bool conversion (-Wimplicit-conversion-floating-point-to-bool). +template<> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC +bool sqrt(const bool &x) { return x; } + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sqrt, sqrt) +#endif + +/** \returns the reciprocal square root of \a x. **/ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T rsqrt(const T& x) +{ + return internal::rsqrt_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T log(const T &x) { + return internal::log_impl::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log, log) +#endif + + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float log(const float &x) { return ::logf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double log(const double &x) { return ::log(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +typename internal::enable_if::IsSigned || NumTraits::IsComplex,typename NumTraits::Real>::type +abs(const T &x) { + EIGEN_USING_STD(abs); + return abs(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +typename internal::enable_if::IsSigned || NumTraits::IsComplex),typename NumTraits::Real>::type +abs(const T &x) { + return x; +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(abs, abs) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(abs, fabs) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float abs(const float &x) { return ::fabsf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double abs(const double &x) { return ::fabs(x); } + +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float abs(const std::complex& x) { + return ::hypotf(x.real(), x.imag()); +} + +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double abs(const std::complex& x) { + return ::hypot(x.real(), x.imag()); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T exp(const T &x) { + EIGEN_USING_STD(exp); + return exp(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp, exp) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float exp(const float &x) { return ::expf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double exp(const double &x) { return ::exp(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +std::complex exp(const std::complex& x) { + float com = ::expf(x.real()); + float res_real = com * ::cosf(x.imag()); + float res_imag = com * ::sinf(x.imag()); + return std::complex(res_real, res_imag); +} + +template<> 
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +std::complex exp(const std::complex& x) { + double com = ::exp(x.real()); + double res_real = com * ::cos(x.imag()); + double res_imag = com * ::sin(x.imag()); + return std::complex(res_real, res_imag); +} +#endif + +template +EIGEN_DEVICE_FUNC +inline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(expm1, Scalar)::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(expm1, expm1) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float expm1(const float &x) { return ::expm1f(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double expm1(const double &x) { return ::expm1(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T cos(const T &x) { + EIGEN_USING_STD(cos); + return cos(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cos,cos) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float cos(const float &x) { return ::cosf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double cos(const double &x) { return ::cos(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T sin(const T &x) { + EIGEN_USING_STD(sin); + return sin(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sin, sin) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float sin(const float &x) { return ::sinf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double sin(const double &x) { return ::sin(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T tan(const T &x) { + EIGEN_USING_STD(tan); + return tan(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tan, tan) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float tan(const float &x) { return ::tanf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double tan(const double &x) { return ::tan(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T acos(const T &x) { + EIGEN_USING_STD(acos); + return acos(x); +} + +#if EIGEN_HAS_CXX11_MATH +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T acosh(const T &x) { + EIGEN_USING_STD(acosh); + return static_cast(acosh(x)); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acos, acos) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acosh, acosh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float acos(const float &x) { return ::acosf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double acos(const double &x) { return ::acos(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T asin(const T &x) { + EIGEN_USING_STD(asin); + return asin(x); +} + +#if EIGEN_HAS_CXX11_MATH +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T asinh(const T &x) { + EIGEN_USING_STD(asinh); + return static_cast(asinh(x)); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asin, asin) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asinh, asinh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float asin(const float &x) { return ::asinf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double asin(const double &x) { return ::asin(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T atan(const T &x) { + EIGEN_USING_STD(atan); + return static_cast(atan(x)); +} + 
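(Editor's aside: the recurring idiom in this block, EIGEN_USING_STD(f) followed by an unqualified call f(x), lets argument-dependent lookup pick up overloads for user-defined scalar types while builtin types fall back to std::. A sketch with a hypothetical mylib::MyScalar.)

    #include <cmath>
    #include <iostream>

    namespace mylib {
    struct MyScalar { double v; };
    MyScalar atan(MyScalar x) { return MyScalar{std::atan(x.v)}; }  // found by ADL
    }

    template <typename T>
    T generic_atan(const T& x) {
      using std::atan;  // fallback for builtin types...
      return atan(x);   // ...but ADL selects mylib::atan for MyScalar
    }

    int main() {
      std::cout << generic_atan(1.0) << " "
                << generic_atan(mylib::MyScalar{1.0}).v << "\n";
    }
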
+#if EIGEN_HAS_CXX11_MATH +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T atanh(const T &x) { + EIGEN_USING_STD(atanh); + return static_cast(atanh(x)); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atan, atan) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atanh, atanh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float atan(const float &x) { return ::atanf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double atan(const double &x) { return ::atan(x); } +#endif + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T cosh(const T &x) { + EIGEN_USING_STD(cosh); + return static_cast(cosh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cosh, cosh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float cosh(const float &x) { return ::coshf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double cosh(const double &x) { return ::cosh(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T sinh(const T &x) { + EIGEN_USING_STD(sinh); + return static_cast(sinh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sinh, sinh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float sinh(const float &x) { return ::sinhf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double sinh(const double &x) { return ::sinh(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T tanh(const T &x) { + EIGEN_USING_STD(tanh); + return tanh(x); +} + +#if (!defined(EIGEN_GPUCC)) && EIGEN_FAST_MATH && !defined(SYCL_DEVICE_ONLY) +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float tanh(float x) { return internal::generic_fast_tanh_float(x); } +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tanh, tanh) +#endif + +#if defined(EIGEN_GPUCC) +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float tanh(const float &x) { return ::tanhf(x); } + +template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double tanh(const double &x) { return ::tanh(x); } +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T fmod(const T& a, const T& b) { + EIGEN_USING_STD(fmod); + return fmod(a, b); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(fmod, fmod) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float fmod(const float& a, const float& b) { + return ::fmodf(a, b); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double fmod(const double& a, const double& b) { + return ::fmod(a, b); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_BINARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE +#undef SYCL_SPECIALIZE_GEN_UNARY_FUNC +#undef SYCL_SPECIALIZE_UNARY_FUNC +#undef SYCL_SPECIALIZE_GEN1_BINARY_FUNC +#undef SYCL_SPECIALIZE_GEN2_BINARY_FUNC +#undef SYCL_SPECIALIZE_BINARY_FUNC +#endif + +} // end namespace numext + +namespace internal { + +template +EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x) +{ + return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x)); +} + 
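(Editor's aside: isfinite_impl above and the isnan_impl/isinf_impl overloads just below implement a componentwise convention for std::complex: finite iff both parts are finite, NaN if either part is NaN, and infinite only when some part is infinite and neither part is NaN. A standalone check of that convention, illustrative only.)

    #include <cmath>
    #include <complex>
    #include <cstdio>
    #include <limits>

    // Mirrors the convention implemented here (illustrative only).
    bool cplx_isinf(const std::complex<double>& z) {
      return (std::isinf(z.real()) || std::isinf(z.imag())) &&
             !(std::isnan(z.real()) || std::isnan(z.imag()));
    }

    int main() {
      const double inf = std::numeric_limits<double>::infinity();
      const double nan = std::numeric_limits<double>::quiet_NaN();
      std::printf("%d\n", cplx_isinf(std::complex<double>(1.0, inf)));  // 1
      std::printf("%d\n", cplx_isinf(std::complex<double>(nan, inf)));  // 0: NaN wins
    }
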
+template +EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x) +{ + return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x)); +} + +template +EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x) +{ + return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x)); +} + +/**************************************************************************** +* Implementation of fuzzy comparisons * +****************************************************************************/ + +template +struct scalar_fuzzy_default_impl {}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template EIGEN_DEVICE_FUNC + static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) + { + return numext::abs(x) <= numext::abs(y) * prec; + } + EIGEN_DEVICE_FUNC + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + return numext::abs(x - y) <= numext::mini(numext::abs(x), numext::abs(y)) * prec; + } + EIGEN_DEVICE_FUNC + static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + return x <= y || isApprox(x, y, prec); + } +}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template EIGEN_DEVICE_FUNC + static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) + { + return x == Scalar(0); + } + EIGEN_DEVICE_FUNC + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) + { + return x == y; + } + EIGEN_DEVICE_FUNC + static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) + { + return x <= y; + } +}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template EIGEN_DEVICE_FUNC + static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) + { + return numext::abs2(x) <= numext::abs2(y) * prec * prec; + } + EIGEN_DEVICE_FUNC + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + return numext::abs2(x - y) <= numext::mini(numext::abs2(x), numext::abs2(y)) * prec * prec; + } +}; + +template +struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template EIGEN_DEVICE_FUNC +inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); +} + +template EIGEN_DEVICE_FUNC +inline bool isApprox(const Scalar& x, const Scalar& y, + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::isApprox(x, y, precision); +} + +template EIGEN_DEVICE_FUNC +inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); +} + +/****************************************** +*** The special case of the bool type *** +******************************************/ + +template<> struct random_impl +{ + static inline bool run() + { + return random(0,1)==0 ? false : true; + } + + static inline bool run(const bool& a, const bool& b) + { + return random(a, b)==0 ? 
false : true; + } +}; + +template<> struct scalar_fuzzy_impl +{ + typedef bool RealScalar; + + template EIGEN_DEVICE_FUNC + static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) + { + return !x; + } + + EIGEN_DEVICE_FUNC + static inline bool isApprox(bool x, bool y, bool) + { + return x == y; + } + + EIGEN_DEVICE_FUNC + static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) + { + return (!x) || y; + } + +}; + +} // end namespace internal + +// Default implementations that rely on other numext implementations +namespace internal { + +// Specialization for complex types that are not supported by std::expm1. +template +struct expm1_impl > { + EIGEN_DEVICE_FUNC static inline std::complex run( + const std::complex& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) + RealScalar xr = x.real(); + RealScalar xi = x.imag(); + // expm1(z) = exp(z) - 1 + // = exp(x + i * y) - 1 + // = exp(x) * (cos(y) + i * sin(y)) - 1 + // = exp(x) * cos(y) - 1 + i * exp(x) * sin(y) + // Imag(expm1(z)) = exp(x) * sin(y) + // Real(expm1(z)) = exp(x) * cos(y) - 1 + // = exp(x) * cos(y) - 1. + // = expm1(x) + exp(x) * (cos(y) - 1) + // = expm1(x) + exp(x) * (2 * sin(y / 2) ** 2) + RealScalar erm1 = numext::expm1(xr); + RealScalar er = erm1 + RealScalar(1.); + RealScalar sin2 = numext::sin(xi / RealScalar(2.)); + sin2 = sin2 * sin2; + RealScalar s = numext::sin(xi); + RealScalar real_part = erm1 - RealScalar(2.) * er * sin2; + return std::complex(real_part, er * s); + } +}; + +template +struct rsqrt_impl { + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE T run(const T& x) { + return T(1)/numext::sqrt(x); + } +}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct conj_impl, true> +{ + EIGEN_DEVICE_FUNC + static inline std::complex run(const std::complex& x) + { + return std::complex(numext::real(x), -numext::imag(x)); + } +}; +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATHFUNCTIONS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctionsImpl.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctionsImpl.h new file mode 100644 index 000000000..4eaaaa784 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MathFunctionsImpl.h @@ -0,0 +1,200 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATHFUNCTIONSIMPL_H +#define EIGEN_MATHFUNCTIONSIMPL_H + +namespace Eigen { + +namespace internal { + +/** \internal \returns the hyperbolic tan of \a a (coeff-wise) + Doesn't do anything fancy, just a 13/6-degree rational interpolant which + is accurate up to a couple of ulps in the (approximate) range [-8, 8], + outside of which tanh(x) = +/-1 in single precision. The input is clamped + to the range [-c, c]. The value c is chosen as the smallest value where + the approximation evaluates to exactly 1. In the reange [-0.0004, 0.0004] + the approxmation tanh(x) ~= x is used for better accuracy as x tends to zero. + + This implementation works on both scalars and packets. 
+*/ +template +T generic_fast_tanh_float(const T& a_x) +{ + // Clamp the inputs to the range [-c, c] +#ifdef EIGEN_VECTORIZE_FMA + const T plus_clamp = pset1(7.99881172180175781f); + const T minus_clamp = pset1(-7.99881172180175781f); +#else + const T plus_clamp = pset1(7.90531110763549805f); + const T minus_clamp = pset1(-7.90531110763549805f); +#endif + const T tiny = pset1(0.0004f); + const T x = pmax(pmin(a_x, plus_clamp), minus_clamp); + const T tiny_mask = pcmp_lt(pabs(a_x), tiny); + // The monomial coefficients of the numerator polynomial (odd). + const T alpha_1 = pset1(4.89352455891786e-03f); + const T alpha_3 = pset1(6.37261928875436e-04f); + const T alpha_5 = pset1(1.48572235717979e-05f); + const T alpha_7 = pset1(5.12229709037114e-08f); + const T alpha_9 = pset1(-8.60467152213735e-11f); + const T alpha_11 = pset1(2.00018790482477e-13f); + const T alpha_13 = pset1(-2.76076847742355e-16f); + + // The monomial coefficients of the denominator polynomial (even). + const T beta_0 = pset1(4.89352518554385e-03f); + const T beta_2 = pset1(2.26843463243900e-03f); + const T beta_4 = pset1(1.18534705686654e-04f); + const T beta_6 = pset1(1.19825839466702e-06f); + + // Since the polynomials are odd/even, we need x^2. + const T x2 = pmul(x, x); + + // Evaluate the numerator polynomial p. + T p = pmadd(x2, alpha_13, alpha_11); + p = pmadd(x2, p, alpha_9); + p = pmadd(x2, p, alpha_7); + p = pmadd(x2, p, alpha_5); + p = pmadd(x2, p, alpha_3); + p = pmadd(x2, p, alpha_1); + p = pmul(x, p); + + // Evaluate the denominator polynomial q. + T q = pmadd(x2, beta_6, beta_4); + q = pmadd(x2, q, beta_2); + q = pmadd(x2, q, beta_0); + + // Divide the numerator by the denominator. + return pselect(tiny_mask, x, pdiv(p, q)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) +{ + // IEEE IEC 6059 special cases. + if ((numext::isinf)(x) || (numext::isinf)(y)) + return NumTraits::infinity(); + if ((numext::isnan)(x) || (numext::isnan)(y)) + return NumTraits::quiet_NaN(); + + EIGEN_USING_STD(sqrt); + RealScalar p, qp; + p = numext::maxi(x,y); + if(p==RealScalar(0)) return RealScalar(0); + qp = numext::mini(y,x) / p; + return p * sqrt(RealScalar(1) + qp*qp); +} + +template +struct hypot_impl +{ + typedef typename NumTraits::Real RealScalar; + static EIGEN_DEVICE_FUNC + inline RealScalar run(const Scalar& x, const Scalar& y) + { + EIGEN_USING_STD(abs); + return positive_real_hypot(abs(x), abs(y)); + } +}; + +// Generic complex sqrt implementation that correctly handles corner cases +// according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt +template +EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& z) { + // Computes the principal sqrt of the input. + // + // For a complex square root of the number x + i*y. We want to find real + // numbers u and v such that + // (u + i*v)^2 = x + i*y <=> + // u^2 - v^2 + i*2*u*v = x + i*v. + // By equating the real and imaginary parts we get: + // u^2 - v^2 = x + // 2*u*v = y. 
+ // + // For x >= 0, this has the numerically stable solution + // u = sqrt(0.5 * (x + sqrt(x^2 + y^2))) + // v = y / (2 * u) + // and for x < 0, + // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2))) + // u = y / (2 * v) + // + // Letting w = sqrt(0.5 * (|x| + |z|)), + // if x == 0: u = w, v = sign(y) * w + // if x > 0: u = w, v = y / (2 * w) + // if x < 0: u = |y| / (2 * w), v = sign(y) * w + + const T x = numext::real(z); + const T y = numext::imag(z); + const T zero = T(0); + const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y))); + + return + (numext::isinf)(y) ? std::complex(NumTraits::infinity(), y) + : x == zero ? std::complex(w, y < zero ? -w : w) + : x > zero ? std::complex(w, y / (2 * w)) + : std::complex(numext::abs(y) / (2 * w), y < zero ? -w : w ); +} + +// Generic complex rsqrt implementation. +template +EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& z) { + // Computes the principal reciprocal sqrt of the input. + // + // For a complex reciprocal square root of the number z = x + i*y. We want to + // find real numbers u and v such that + // (u + i*v)^2 = 1 / (x + i*y) <=> + // u^2 - v^2 + i*2*u*v = x/|z|^2 - i*v/|z|^2. + // By equating the real and imaginary parts we get: + // u^2 - v^2 = x/|z|^2 + // 2*u*v = y/|z|^2. + // + // For x >= 0, this has the numerically stable solution + // u = sqrt(0.5 * (x + |z|)) / |z| + // v = -y / (2 * u * |z|) + // and for x < 0, + // v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z| + // u = -y / (2 * v * |z|) + // + // Letting w = sqrt(0.5 * (|x| + |z|)), + // if x == 0: u = w / |z|, v = -sign(y) * w / |z| + // if x > 0: u = w / |z|, v = -y / (2 * w * |z|) + // if x < 0: u = |y| / (2 * w * |z|), v = -sign(y) * w / |z| + + const T x = numext::real(z); + const T y = numext::imag(z); + const T zero = T(0); + + const T abs_z = numext::hypot(x, y); + const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z)); + const T woz = w / abs_z; + // Corner cases consistent with 1/sqrt(z) on gcc/clang. + return + abs_z == zero ? std::complex(NumTraits::infinity(), NumTraits::quiet_NaN()) + : ((numext::isinf)(x) || (numext::isinf)(y)) ? std::complex(zero, zero) + : x == zero ? std::complex(woz, y < zero ? woz : -woz) + : x > zero ? std::complex(woz, -y / (2 * w * abs_z)) + : std::complex(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz ); +} + +template +EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z) { + // Computes complex log. + T a = numext::abs(z); + EIGEN_USING_STD(atan2); + T b = atan2(z.imag(), z.real()); + return std::complex(numext::log(a), b); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATHFUNCTIONSIMPL_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Matrix.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Matrix.h new file mode 100644 index 000000000..f0e59a911 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Matrix.h @@ -0,0 +1,565 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
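Editor's note (not part of the diff): positive_real_hypot in the file above is the classic overflow-safe hypot — factor out the larger magnitude so the only squared quantity is a ratio bounded by 1. A self-contained sketch of the same computation using only the standard library (stable_hypot is a hypothetical name):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Computes sqrt(x^2 + y^2) without overflowing in the intermediate squares.
double stable_hypot(double x, double y) {
  x = std::abs(x);
  y = std::abs(y);
  double p = std::max(x, y);
  if (p == 0.0) return 0.0;        // both inputs are zero
  double q = std::min(x, y) / p;   // q <= 1, so q*q cannot overflow
  return p * std::sqrt(1.0 + q * q);
}

int main() {
  // The naive sqrt(x*x + y*y) overflows to inf for these inputs;
  // the factored form returns ~5e300.
  std::printf("%g\n", stable_hypot(3e300, 4e300));
}
```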
+ +#ifndef EIGEN_MATRIX_H +#define EIGEN_MATRIX_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ +private: + enum { size = internal::size_at_compile_time<_Rows,_Cols>::ret }; + typedef typename find_best_packet<_Scalar,size>::type PacketScalar; + enum { + row_major_bit = _Options&RowMajor ? RowMajorBit : 0, + is_dynamic_size_storage = _MaxRows==Dynamic || _MaxCols==Dynamic, + max_size = is_dynamic_size_storage ? Dynamic : _MaxRows*_MaxCols, + default_alignment = compute_default_alignment<_Scalar,max_size>::value, + actual_alignment = ((_Options&DontAlign)==0) ? default_alignment : 0, + required_alignment = unpacket_traits::alignment, + packet_access_bit = (packet_traits<_Scalar>::Vectorizable && (EIGEN_UNALIGNED_VECTORIZE || (actual_alignment>=required_alignment))) ? PacketAccessBit : 0 + }; + +public: + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef Eigen::Index StorageIndex; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _MaxRows, + MaxColsAtCompileTime = _MaxCols, + Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, + Options = _Options, + InnerStrideAtCompileTime = 1, + OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime, + + // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase + EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit, + Alignment = actual_alignment + }; +}; +} + +/** \class Matrix + * \ingroup Core_Module + * + * \brief The matrix class, also used for vectors and row-vectors + * + * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. + * Vectors are matrices with one column, and row-vectors are matrices with one row. + * + * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). + * + * The first three template parameters are required: + * \tparam _Scalar Numeric type, e.g. float, double, int or std::complex. + * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). + * \tparam _Rows Number of rows, or \b Dynamic + * \tparam _Cols Number of columns, or \b Dynamic + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. + * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). + * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). + * + * Eigen provides a number of typedefs covering the usual cases. 
Here are some examples: + * + * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix) + * \li \c Vector4f is a vector of 4 floats (\c Matrix) + * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix) + * + * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix) + * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix) + * + * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix) + * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix) + * + * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. + * + * You can access elements of vectors and matrices using normal subscripting: + * + * \code + * Eigen::VectorXd v(10); + * v[0] = 0.1; + * v[1] = 0.2; + * v(0) = 0.3; + * v(1) = 0.4; + * + * Eigen::MatrixXi m(10, 10); + * m(0, 1) = 1; + * m(0, 2) = 2; + * m(0, 3) = 3; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. + * + * Some notes: + * + *
+ * <dl>
+ * <dt><b>\anchor dense Dense versus sparse:</b></dt>
+ * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
+ *
+ * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored in an ordinary contiguous array.
+ * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
+ *
+ * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
+ * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
+ * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
+ * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
+ *
+ * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
+ * variables, and the array of coefficients is allocated dynamically on the heap.
+ *
+ * Note that \em dense matrices, be they fixed-size or dynamic-size, do not expand dynamically in the sense of a std::map.
+ * If you want this behavior, see the Sparse module.</dd>
+ *
+ * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt>
+ * <dd>In most cases, one just leaves these parameters to the default values.
+ * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
+ * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
+ * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
+ * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>
+ * </dl>
+ *
+ * <i><b>ABI and storage layout</b></i>
+ *
+ * The table below summarizes the ABI of some possible Matrix instances, which is fixed throughout the lifetime of Eigen 3.
+ *
+ * <table class="manual">
+ * <tr><th>Matrix type</th><th>Equivalent C structure</th></tr>
+ * <tr><td>\code Matrix<T,Dynamic,Dynamic> \endcode</td><td>\code
+ * struct {
+ *   T *data;                  // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
+ *   Eigen::Index rows, cols;
+ * };
+ * \endcode</td></tr>
+ * <tr><td>\code
+ * Matrix<T,Dynamic,1>
+ * Matrix<T,1,Dynamic> \endcode</td><td>\code
+ * struct {
+ *   T *data;                  // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
+ *   Eigen::Index size;
+ * };
+ * \endcode</td></tr>
+ * <tr><td>\code Matrix<T,Rows,Cols> \endcode</td><td>\code
+ * struct {
+ *   T data[Rows*Cols];        // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0
+ * };
+ * \endcode</td></tr>
+ * <tr><td>\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \endcode</td><td>\code
+ * struct {
+ *   T data[MaxRows*MaxCols];  // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0
+ *   Eigen::Index rows, cols;
+ * };
+ * \endcode</td></tr>
+ * </table>
+ * Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined to the largest possible power-of-two + * smaller to EIGEN_MAX_STATIC_ALIGN_BYTES. + * + * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, + * \ref TopicStorageOrders + */ + +template +class Matrix + : public PlainObjectBase > +{ + public: + + /** \brief Base class typedef. + * \sa PlainObjectBase + */ + typedef PlainObjectBase Base; + + enum { Options = _Options }; + + EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) + + typedef typename Base::PlainObject PlainObject; + + using Base::base; + using Base::coeffRef; + + /** + * \brief Assigns matrices to each other. + * + * \note This is a special case of the templated operator=. Its purpose is + * to prevent a default operator= from hiding the templated operator=. + * + * \callgraph + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) + { + return Base::_set(other); + } + + /** \internal + * \brief Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase& other) + { + return Base::_set(other); + } + + /* Here, doxygen failed to copy the brief information when using \copydoc */ + + /** + * \brief Copies the generic expression \a other into *this. + * \copydetails DenseBase::operator=(const EigenBase &other) + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase &other) + { + return Base::operator=(other); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue& func) + { + return Base::operator=(func); + } + + /** \brief Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Matrix() : Base() + { + Base::_check_template_params(); + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + + // FIXME is it still needed + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit Matrix(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) + { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible::value) + : Base(std::move(other)) + { + Base::_check_template_params(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) + { + Base::operator=(std::move(other)); + return *this; + } +#endif + +#if EIGEN_HAS_CXX11 + /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... 
args) + * + * Example: \include Matrix_variadic_ctor_cxx11.cpp + * Output: \verbinclude Matrix_variadic_ctor_cxx11.out + * + * \sa Matrix(const std::initializer_list>&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + : Base(a0, a1, a2, a3, args...) {} + + /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11 + * + * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: + * + * Example: \include Matrix_initializer_list_23_cxx11.cpp + * Output: \verbinclude Matrix_initializer_list_23_cxx11.out + * + * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered. + * + * In the case of a compile-time column vector, implicit transposition from a single row is allowed. + * Therefore VectorXd{{1,2,3,4,5}} is legal and the more verbose syntax + * RowVectorXd{{1},{2},{3},{4},{5}} can be avoided: + * + * Example: \include Matrix_initializer_list_vector_cxx11.cpp + * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out + * + * In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes, + * and implicit transposition is allowed for compile-time vectors only. + * + * \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC + explicit EIGEN_STRONG_INLINE Matrix(const std::initializer_list>& list) : Base(list) {} +#endif // end EIGEN_HAS_CXX11 + +#ifndef EIGEN_PARSED_BY_DOXYGEN + + // This constructor is for both 1x1 matrices and dynamic vectors + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit Matrix(const T& x) + { + Base::_check_template_params(); + Base::template _init1(x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Matrix(const T0& x, const T1& y) + { + Base::_check_template_params(); + Base::template _init2(x, y); + } + + +#else + /** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */ + EIGEN_DEVICE_FUNC + explicit Matrix(const Scalar *data); + + /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * This is useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. + * + * \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance, + * calling Matrix(1) will call the initialization constructor: Matrix(const Scalar&). + * For fixed-size \c 1x1 matrices it is therefore recommended to use the default + * constructor Matrix() instead, especially when using one of the non standard + * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). + */ + EIGEN_STRONG_INLINE explicit Matrix(Index dim); + /** \brief Constructs an initialized 1x1 matrix with the given coefficient + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ + Matrix(const Scalar& x); + /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size matrices. For fixed-size matrices, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. 
+ * + * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance, + * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y). + * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default + * constructor Matrix() instead, especially when using one of the non standard + * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). + */ + EIGEN_DEVICE_FUNC + Matrix(Index rows, Index cols); + + /** \brief Constructs an initialized 2D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ + Matrix(const Scalar& x, const Scalar& y); + #endif // end EIGEN_PARSED_BY_DOXYGEN + + /** \brief Constructs an initialized 3D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + } + /** \brief Constructs an initialized 4D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + m_storage.data()[3] = w; + } + + + /** \brief Copy constructor */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other) + { } + + /** \brief Copy constructor for generic expressions. + * \sa MatrixBase::operator=(const EigenBase&) + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Matrix(const EigenBase &other) + : Base(other.derived()) + { } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const EIGEN_NOEXCEPT { return 1; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } + + /////////// Geometry module /////////// + + template + EIGEN_DEVICE_FUNC + explicit Matrix(const RotationBase& r); + template + EIGEN_DEVICE_FUNC + Matrix& operator=(const RotationBase& r); + + // allow to extend Matrix outside Eigen + #ifdef EIGEN_MATRIX_PLUGIN + #include EIGEN_MATRIX_PLUGIN + #endif + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; +}; + +/** \defgroup matrixtypedefs Global matrix typedefs + * + * \ingroup Core_Module + * + * %Eigen defines several typedef shortcuts for most common matrix and vector types. + * + * The general patterns are the following: + * + * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. + * + * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is + * a fixed-size vector of 4 complex floats. 
+ * + * With \cpp11, template alias are also defined for common sizes. + * They follow the same pattern as above except that the scalar type suffix is replaced by a + * template parameter, i.e.: + * - `MatrixSize` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size. + * - `MatrixXSize` and `MatrixSizeX` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices. + * - `VectorSize` and `RowVectorSize` for column and row vectors. + * + * With \cpp11, you can also use fully generic column and row vector types: `Vector` and `RowVector`. + * + * \sa class Matrix + */ + +#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Vector##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##Size##X##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##X##Size##TypeSuffix; + +#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS + +#if EIGEN_HAS_CXX11 + +#define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \ +/** \ingroup matrixtypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Matrix##SizeSuffix = Matrix; \ +/** \ingroup matrixtypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Vector##SizeSuffix = Matrix; \ +/** \ingroup matrixtypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using RowVector##SizeSuffix = Matrix; + +#define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \ +/** \ingroup matrixtypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Matrix##Size##X = Matrix; \ +/** \ingroup matrixtypedefs */ \ +/** \brief \cpp11 */ \ +template \ +using Matrix##X##Size = Matrix; + +EIGEN_MAKE_TYPEDEFS(2, 2) +EIGEN_MAKE_TYPEDEFS(3, 3) +EIGEN_MAKE_TYPEDEFS(4, 4) +EIGEN_MAKE_TYPEDEFS(Dynamic, X) +EIGEN_MAKE_FIXED_TYPEDEFS(2) +EIGEN_MAKE_FIXED_TYPEDEFS(3) +EIGEN_MAKE_FIXED_TYPEDEFS(4) + +/** \ingroup matrixtypedefs + * \brief \cpp11 */ +template +using Vector = Matrix; + +/** \ingroup matrixtypedefs + * \brief \cpp11 */ +template +using RowVector = Matrix; + +#undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS + +#endif // EIGEN_HAS_CXX11 + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MatrixBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MatrixBase.h new file mode 100644 index 000000000..45c3a596e --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/MatrixBase.h @@ -0,0 +1,547 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXBASE_H +#define EIGEN_MATRIXBASE_H + +namespace Eigen { + +/** \class MatrixBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and expressions + * + * This class is the base that is inherited by all matrix, vector, and related expression + * types. Most of the Eigen API is contained in this class, and its base classes. Other important + * classes for the Eigen API are Matrix, and VectorwiseOp. + * + * Note that some methods are defined in other modules such as the \ref LU_Module LU module + * for all functions related to matrix inversions. + * + * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. + * + * When writing a function taking Eigen objects as argument, if you want your function + * to take as argument any matrix, vector, or expression, just let it take a + * MatrixBase argument. As an example, here is a function printFirstRow which, given + * a matrix, vector, or expression \a x, prints the first row of \a x. + * + * \code + template + void printFirstRow(const Eigen::MatrixBase& x) + { + cout << x.row(0) << endl; + } + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. + * + * \sa \blank \ref TopicClassHierarchy + */ +template class MatrixBase + : public DenseBase +{ + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef MatrixBase StorageBaseType; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::eval; + using Base::operator-; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; + typedef typename Base::RowXpr RowXpr; + typedef typename Base::ColXpr ColXpr; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the size of the main diagonal, which is min(rows(),cols()). + * \sa rows(), cols(), SizeAtCompileTime. 
*/ + EIGEN_DEVICE_FUNC + inline Index diagonalSize() const { return (numext::mini)(rows(),cols()); } + + typedef typename Base::PlainObject PlainObject; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,PlainObject> ConstantReturnType; + /** \internal the return type of MatrixBase::adjoint() */ + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, ConstTransposeReturnType>, + ConstTransposeReturnType + >::type AdjointReturnType; + /** \internal Return type of eigenvalues() */ + typedef Matrix, internal::traits::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; + /** \internal the return type of identity */ + typedef CwiseNullaryOp,PlainObject> IdentityReturnType; + /** \internal the return type of unit vectors */ + typedef Block, SquareMatrixType>, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime> BasisReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase +#define EIGEN_DOC_UNARY_ADDONS(X,Y) +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# ifdef EIGEN_MATRIXBASE_PLUGIN +# include EIGEN_MATRIXBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_UNARY_ADDONS + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const MatrixBase& other); + + // We cannot inherit here via Base::operator= since it is causing + // trouble with MSVC. + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator=(const DenseBase& other); + + template + EIGEN_DEVICE_FUNC + Derived& operator=(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC + Derived& operator=(const ReturnByValue& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const MatrixBase& other); + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const MatrixBase& other); + + template + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase &other) const; + + template + EIGEN_DEVICE_FUNC + const Product + lazyProduct(const MatrixBase &other) const; + + template + Derived& operator*=(const EigenBase& other); + + template + void applyOnTheLeft(const EigenBase& other); + + template + void applyOnTheRight(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC + const Product + operator*(const DiagonalBase &diagonal) const; + + template + EIGEN_DEVICE_FUNC + typename ScalarBinaryOpTraits::Scalar,typename internal::traits::Scalar>::ReturnType + dot(const MatrixBase& other) const; + + EIGEN_DEVICE_FUNC RealScalar squaredNorm() const; + EIGEN_DEVICE_FUNC RealScalar norm() const; + RealScalar stableNorm() const; + RealScalar blueNorm() const; + RealScalar hypotNorm() const; + EIGEN_DEVICE_FUNC const PlainObject normalized() const; + EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const; + EIGEN_DEVICE_FUNC void normalize(); + EIGEN_DEVICE_FUNC void stableNormalize(); + + EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const; + EIGEN_DEVICE_FUNC void adjointInPlace(); + + typedef Diagonal DiagonalReturnType; + EIGEN_DEVICE_FUNC + DiagonalReturnType diagonal(); + + typedef typename internal::add_const >::type ConstDiagonalReturnType; + EIGEN_DEVICE_FUNC + ConstDiagonalReturnType diagonal() const; + + 
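Editor's usage sketch (not part of the diff) for a few of the MatrixBase members declared above — dot, norm, adjoint and the diagonal accessors. It assumes the Eigen headers shipped in this tree are on the include path:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  Eigen::Vector3d v(1.0, 0.0, -1.0);

  std::cout << "v.dot(v) = " << v.dot(v) << "\n";                      // 2
  std::cout << "v.norm() = " << v.norm() << "\n";                      // sqrt(2)
  std::cout << "main diagonal: " << m.diagonal().transpose() << "\n";  // 1 5 10
  std::cout << "superdiagonal: " << m.diagonal<1>().transpose() << "\n"; // 2 6
  std::cout << "adjoint (= transpose for real scalars):\n"
            << m.adjoint() << "\n";
}
```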
template struct DiagonalIndexReturnType { typedef Diagonal Type; }; + template struct ConstDiagonalIndexReturnType { typedef const Diagonal Type; }; + + template + EIGEN_DEVICE_FUNC + typename DiagonalIndexReturnType::Type diagonal(); + + template + EIGEN_DEVICE_FUNC + typename ConstDiagonalIndexReturnType::Type diagonal() const; + + typedef Diagonal DiagonalDynamicIndexReturnType; + typedef typename internal::add_const >::type ConstDiagonalDynamicIndexReturnType; + + EIGEN_DEVICE_FUNC + DiagonalDynamicIndexReturnType diagonal(Index index); + EIGEN_DEVICE_FUNC + ConstDiagonalDynamicIndexReturnType diagonal(Index index) const; + + template struct TriangularViewReturnType { typedef TriangularView Type; }; + template struct ConstTriangularViewReturnType { typedef const TriangularView Type; }; + + template + EIGEN_DEVICE_FUNC + typename TriangularViewReturnType::Type triangularView(); + template + EIGEN_DEVICE_FUNC + typename ConstTriangularViewReturnType::Type triangularView() const; + + template struct SelfAdjointViewReturnType { typedef SelfAdjointView Type; }; + template struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView Type; }; + + template + EIGEN_DEVICE_FUNC + typename SelfAdjointViewReturnType::Type selfadjointView(); + template + EIGEN_DEVICE_FUNC + typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; + + const SparseView sparseView(const Scalar& m_reference = Scalar(0), + const typename NumTraits::Real& m_epsilon = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(); + EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i); + EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitX(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitY(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitW(); + + EIGEN_DEVICE_FUNC + const DiagonalWrapper asDiagonal() const; + const PermutationWrapper asPermutation() const; + + EIGEN_DEVICE_FUNC + Derived& setIdentity(); + EIGEN_DEVICE_FUNC + Derived& setIdentity(Index rows, Index cols); + EIGEN_DEVICE_FUNC Derived& setUnit(Index i); + EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i); + + bool isIdentity(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isDiagonal(const RealScalar& prec = NumTraits::dummy_precision()) const; + + bool isUpperTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isLowerTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; + + template + bool isOrthogonal(const MatrixBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isUnitary(const RealScalar& prec = NumTraits::dummy_precision()) const; + + /** \returns true if each coefficients of \c *this and \a other are all exactly equal. + * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator!= */ + template + EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase& other) const + { return cwiseEqual(other).all(); } + + /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other. 
+ * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator== */ + template + EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase& other) const + { return cwiseNotEqual(other).any(); } + + NoAlias EIGEN_DEVICE_FUNC noalias(); + + // TODO forceAlignedAccess is temporarily disabled + // Need to find a nicer workaround. + inline const Derived& forceAlignedAccess() const { return derived(); } + inline Derived& forceAlignedAccess() { return derived(); } + template inline const Derived& forceAlignedAccessIf() const { return derived(); } + template inline Derived& forceAlignedAccessIf() { return derived(); } + + EIGEN_DEVICE_FUNC Scalar trace() const; + + template EIGEN_DEVICE_FUNC RealScalar lpNorm() const; + + EIGEN_DEVICE_FUNC MatrixBase& matrix() { return *this; } + EIGEN_DEVICE_FUNC const MatrixBase& matrix() const { return *this; } + + /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix + * \sa ArrayBase::matrix() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper array() { return ArrayWrapper(derived()); } + /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix + * \sa ArrayBase::matrix() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper array() const { return ArrayWrapper(derived()); } + +/////////// LU module /////////// + + inline const FullPivLU fullPivLu() const; + inline const PartialPivLU partialPivLu() const; + + inline const PartialPivLU lu() const; + + EIGEN_DEVICE_FUNC + inline const Inverse inverse() const; + + template + inline void computeInverseAndDetWithCheck( + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() + ) const; + + template + inline void computeInverseWithCheck( + ResultType& inverse, + bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() + ) const; + + EIGEN_DEVICE_FUNC + Scalar determinant() const; + +/////////// Cholesky module /////////// + + inline const LLT llt() const; + inline const LDLT ldlt() const; + +/////////// QR module /////////// + + inline const HouseholderQR householderQr() const; + inline const ColPivHouseholderQR colPivHouseholderQr() const; + inline const FullPivHouseholderQR fullPivHouseholderQr() const; + inline const CompleteOrthogonalDecomposition completeOrthogonalDecomposition() const; + +/////////// Eigenvalues module /////////// + + inline EigenvaluesReturnType eigenvalues() const; + inline RealScalar operatorNorm() const; + +/////////// SVD module /////////// + + inline JacobiSVD jacobiSvd(unsigned int computationOptions = 0) const; + inline BDCSVD bdcSvd(unsigned int computationOptions = 0) const; + +/////////// Geometry module /////////// + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /// \internal helper struct to form the return type of the cross product + template struct cross_product_return_type { + typedef typename ScalarBinaryOpTraits::Scalar,typename internal::traits::Scalar>::ReturnType Scalar; + typedef Matrix type; + }; + #endif // EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC +#ifndef EIGEN_PARSED_BY_DOXYGEN + inline typename cross_product_return_type::type +#else + inline PlainObject +#endif + cross(const MatrixBase& other) const; + + template + EIGEN_DEVICE_FUNC + inline PlainObject cross3(const MatrixBase& other) const; + + EIGEN_DEVICE_FUNC + inline PlainObject unitOrthogonal(void) const; + + 
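Editor's sketch (not part of the diff) exercising the Geometry-module members declared above — cross() and unitOrthogonal(); Eigen/Dense pulls in the Geometry module:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d x = Eigen::Vector3d::UnitX();
  Eigen::Vector3d y = Eigen::Vector3d::UnitY();

  Eigen::Vector3d z = x.cross(y);          // (0,0,1): right-handed basis
  Eigen::Vector3d n = z.unitOrthogonal();  // some unit vector orthogonal to z

  std::cout << "z = " << z.transpose() << "\n";
  std::cout << "n.dot(z) = " << n.dot(z) << "\n";  // ~0 up to rounding
}
```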
EIGEN_DEVICE_FUNC + inline Matrix eulerAngles(Index a0, Index a1, Index a2) const; + + // put this as separate enum value to work around possible GCC 4.3 bug (?) + enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1&&RowsAtCompileTime==1 ? ((internal::traits::Flags&RowMajorBit)==RowMajorBit ? Horizontal : Vertical) + : ColsAtCompileTime==1 ? Vertical : Horizontal }; + typedef Homogeneous HomogeneousReturnType; + EIGEN_DEVICE_FUNC + inline HomogeneousReturnType homogeneous() const; + + enum { + SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 + }; + typedef Block::ColsAtCompileTime==1 ? SizeMinusOne : 1, + internal::traits::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne; + typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne,Scalar,quotient) HNormalizedReturnType; + EIGEN_DEVICE_FUNC + inline const HNormalizedReturnType hnormalized() const; + +////////// Householder module /////////// + + EIGEN_DEVICE_FUNC + void makeHouseholderInPlace(Scalar& tau, RealScalar& beta); + template + EIGEN_DEVICE_FUNC + void makeHouseholder(EssentialPart& essential, + Scalar& tau, RealScalar& beta) const; + template + EIGEN_DEVICE_FUNC + void applyHouseholderOnTheLeft(const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace); + template + EIGEN_DEVICE_FUNC + void applyHouseholderOnTheRight(const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace); + +///////// Jacobi module ///////// + + template + EIGEN_DEVICE_FUNC + void applyOnTheLeft(Index p, Index q, const JacobiRotation& j); + template + EIGEN_DEVICE_FUNC + void applyOnTheRight(Index p, Index q, const JacobiRotation& j); + +///////// SparseCore module ///////// + + template + EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type + cwiseProduct(const SparseMatrixBase &other) const + { + return other.cwiseProduct(derived()); + } + +///////// MatrixFunctions module ///////// + + typedef typename internal::stem_function::type StemFunction; +#define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \ + /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the coefficient-wise Description use ArrayBase::##Name . */ \ + const ReturnType Name() const; +#define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \ + /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the coefficient-wise Description use ArrayBase::##Name . 
*/ \ + const ReturnType Name(Argument) const; + + EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential) + /** \brief Helper function for the unsupported MatrixFunctions module.*/ + const MatrixFunctionReturnValue matrixFunction(StemFunction f) const; + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine) +#if EIGEN_HAS_CXX11_MATH + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine) +#endif + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine) + EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root) + EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm) + EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p) + EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex& p) + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase) + + private: + EIGEN_DEVICE_FUNC explicit MatrixBase(int); + EIGEN_DEVICE_FUNC MatrixBase(int,int); + template EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase&); + protected: + // mixing arrays and matrices is not legal + template Derived& operator+=(const ArrayBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} + // mixing arrays and matrices is not legal + template Derived& operator-=(const ArrayBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} +}; + + +/*************************************************************************** +* Implementation of matrix base methods +***************************************************************************/ + +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline Derived& +MatrixBase::operator*=(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). + * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline void MatrixBase::applyOnTheRight(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); +} + +/** replaces \c *this by \a other * \c *this. 
+ * + * Example: \include MatrixBase_applyOnTheLeft.cpp + * Output: \verbinclude MatrixBase_applyOnTheLeft.out + */ +template +template +inline void MatrixBase::applyOnTheLeft(const EigenBase &other) +{ + other.derived().applyThisOnTheLeft(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIXBASE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NestByValue.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NestByValue.h new file mode 100644 index 000000000..b4275768a --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NestByValue.h @@ -0,0 +1,85 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NESTBYVALUE_H +#define EIGEN_NESTBYVALUE_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + enum { + Flags = traits::Flags & ~NestByRefBit + }; +}; +} + +/** \class NestByValue + * \ingroup Core_Module + * + * \brief Expression which must be nested by value + * + * \tparam ExpressionType the type of the object of which we are requiring nesting-by-value + * + * This class is the return type of MatrixBase::nestByValue() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::nestByValue() + */ +template class NestByValue + : public internal::dense_xpr_base< NestByValue >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) + + EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + + EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } + + EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; } + + protected: + const ExpressionType m_expression; +}; + +/** \returns an expression of the temporary version of *this. + */ +template +EIGEN_DEVICE_FUNC inline const NestByValue +DenseBase::nestByValue() const +{ + return NestByValue(derived()); +} + +namespace internal { + +// Evaluator of Solve -> eval into a temporary +template +struct evaluator > + : public evaluator +{ + typedef evaluator Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue& xpr) + : Base(xpr.nestedExpression()) + {} +}; +} + +} // end namespace Eigen + +#endif // EIGEN_NESTBYVALUE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NoAlias.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NoAlias.h new file mode 100644 index 000000000..570283d90 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NoAlias.h @@ -0,0 +1,109 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
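Editor's note (not part of the diff): NestByValue above stores the wrapped expression by value (note the `const ExpressionType m_expression` member) rather than by reference, which is what you want when the wrapped expression is itself a temporary. A small usage sketch, assuming the Eigen headers from this tree:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d a = Eigen::Matrix2d::Ones();
  Eigen::Matrix2d b = Eigen::Matrix2d::Identity();

  // (a + b) is a temporary expression object; nestByValue() copies it
  // into 'expr', so 'expr' remains valid after this full expression.
  auto expr = (a + b).nestByValue();

  std::cout << Eigen::Matrix2d(expr) << "\n";  // 2 1
                                               // 1 2
}
```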
+ +#ifndef EIGEN_NOALIAS_H +#define EIGEN_NOALIAS_H + +namespace Eigen { + +/** \class NoAlias + * \ingroup Core_Module + * + * \brief Pseudo expression providing an operator = assuming no aliasing + * + * \tparam ExpressionType the type of the object on which to do the lazy assignment + * + * This class represents an expression with special assignment operators + * assuming no aliasing between the target expression and the source expression. + * More precisely it alloas to bypass the EvalBeforeAssignBit flag of the source expression. + * It is the return type of MatrixBase::noalias() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::noalias() + */ +template class StorageBase> +class NoAlias +{ + public: + typedef typename ExpressionType::Scalar Scalar; + + EIGEN_DEVICE_FUNC + explicit NoAlias(ExpressionType& expression) : m_expression(expression) {} + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) + { + call_assignment_no_alias(m_expression, other.derived(), internal::assign_op()); + return m_expression; + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase& other) + { + call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op()); + return m_expression; + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase& other) + { + call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op()); + return m_expression; + } + + EIGEN_DEVICE_FUNC + ExpressionType& expression() const + { + return m_expression; + } + + protected: + ExpressionType& m_expression; +}; + +/** \returns a pseudo expression of \c *this with an operator= assuming + * no aliasing between \c *this and the source expression. + * + * More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag. + * Currently, even though several expressions may alias, only product + * expressions have this flag. Therefore, noalias() is only useful when + * the source expression contains a matrix product. + * + * Here are some examples where noalias is useful: + * \code + * D.noalias() = A * B; + * D.noalias() += A.transpose() * B; + * D.noalias() -= 2 * A * B.adjoint(); + * \endcode + * + * On the other hand the following example will lead to a \b wrong result: + * \code + * A.noalias() = A * B; + * \endcode + * because the result matrix A is also an operand of the matrix product. Therefore, + * there is no alternative than evaluating A * B in a temporary, that is the default + * behavior when you write: + * \code + * A = A * B; + * \endcode + * + * \sa class NoAlias + */ +template +NoAlias EIGEN_DEVICE_FUNC MatrixBase::noalias() +{ + return NoAlias(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_NOALIAS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NumTraits.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NumTraits.h new file mode 100644 index 000000000..72eac5a93 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/NumTraits.h @@ -0,0 +1,335 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
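Editor's note (not part of the diff): a runnable version of the noalias() examples given in the NoAlias documentation above — skipping the temporary is only safe when the destination overlaps none of the operands:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd d(3, 3);

  d.noalias()  = a * b;               // no temporary: d aliases neither a nor b
  d.noalias() += a.transpose() * b;   // also safe

  a = a * b;                          // aliasing: Eigen evaluates into a temporary
  // a.noalias() = a * b;             // WRONG: would read a while overwriting it

  std::cout << d << "\n";
}
```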
+ +#ifndef EIGEN_NUMTRAITS_H +#define EIGEN_NUMTRAITS_H + +namespace Eigen { + +namespace internal { + +// default implementation of digits10(), based on numeric_limits if specialized, +// 0 for integer types, and log10(epsilon()) otherwise. +template< typename T, + bool use_numeric_limits = std::numeric_limits::is_specialized, + bool is_integer = NumTraits::IsInteger> +struct default_digits10_impl +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { return std::numeric_limits::digits10; } +}; + +template +struct default_digits10_impl // Floating point +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { + using std::log10; + using std::ceil; + typedef typename NumTraits::Real Real; + return int(ceil(-log10(NumTraits::epsilon()))); + } +}; + +template +struct default_digits10_impl // Integer +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { return 0; } +}; + + +// default implementation of digits(), based on numeric_limits if specialized, +// 0 for integer types, and log2(epsilon()) otherwise. +template< typename T, + bool use_numeric_limits = std::numeric_limits::is_specialized, + bool is_integer = NumTraits::IsInteger> +struct default_digits_impl +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { return std::numeric_limits::digits; } +}; + +template +struct default_digits_impl // Floating point +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { + using std::log; + using std::ceil; + typedef typename NumTraits::Real Real; + return int(ceil(-log(NumTraits::epsilon())/log(static_cast(2)))); + } +}; + +template +struct default_digits_impl // Integer +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int run() { return 0; } +}; + +} // end namespace internal + +namespace numext { +/** \internal bit-wise cast without changing the underlying bit representation. */ + +// TODO: Replace by std::bit_cast (available in C++20) +template +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) { +#if EIGEN_HAS_TYPE_TRAITS + // The behaviour of memcpy is not specified for non-trivially copyable types + EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value, THIS_TYPE_IS_NOT_SUPPORTED); + EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value && std::is_default_constructible::value, + THIS_TYPE_IS_NOT_SUPPORTED); +#endif + + EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED); + Tgt tgt; + EIGEN_USING_STD(memcpy) + memcpy(&tgt, &src, sizeof(Tgt)); + return tgt; +} +} // namespace numext + +/** \class NumTraits + * \ingroup Core_Module + * + * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. + * + * \tparam T the numeric type at hand + * + * This class stores enums, typedefs and static methods giving information about a numeric type. + * + * The provided data consists of: + * \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real, + * then \c Real is just a typedef to \a T. If \a T is \c std::complex then \c Real + * is a typedef to \a U. + * \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values, + * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives + * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to + * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is + * only intended as a helper for code that needs to explicitly promote types. 
+ * \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for \c std::complex<U>, Literal is defined as \c U.
+ * Of course, this type must be fully compatible with \a T. If in doubt, just use \a T here.
+ * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what
+ * this means, just use \a T here.
+ * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex
+ * type, and to 0 otherwise.
+ * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int,
+ * and to \c 0 otherwise.
+ * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed
+ * by move / add / mul instructions respectively, assuming the data is already stored in CPU registers.
+ * Stay vague here. No need to do architecture-specific stuff. If you don't know what this means, just use \c Eigen::HugeCost.
+ * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
+ * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must
+ * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
+ * \li An epsilon() function which, unlike std::numeric_limits<T>::epsilon(),
+ * returns a \a Real instead of a \a T.
+ * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default
+ * value by the fuzzy comparison operators.
+ * \li highest() and lowest() functions returning the highest and lowest possible values respectively.
+ * \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for floating-point). This is
+ * the analogue of std::numeric_limits<T>::digits,
+ * which is used as the default implementation if specialized.
+ * \li digits10() function returning the number of decimal digits that can be represented without change. This is
+ * the analogue of std::numeric_limits<T>::digits10,
+ * which is used as the default implementation if specialized.
+ * \li min_exponent() and max_exponent() functions returning the lowest and highest possible exponents, respectively,
+ * such that the radix raised to the power exponent-1 is a normalized floating-point number. These are equivalent to
+ * std::numeric_limits<T>::min_exponent /
+ * std::numeric_limits<T>::max_exponent.
+ * \li An infinity() function returning a representation of positive infinity, if available.
+ * \li A quiet_NaN() function returning a non-signaling "not-a-number", if available.
+ */
+
+template<typename T> struct GenericNumTraits
+{
+  enum {
+    IsInteger = std::numeric_limits<T>::is_integer,
+    IsSigned = std::numeric_limits<T>::is_signed,
+    IsComplex = 0,
+    RequireInitialization = internal::is_arithmetic<T>::value ?
0 : 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; + + typedef T Real; + typedef typename internal::conditional< + IsInteger, + typename internal::conditional::type, + T + >::type NonInteger; + typedef T Nested; + typedef T Literal; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline Real epsilon() + { + return numext::numeric_limits::epsilon(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline int digits10() + { + return internal::default_digits10_impl::run(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline int digits() + { + return internal::default_digits_impl::run(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline int min_exponent() + { + return numext::numeric_limits::min_exponent; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline int max_exponent() + { + return numext::numeric_limits::max_exponent; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline Real dummy_precision() + { + // make sure to override this for floating-point types + return Real(0); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline T highest() { + return (numext::numeric_limits::max)(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline T lowest() { + return IsInteger ? (numext::numeric_limits::min)() + : static_cast(-(numext::numeric_limits::max)()); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline T infinity() { + return numext::numeric_limits::infinity(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline T quiet_NaN() { + return numext::numeric_limits::quiet_NaN(); + } +}; + +template struct NumTraits : GenericNumTraits +{}; + +template<> struct NumTraits + : GenericNumTraits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline float dummy_precision() { return 1e-5f; } +}; + +template<> struct NumTraits : GenericNumTraits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline double dummy_precision() { return 1e-12; } +}; + +template<> struct NumTraits + : GenericNumTraits +{ + EIGEN_CONSTEXPR + static inline long double dummy_precision() { return 1e-15l; } +}; + +template struct NumTraits > + : GenericNumTraits > +{ + typedef _Real Real; + typedef typename NumTraits<_Real>::Literal Literal; + enum { + IsComplex = 1, + RequireInitialization = NumTraits<_Real>::RequireInitialization, + ReadCost = 2 * NumTraits<_Real>::ReadCost, + AddCost = 2 * NumTraits::AddCost, + MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline Real epsilon() { return NumTraits::epsilon(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline Real dummy_precision() { return NumTraits::dummy_precision(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline int digits10() { return NumTraits::digits10(); } +}; + +template +struct NumTraits > +{ + typedef Array ArrayType; + typedef typename NumTraits::Real RealScalar; + typedef Array Real; + typedef typename NumTraits::NonInteger NonIntegerScalar; + typedef Array NonInteger; + typedef ArrayType & Nested; + typedef typename NumTraits::Literal Literal; + + enum { + IsComplex = NumTraits::IsComplex, + IsInteger = NumTraits::IsInteger, + IsSigned = NumTraits::IsSigned, + RequireInitialization = 1, + ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::ReadCost), + AddCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::AddCost), + MulCost = ArrayType::SizeAtCompileTime==Dynamic ? 
HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::MulCost) + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline RealScalar epsilon() { return NumTraits::epsilon(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static inline RealScalar dummy_precision() { return NumTraits::dummy_precision(); } + + EIGEN_CONSTEXPR + static inline int digits10() { return NumTraits::digits10(); } +}; + +template<> struct NumTraits + : GenericNumTraits +{ + enum { + RequireInitialization = 1, + ReadCost = HugeCost, + AddCost = HugeCost, + MulCost = HugeCost + }; + + EIGEN_CONSTEXPR + static inline int digits10() { return 0; } + +private: + static inline std::string epsilon(); + static inline std::string dummy_precision(); + static inline std::string lowest(); + static inline std::string highest(); + static inline std::string infinity(); + static inline std::string quiet_NaN(); +}; + +// Empty specialization for void to allow template specialization based on NumTraits::Real with T==void and SFINAE. +template<> struct NumTraits {}; + +template<> struct NumTraits : GenericNumTraits {}; + +} // end namespace Eigen + +#endif // EIGEN_NUMTRAITS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PartialReduxEvaluator.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PartialReduxEvaluator.h new file mode 100644 index 000000000..29abf35b9 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PartialReduxEvaluator.h @@ -0,0 +1,232 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2018 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARTIALREDUX_H +#define EIGEN_PARTIALREDUX_H + +namespace Eigen { + +namespace internal { + + +/*************************************************************************** +* +* This file provides evaluators for partial reductions. +* There are two modes: +* +* - scalar path: simply calls the respective function on the column or row. +* -> nothing special here, all the tricky part is handled by the return +* types of VectorwiseOp's members. They embed the functor calling the +* respective DenseBase's member function. +* +* - vectorized path: implements a packet-wise reductions followed by +* some (optional) processing of the outcome, e.g., division by n for mean. +* +* For the vectorized path let's observe that the packet-size and outer-unrolling +* are both decided by the assignement logic. So all we have to do is to decide +* on the inner unrolling. +* +* For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h, +* but be need to be careful to specify correct increment. +* +***************************************************************************/ + + +/* logic deciding a strategy for unrolling of vectorized paths */ +template +struct packetwise_redux_traits +{ + enum { + OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime, + Cost = OuterSize == Dynamic ? HugeCost + : OuterSize * Evaluator::CoeffReadCost + (OuterSize-1) * functor_traits::Cost, + Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? 
CompleteUnrolling : NoUnrolling + }; + +}; + +/* Value to be returned when size==0 , by default let's return 0 */ +template +EIGEN_DEVICE_FUNC +PacketType packetwise_redux_empty_value(const Func& ) { return pset1(0); } + +/* For products the default is 1 */ +template +EIGEN_DEVICE_FUNC +PacketType packetwise_redux_empty_value(const scalar_product_op& ) { return pset1(1); } + +/* Perform the actual reduction */ +template::Unrolling +> +struct packetwise_redux_impl; + +/* Perform the actual reduction with unrolling */ +template +struct packetwise_redux_impl +{ + typedef redux_novec_unroller Base; + typedef typename Evaluator::Scalar Scalar; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE + PacketType run(const Evaluator &eval, const Func& func, Index /*size*/) + { + return redux_vec_unroller::OuterSize>::template run(eval,func); + } +}; + +/* Add a specialization of redux_vec_unroller for size==0 at compiletime. + * This specialization is not required for general reductions, which is + * why it is defined here. + */ +template +struct redux_vec_unroller +{ + template + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE PacketType run(const Evaluator &, const Func& f) + { + return packetwise_redux_empty_value(f); + } +}; + +/* Perform the actual reduction for dynamic sizes */ +template +struct packetwise_redux_impl +{ + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketScalar; + + template + EIGEN_DEVICE_FUNC + static PacketType run(const Evaluator &eval, const Func& func, Index size) + { + if(size==0) + return packetwise_redux_empty_value(func); + + const Index size4 = (size-1)&(~3); + PacketType p = eval.template packetByOuterInner(0,0); + Index i = 1; + // This loop is optimized for instruction pipelining: + // - each iteration generates two independent instructions + // - thanks to branch prediction and out-of-order execution we have independent instructions across loops + for(; i(i+0,0),eval.template packetByOuterInner(i+1,0)), + func.packetOp(eval.template packetByOuterInner(i+2,0),eval.template packetByOuterInner(i+3,0)))); + for(; i(i,0)); + return p; + } +}; + +template< typename ArgType, typename MemberOp, int Direction> +struct evaluator > + : evaluator_base > +{ + typedef PartialReduxExpr XprType; + typedef typename internal::nested_eval::type ArgTypeNested; + typedef typename internal::add_const_on_value_type::type ConstArgTypeNested; + typedef typename internal::remove_all::type ArgTypeNestedCleaned; + typedef typename ArgType::Scalar InputScalar; + typedef typename XprType::Scalar Scalar; + enum { + TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime) + }; + typedef typename MemberOp::template Cost CostOpType; + enum { + CoeffReadCost = TraversalSize==Dynamic ? HugeCost + : TraversalSize==0 ? 1 + : int(TraversalSize) * int(evaluator::CoeffReadCost) + int(CostOpType::value), + + _ArgFlags = evaluator::Flags, + + _Vectorizable = bool(int(_ArgFlags)&PacketAccessBit) + && bool(MemberOp::Vectorizable) + && (Direction==int(Vertical) ? bool(_ArgFlags&RowMajorBit) : (_ArgFlags&RowMajorBit)==0) + && (TraversalSize!=0), + + Flags = (traits::Flags&RowMajorBit) + | (evaluator::Flags&(HereditaryBits&(~RowMajorBit))) + | (_Vectorizable ? 
PacketAccessBit : 0) + | LinearAccessBit, + + Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) + : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : (TraversalSize==0 ? 1 : int(CostOpType::value))); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Scalar coeff(Index i, Index j) const + { + return coeff(Direction==Vertical ? j : i); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Scalar coeff(Index index) const + { + return m_functor(m_arg.template subVector(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketType packet(Index i, Index j) const + { + return packet(Direction==Vertical ? j : i); + } + + template + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC + PacketType packet(Index idx) const + { + enum { PacketSize = internal::unpacket_traits::size }; + typedef Block PanelType; + + PanelType panel(m_arg, + Direction==Vertical ? 0 : idx, + Direction==Vertical ? idx : 0, + Direction==Vertical ? m_arg.rows() : Index(PacketSize), + Direction==Vertical ? Index(PacketSize) : m_arg.cols()); + + // FIXME + // See bug 1612, currently if PacketSize==1 (i.e. complex with 128bits registers) then the storage-order of panel get reversed + // and methods like packetByOuterInner do not make sense anymore in this context. + // So let's just by pass "vectorization" in this case: + if(PacketSize==1) + return internal::pset1(coeff(idx)); + + typedef typename internal::redux_evaluator PanelEvaluator; + PanelEvaluator panel_eval(panel); + typedef typename MemberOp::BinaryOp BinaryOp; + PacketType p = internal::packetwise_redux_impl::template run(panel_eval,m_functor.binaryFunc(),m_arg.outerSize()); + return p; + } + +protected: + ConstArgTypeNested m_arg; + const MemberOp m_functor; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARTIALREDUX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PermutationMatrix.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PermutationMatrix.h new file mode 100644 index 000000000..69401bf41 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PermutationMatrix.h @@ -0,0 +1,605 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PERMUTATIONMATRIX_H +#define EIGEN_PERMUTATIONMATRIX_H + +namespace Eigen { + +namespace internal { + +enum PermPermProduct_t {PermPermProduct}; + +} // end namespace internal + +/** \class PermutationBase + * \ingroup Core_Module + * + * \brief Base class for permutations + * + * \tparam Derived the derived class + * + * This class is the base class for all expressions representing a permutation matrix, + * internally stored as a vector of integers. + * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix + * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: + * \f[ P_\sigma(e_i) = e_{\sigma(i)}. 
\f] + * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have: + * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f] + * + * Permutation matrices are square and invertible. + * + * Notice that in addition to the member functions and operators listed here, there also are non-member + * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) + * on either side. + * + * \sa class PermutationMatrix, class PermutationWrapper + */ +template +class PermutationBase : public EigenBase +{ + typedef internal::traits Traits; + typedef EigenBase Base; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + enum { + Flags = Traits::Flags, + RowsAtCompileTime = Traits::RowsAtCompileTime, + ColsAtCompileTime = Traits::ColsAtCompileTime, + MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Traits::MaxColsAtCompileTime + }; + typedef typename Traits::StorageIndex StorageIndex; + typedef Matrix + DenseMatrixType; + typedef PermutationMatrix + PlainPermutationType; + typedef PlainPermutationType PlainObject; + using Base::derived; + typedef Inverse InverseReturnType; + typedef void Scalar; + #endif + + /** Copies the other permutation into *this */ + template + Derived& operator=(const PermutationBase& other) + { + indices() = other.indices(); + return derived(); + } + + /** Assignment from the Transpositions \a tr */ + template + Derived& operator=(const TranspositionsBase& tr) + { + setIdentity(tr.size()); + for(Index k=size()-1; k>=0; --k) + applyTranspositionOnTheRight(k,tr.coeff(k)); + return derived(); + } + + /** \returns the number of rows */ + inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); } + + /** \returns the number of columns */ + inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); } + + /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ + inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const + { + other.setZero(); + for (Index i=0; i=0 && j>=0 && i=0 && j>=0 && i + void assignTranspose(const PermutationBase& other) + { + for (Index i=0; i + void assignProduct(const Lhs& lhs, const Rhs& rhs) + { + eigen_assert(lhs.cols() == rhs.rows()); + for (Index i=0; i + inline PlainPermutationType operator*(const PermutationBase& other) const + { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); } + + /** \returns the product of a permutation with another inverse permutation. + * + * \note \blank \note_try_to_help_rvo + */ + template + inline PlainPermutationType operator*(const InverseImpl& other) const + { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); } + + /** \returns the product of an inverse permutation with another permutation. + * + * \note \blank \note_try_to_help_rvo + */ + template friend + inline PlainPermutationType operator*(const InverseImpl& other, const PermutationBase& perm) + { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } + + /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation. + * + * This function is O(\c n) procedure allocating a buffer of \c n booleans. 
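+ * A minimal sketch, assuming dynamic-size indices:
+ * \code
+ * PermutationMatrix<Dynamic, Dynamic> P(3);
+ * P.setIdentity();
+ * P.applyTranspositionOnTheRight(0, 2); // a single swap
+ * Index d = P.determinant();            // -1: odd permutation
+ * \endcode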
+ */ + Index determinant() const + { + Index res = 1; + Index n = size(); + Matrix mask(n); + mask.fill(false); + Index r = 0; + while(r < n) + { + // search for the next seed + while(r=n) + break; + // we got one, let's follow it until we are back to the seed + Index k0 = r++; + mask.coeffRef(k0) = true; + for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k)) + { + mask.coeffRef(k) = true; + res = -res; + } + } + return res; + } + + protected: + +}; + +namespace internal { +template +struct traits > + : traits > +{ + typedef PermutationStorage StorageKind; + typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; + typedef _StorageIndex StorageIndex; + typedef void Scalar; +}; +} + +/** \class PermutationMatrix + * \ingroup Core_Module + * + * \brief Permutation matrix + * + * \tparam SizeAtCompileTime the number of rows/cols, or Dynamic + * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. + * \tparam _StorageIndex the integer type of the indices + * + * This class represents a permutation matrix, internally stored as a vector of integers. + * + * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix + */ +template +class PermutationMatrix : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + typedef const PermutationMatrix& Nested; + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename Traits::StorageIndex StorageIndex; + #endif + + inline PermutationMatrix() + {} + + /** Constructs an uninitialized permutation matrix of given size. + */ + explicit inline PermutationMatrix(Index size) : m_indices(size) + { + eigen_internal_assert(size <= NumTraits::highest()); + } + + /** Copy constructor. */ + template + inline PermutationMatrix(const PermutationBase& other) + : m_indices(other.indices()) {} + + /** Generic constructor from expression of the indices. The indices + * array has the meaning that the permutations sends each integer i to indices[i]. + * + * \warning It is your responsibility to check that the indices array that you passes actually + * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the + * array's size. + */ + template + explicit inline PermutationMatrix(const MatrixBase& indices) : m_indices(indices) + {} + + /** Convert the Transpositions \a tr to a permutation matrix */ + template + explicit PermutationMatrix(const TranspositionsBase& tr) + : m_indices(tr.size()) + { + *this = tr; + } + + /** Copies the other permutation into *this */ + template + PermutationMatrix& operator=(const PermutationBase& other) + { + m_indices = other.indices(); + return *this; + } + + /** Assignment from the Transpositions \a tr */ + template + PermutationMatrix& operator=(const TranspositionsBase& tr) + { + return Base::operator=(tr.derived()); + } + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. 
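+ * For example:
+ * \code
+ * PermutationMatrix<Dynamic, Dynamic> P(3);
+ * P.indices() << 1, 2, 0; // the permutation sends 0 to 1, 1 to 2, and 2 to 0
+ * \endcode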
*/ + IndicesType& indices() { return m_indices; } + + + /**** multiplication helpers to hopefully get RVO ****/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + PermutationMatrix(const InverseImpl& other) + : m_indices(other.derived().nestedExpression().size()) + { + eigen_internal_assert(m_indices.size() <= NumTraits::highest()); + StorageIndex end = StorageIndex(m_indices.size()); + for (StorageIndex i=0; i + PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) + : m_indices(lhs.indices().size()) + { + Base::assignProduct(lhs,rhs); + } +#endif + + protected: + + IndicesType m_indices; +}; + + +namespace internal { +template +struct traits,_PacketAccess> > + : traits > +{ + typedef PermutationStorage StorageKind; + typedef Map, _PacketAccess> IndicesType; + typedef _StorageIndex StorageIndex; + typedef void Scalar; +}; +} + +template +class Map,_PacketAccess> + : public PermutationBase,_PacketAccess> > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; + #endif + + inline Map(const StorageIndex* indicesPtr) + : m_indices(indicesPtr) + {} + + inline Map(const StorageIndex* indicesPtr, Index size) + : m_indices(indicesPtr,size) + {} + + /** Copies the other permutation into *this */ + template + Map& operator=(const PermutationBase& other) + { return Base::operator=(other.derived()); } + + /** Assignment from the Transpositions \a tr */ + template + Map& operator=(const TranspositionsBase& tr) + { return Base::operator=(tr.derived()); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. */ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + +template class TranspositionsWrapper; +namespace internal { +template +struct traits > +{ + typedef PermutationStorage StorageKind; + typedef void Scalar; + typedef typename _IndicesType::Scalar StorageIndex; + typedef _IndicesType IndicesType; + enum { + RowsAtCompileTime = _IndicesType::SizeAtCompileTime, + ColsAtCompileTime = _IndicesType::SizeAtCompileTime, + MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime, + Flags = 0 + }; +}; +} + +/** \class PermutationWrapper + * \ingroup Core_Module + * + * \brief Class to view a vector of integers as a permutation matrix + * + * \tparam _IndicesType the type of the vector of integer (can be any compatible expression) + * + * This class allows to view any vector expression of integers as a permutation matrix. + * + * \sa class PermutationBase, class PermutationMatrix + */ +template +class PermutationWrapper : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + #endif + + inline PermutationWrapper(const IndicesType& indices) + : m_indices(indices) + {} + + /** const version of indices(). 
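+ * A short sketch of the wrapper itself, assuming the usual Eigen typedefs:
+ * \code
+ * VectorXi idx(3);
+ * idx << 2, 0, 1;
+ * PermutationWrapper<VectorXi> P(idx);      // view idx as a permutation matrix
+ * MatrixXd B = P * MatrixXd::Random(3, 3);  // permutes the rows
+ * \endcode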
*/ + const typename internal::remove_all::type& + indices() const { return m_indices; } + + protected: + + typename IndicesType::Nested m_indices; +}; + + +/** \returns the matrix with the permutation applied to the columns. + */ +template +EIGEN_DEVICE_FUNC +const Product +operator*(const MatrixBase &matrix, + const PermutationBase& permutation) +{ + return Product + (matrix.derived(), permutation.derived()); +} + +/** \returns the matrix with the permutation applied to the rows. + */ +template +EIGEN_DEVICE_FUNC +const Product +operator*(const PermutationBase &permutation, + const MatrixBase& matrix) +{ + return Product + (permutation.derived(), matrix.derived()); +} + + +template +class InverseImpl + : public EigenBase > +{ + typedef typename PermutationType::PlainPermutationType PlainPermutationType; + typedef internal::traits PermTraits; + protected: + InverseImpl() {} + public: + typedef Inverse InverseType; + using EigenBase >::derived; + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename PermutationType::DenseMatrixType DenseMatrixType; + enum { + RowsAtCompileTime = PermTraits::RowsAtCompileTime, + ColsAtCompileTime = PermTraits::ColsAtCompileTime, + MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime + }; + #endif + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const + { + other.setZero(); + for (Index i=0; i friend + const Product + operator*(const MatrixBase& matrix, const InverseType& trPerm) + { + return Product(matrix.derived(), trPerm.derived()); + } + + /** \returns the matrix with the inverse permutation applied to the rows. + */ + template + const Product + operator*(const MatrixBase& matrix) const + { + return Product(derived(), matrix.derived()); + } +}; + +template +const PermutationWrapper MatrixBase::asPermutation() const +{ + return derived(); +} + +namespace internal { + +template<> struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PERMUTATIONMATRIX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PlainObjectBase.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PlainObjectBase.h new file mode 100644 index 000000000..e2ddbd1d5 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/PlainObjectBase.h @@ -0,0 +1,1128 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
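The PermutationMatrix.h file above supplies the operator* overloads exercised below. A minimal round-trip sketch (illustrative only):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      PermutationMatrix<Dynamic, Dynamic> P(3);
      P.indices() << 1, 2, 0;

      Matrix3d A = Matrix3d::Random();
      Matrix3d PA = P * A;              // rows permuted
      Matrix3d AP = A * P;              // columns permuted
      Matrix3d back = P.inverse() * PA; // undoes the row permutation
      return back.isApprox(A) ? 0 : 1;
    }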
+ +#ifndef EIGEN_DENSESTORAGEBASE_H +#define EIGEN_DENSESTORAGEBASE_H + +#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO) +# define EIGEN_INITIALIZE_COEFFS +# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(Index i=0;i::quiet_NaN(); +#else +# undef EIGEN_INITIALIZE_COEFFS +# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED +#endif + +namespace Eigen { + +namespace internal { + +template struct check_rows_cols_for_overflow { + template + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE void run(Index, Index) + { + } +}; + +template<> struct check_rows_cols_for_overflow { + template + EIGEN_DEVICE_FUNC + static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols) + { + // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 + // we assume Index is signed + Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed + bool error = (rows == 0 || cols == 0) ? false + : (rows > max_index / cols); + if (error) + throw_std_bad_alloc(); + } +}; + +template +struct conservative_resize_like_impl; + +template struct matrix_swap_impl; + +} // end namespace internal + +#ifdef EIGEN_PARSED_BY_DOXYGEN +namespace doxygen { + +// This is a workaround to doxygen not being able to understand the inheritance logic +// when it is hidden by the dense_xpr_base helper struct. +// Moreover, doxygen fails to include members that are not documented in the declaration body of +// MatrixBase if we inherits MatrixBase >, +// this is why we simply inherits MatrixBase, though this does not make sense. + +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template struct dense_xpr_base_dispatcher; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher > + : public MatrixBase {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher > + : public ArrayBase {}; + +} // namespace doxygen + +/** \class PlainObjectBase + * \ingroup Core_Module + * \brief %Dense storage base class for matrices and arrays. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. 
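+ * When \c EIGEN_INITIALIZE_MATRICES_BY_ZERO is defined (see the macro block at the top of this
+ * file), newly constructed coefficients are zero-initialized, e.g.:
+ * \code
+ * // built with -DEIGEN_INITIALIZE_MATRICES_BY_ZERO
+ * MatrixXd m(2, 2); // all four coefficients start at 0
+ * \endcode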
+ * + * \tparam Derived is the derived type, e.g., a Matrix or Array + * + * \sa \ref TopicClassHierarchy + */ +template +class PlainObjectBase : public doxygen::dense_xpr_base_dispatcher +#else +template +class PlainObjectBase : public internal::dense_xpr_base::type +#endif +{ + public: + enum { Options = internal::traits::Options }; + typedef typename internal::dense_xpr_base::type Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef Derived DenseType; + + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + + typedef Eigen::Map MapType; + typedef const Eigen::Map ConstMapType; + typedef Eigen::Map AlignedMapType; + typedef const Eigen::Map ConstAlignedMapType; + template struct StridedMapType { typedef Eigen::Map type; }; + template struct StridedConstMapType { typedef Eigen::Map type; }; + template struct StridedAlignedMapType { typedef Eigen::Map type; }; + template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; + + protected: + DenseStorage m_storage; + + public: + enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits::Alignment>0) }; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + + EIGEN_DEVICE_FUNC + Base& base() { return *static_cast(this); } + EIGEN_DEVICE_FUNC + const Base& base() const { return *static_cast(this); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index rows() const EIGEN_NOEXCEPT { return m_storage.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + Index cols() const EIGEN_NOEXCEPT { return m_storage.cols(); } + + /** This is an overloaded version of DenseCoeffsBase::coeff(Index,Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeff(Index) const for details. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const + { + if(Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeff(Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeff(Index) const for details. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + return m_storage.data()[index]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index,Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeffRef(Index,Index) const for details. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) + { + if(Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. 
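+ * A short sketch of the coeff()/coeffRef() pair:
+ * \code
+ * MatrixXd m(2, 2);
+ * m.coeffRef(0, 1) = 3.5;   // write access, no bounds checking
+ * double x = m.coeff(0, 1); // read access
+ * double y = m(0, 1);       // operator() additionally asserts the indices in debug builds
+ * \endcode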
+ * + * See DenseCoeffsBase::coeffRef(Index) const for details. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return m_storage.data()[index]; + } + + /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index). + * It is provided for convenience. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const + { + if(Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index). + * It is provided for convenience. */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const + { + return m_storage.data()[index]; + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const + { + return internal::ploadt + (m_storage.data() + (Flags & RowMajorBit + ? colId + rowId * m_storage.cols() + : rowId + colId * m_storage.rows())); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const + { + return internal::ploadt(m_storage.data() + index); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) + { + internal::pstoret + (m_storage.data() + (Flags & RowMajorBit + ? colId + rowId * m_storage.cols() + : rowId + colId * m_storage.rows()), val); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) + { + internal::pstoret(m_storage.data() + index, val); + } + + /** \returns a const pointer to the data array of this matrix */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const + { return m_storage.data(); } + + /** \returns a pointer to the data array of this matrix */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() + { return m_storage.data(); } + + /** Resizes \c *this to a \a rows x \a cols matrix. + * + * This method is intended for dynamic-size matrices, although it is legal to call it on any + * matrix as long as fixed dimensions are left unchanged. If you only want to change the number + * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). + * + * If the current number of coefficients of \c *this exactly matches the + * product \a rows * \a cols, then no memory allocation is performed and + * the current values are left unchanged. In all other cases, including + * shrinking, the data is reallocated and all previous values are lost. 
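+ * A minimal sketch:
+ * \code
+ * MatrixXd m(2, 3);
+ * m.resize(4, 5); // coefficient count changes: reallocates, old values are lost
+ * m.resize(4, 5); // same sizes: no-op
+ * m.resize(5, 4); // still 20 coefficients: no reallocation, values kept
+ * \endcode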
+ * + * Example: \include Matrix_resize_int_int.cpp + * Output: \verbinclude Matrix_resize_int_int.out + * + * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void resize(Index rows, Index cols) + { + eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,rows==RowsAtCompileTime) + && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,cols==ColsAtCompileTime) + && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,rows<=MaxRowsAtCompileTime) + && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,cols<=MaxColsAtCompileTime) + && rows>=0 && cols>=0 && "Invalid sizes when resizing a matrix or array."); + internal::check_rows_cols_for_overflow::run(rows, cols); + #ifdef EIGEN_INITIALIZE_COEFFS + Index size = rows*cols; + bool size_changed = size != this->size(); + m_storage.resize(size, rows, cols); + if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + #else + m_storage.resize(rows*cols, rows, cols); + #endif + } + + /** Resizes \c *this to a vector of length \a size + * + * \only_for_vectors. This method does not work for + * partially dynamic matrices when the static dimension is anything other + * than 1. For example it will not work with Matrix. + * + * Example: \include Matrix_resize_int.cpp + * Output: \verbinclude Matrix_resize_int.out + * + * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + EIGEN_DEVICE_FUNC + inline void resize(Index size) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) + eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); + #ifdef EIGEN_INITIALIZE_COEFFS + bool size_changed = size != this->size(); + #endif + if(RowsAtCompileTime == 1) + m_storage.resize(size, 1, size); + else + m_storage.resize(size, size, 1); + #ifdef EIGEN_INITIALIZE_COEFFS + if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + #endif + } + + /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange + * as in the example below. + * + * Example: \include Matrix_resize_NoChange_int.cpp + * Output: \verbinclude Matrix_resize_NoChange_int.out + * + * \sa resize(Index,Index) + */ + EIGEN_DEVICE_FUNC + inline void resize(NoChange_t, Index cols) + { + resize(rows(), cols); + } + + /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange + * as in the example below. + * + * Example: \include Matrix_resize_int_NoChange.cpp + * Output: \verbinclude Matrix_resize_int_NoChange.out + * + * \sa resize(Index,Index) + */ + EIGEN_DEVICE_FUNC + inline void resize(Index rows, NoChange_t) + { + resize(rows, cols()); + } + + /** Resizes \c *this to have the same dimensions as \a other. + * Takes care of doing all the checking that's needed. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. 
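+ * For instance:
+ * \code
+ * RowVectorXf r(2);
+ * VectorXf v(5);
+ * r.resizeLike(v); // r becomes a 1x5 row-vector rather than a 5x1 column
+ * \endcode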
+ */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) + { + const OtherDerived& other = _other.derived(); + internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); + const Index othersize = other.rows()*other.cols(); + if(RowsAtCompileTime == 1) + { + eigen_assert(other.rows() == 1 || other.cols() == 1); + resize(1, othersize); + } + else if(ColsAtCompileTime == 1) + { + eigen_assert(other.rows() == 1 || other.cols() == 1); + resize(othersize, 1); + } + else resize(other.rows(), other.cols()); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or + * conservativeResize(Index, NoChange_t). + * + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will be uninitialized. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) + { + internal::conservative_resize_like_impl::run(*this, rows, cols); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of columns unchanged. + * + * In case the matrix is growing, new rows will be uninitialized. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) + { + // Note: see the comment in conservativeResize(Index,Index) + conservativeResize(rows, cols()); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of rows unchanged. + * + * In case the matrix is growing, new columns will be uninitialized. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) + { + // Note: see the comment in conservativeResize(Index,Index) + conservativeResize(rows(), cols); + } + + /** Resizes the vector to \a size while retaining old values. + * + * \only_for_vectors. This method does not work for + * partially dynamic matrices when the static dimension is anything other + * than 1. For example it will not work with Matrix. + * + * When values are appended, they will be uninitialized. + */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void conservativeResize(Index size) + { + internal::conservative_resize_like_impl::run(*this, size); + } + + /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. + * + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or + * conservativeResize(Index, NoChange_t). + * + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will copied from \c other. + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) + { + internal::conservative_resize_like_impl::run(*this, other); + } + + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. 
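+ * A short sketch of the conservativeResize() members declared above:
+ * \code
+ * MatrixXi m = MatrixXi::Constant(2, 2, 7);
+ * m.conservativeResize(3, 3);        // the old 2x2 block is kept, new cells are uninitialized
+ * m.conservativeResize(NoChange, 2); // drop a column, keep the rows
+ * \endcode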
+ */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) + { + return _set(other); + } + + /** \sa MatrixBase::lazyAssign() */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase& other) + { + _resize_to_match(other); + return Base::lazyAssign(other.derived()); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue& func) + { + resize(func.rows(), func.cols()); + return Base::operator=(func); + } + + // Prevent user from trying to instantiate PlainObjectBase objects + // by making all its constructor protected. See bug 1074. + protected: + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase() : m_storage() + { +// _check_template_params(); +// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + // FIXME is it still needed ? + /** \internal */ + EIGEN_DEVICE_FUNC + explicit PlainObjectBase(internal::constructor_without_unaligned_array_assert) + : m_storage(internal::constructor_without_unaligned_array_assert()) + { +// _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } +#endif + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + PlainObjectBase(PlainObjectBase&& other) EIGEN_NOEXCEPT + : m_storage( std::move(other.m_storage) ) + { + } + + EIGEN_DEVICE_FUNC + PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT + { + _check_template_params(); + m_storage = std::move(other.m_storage); + return *this; + } +#endif + + /** Copy constructor */ + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other) + : Base(), m_storage(other.m_storage) { } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) + : m_storage(size, rows, cols) + { +// _check_template_params(); +// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + + #if EIGEN_HAS_CXX11 + /** \brief Construct a row of column vector with fixed size from an arbitrary number of coefficients. \cpp11 + * + * \only_for_vectors + * + * This constructor is for 1D array or vectors with more than 4 coefficients. + * There exists C++98 analogue constructors for fixed-size array/vector having 1, 2, 3, or 4 coefficients. + * + * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this + * constructor must match the the fixed number of rows (resp. columns) of \c *this. + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... 
args) + : m_storage() + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4); + m_storage.data()[0] = a0; + m_storage.data()[1] = a1; + m_storage.data()[2] = a2; + m_storage.data()[3] = a3; + Index i = 4; + auto x = {(m_storage.data()[i++] = args, 0)...}; + static_cast(x); + } + + /** \brief Constructs a Matrix or Array and initializes it by elements given by an initializer list of initializer + * lists \cpp11 + */ + EIGEN_DEVICE_FUNC + explicit EIGEN_STRONG_INLINE PlainObjectBase(const std::initializer_list>& list) + : m_storage() + { + _check_template_params(); + + size_t list_size = 0; + if (list.begin() != list.end()) { + list_size = list.begin()->size(); + } + + // This is to allow syntax like VectorXi {{1, 2, 3, 4}} + if (ColsAtCompileTime == 1 && list.size() == 1) { + eigen_assert(list_size == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); + resize(list_size, ColsAtCompileTime); + std::copy(list.begin()->begin(), list.begin()->end(), m_storage.data()); + } else { + eigen_assert(list.size() == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); + eigen_assert(list_size == static_cast(ColsAtCompileTime) || ColsAtCompileTime == Dynamic); + resize(list.size(), list_size); + + Index row_index = 0; + for (const std::initializer_list& row : list) { + eigen_assert(list_size == row.size()); + Index col_index = 0; + for (const Scalar& e : row) { + coeffRef(row_index, col_index) = e; + ++col_index; + } + ++row_index; + } + } + } + #endif // end EIGEN_HAS_CXX11 + + /** \sa PlainObjectBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase &other) + : m_storage() + { + _check_template_params(); + resizeLike(other); + _set_noalias(other); + } + + /** \sa PlainObjectBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) + : m_storage() + { + _check_template_params(); + resizeLike(other); + *this = other.derived(); + } + /** \brief Copy constructor with in-place evaluation */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue& other) + { + _check_template_params(); + // FIXME this does not automatically transpose vectors if necessary + resize(other.rows(), other.cols()); + other.evalTo(this->derived()); + } + + public: + + /** \brief Copies the generic expression \a other into *this. + * \copydetails DenseBase::operator=(const EigenBase &other) + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& operator=(const EigenBase &other) + { + _resize_to_match(other); + Base::operator=(other.derived()); + return this->derived(); + } + + /** \name Map + * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, + * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned + * \a data pointers. 
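+ * A minimal sketch of the unaligned variants:
+ * \code
+ * float data[] = {1, 2, 3, 4, 5, 6};
+ * auto m = Matrix<float, 2, 3>::Map(data); // fixed-size view over data, no copy
+ * VectorXf v = VectorXf::Map(data, 6);     // dynamic-size map, copied into v
+ * \endcode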
+    /** \name Map
+      * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
+      * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned
+      * \a data pointers.
+      *
+      * Here is an example using strides:
+      * \include Matrix_Map_stride.cpp
+      * Output: \verbinclude Matrix_Map_stride.out
+      *
+      * \see class Map
+      */
+    //@{
+    static inline ConstMapType Map(const Scalar* data)
+    { return ConstMapType(data); }
+    static inline MapType Map(Scalar* data)
+    { return MapType(data); }
+    static inline ConstMapType Map(const Scalar* data, Index size)
+    { return ConstMapType(data, size); }
+    static inline MapType Map(Scalar* data, Index size)
+    { return MapType(data, size); }
+    static inline ConstMapType Map(const Scalar* data, Index rows, Index cols)
+    { return ConstMapType(data, rows, cols); }
+    static inline MapType Map(Scalar* data, Index rows, Index cols)
+    { return MapType(data, rows, cols); }
+
+    static inline ConstAlignedMapType MapAligned(const Scalar* data)
+    { return ConstAlignedMapType(data); }
+    static inline AlignedMapType MapAligned(Scalar* data)
+    { return AlignedMapType(data); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size)
+    { return ConstAlignedMapType(data, size); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index size)
+    { return AlignedMapType(data, size); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
+    { return ConstAlignedMapType(data, rows, cols); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
+    { return AlignedMapType(data, rows, cols); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    //@}
+
+    using Base::setConstant;
+    EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val);
+    EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val);
+    EIGEN_DEVICE_FUNC Derived& setConstant(NoChange_t, Index cols, const Scalar& val);
+    EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, NoChange_t, const Scalar& val);
+
+    using Base::setZero;
+    EIGEN_DEVICE_FUNC Derived& setZero(Index size);
+    EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols);
+    EIGEN_DEVICE_FUNC Derived& setZero(NoChange_t, Index cols);
+    EIGEN_DEVICE_FUNC Derived& setZero(Index rows, NoChange_t);
+
+    using Base::setOnes;
+    EIGEN_DEVICE_FUNC Derived& setOnes(Index size);
+    EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols);
+    EIGEN_DEVICE_FUNC Derived& setOnes(NoChange_t, Index cols);
+    EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, NoChange_t);
+
+    using Base::setRandom;
+    Derived& setRandom(Index size);
+    Derived& setRandom(Index rows, Index cols);
+    Derived& setRandom(NoChange_t, Index cols);
+    Derived& setRandom(Index rows, NoChange_t);
+
+    #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
+    #include EIGEN_PLAINOBJECTBASE_PLUGIN
+    #endif
+
+  protected:
+    /** \internal Resizes *this in preparation for assigning \a other to it.
+      * Takes care of doing all the checking that's needed.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
+    {
+      #ifdef EIGEN_NO_AUTOMATIC_RESIZING
+      eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
+                 : (rows() == other.rows() && cols() == other.cols())))
+        && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+      EIGEN_ONLY_USED_FOR_DEBUG(other);
+      #else
+      resizeLike(other);
+      #endif
+    }
+
+    /**
+      * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
+      *
+      * \internal
+      */
+    // aliasing is dealt with once in internal::call_assignment
+    // so at this stage we have to assume aliasing... and resizing has to be done later.
+    template<typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
+    {
+      internal::call_assignment(this->derived(), other.derived());
+      return this->derived();
+    }
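The _resize_to_match()/_set() pair above is why plain assignment can change the size of a dynamic-size object. A small sketch of the documented behavior (standard Eigen API; with EIGEN_NO_AUTOMATIC_RESIZING defined, the resize becomes an assert instead):

```cpp
#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::MatrixXf a(2, 2);
  a.setOnes();

  Eigen::MatrixXf b(5, 3);
  b.setZero();

  a = b;  // a is automatically resized to 5x3 before the copy
  assert(a.rows() == 5 && a.cols() == 3);

  // Copying a row vector into a column vector is allowed; the target
  // stays a column vector of matching length.
  Eigen::RowVector3f r(1.f, 2.f, 3.f);
  Eigen::VectorXf c;
  c = r;
  assert(c.size() == 3);
}
```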
+    /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
+      * is the case when creating a new matrix) so one can enforce lazy evaluation.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set()
+      */
+    template<typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
+    {
+      // I don't think we need this resize call since the lazyAssign will anyways resize
+      // and lazyAssign will be called by the assign selector.
+      //_resize_to_match(other);
+      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
+      // it wouldn't allow to copy a row-vector into a column-vector.
+      internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
+      return this->derived();
+    }
+
+    template<typename T0, typename T1>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+    {
+      const bool t0_is_integer_alike = internal::is_valid_index_type<T0>::value;
+      const bool t1_is_integer_alike = internal::is_valid_index_type<T1>::value;
+      EIGEN_STATIC_ASSERT(t0_is_integer_alike &&
+                          t1_is_integer_alike,
+                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
+      resize(rows,cols);
+    }
+
+    template<typename T0, typename T1>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+      m_storage.data()[0] = Scalar(val0);
+      m_storage.data()[1] = Scalar(val1);
+    }
+
+    template<typename T0, typename T1>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1,
+                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)
+                                                                  && (internal::is_same<T0,Index>::value)
+                                                                  && (internal::is_same<T1,Index>::value)
+                                                                  && Base::SizeAtCompileTime==2,T1>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+      m_storage.data()[0] = Scalar(val0);
+      m_storage.data()[1] = Scalar(val1);
+    }
+
+    // The argument is convertible to the Index type and we either have a non 1x1 Matrix, or a dynamic-sized Array,
+    // then the argument is meant to be the size of the object.
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(Index size, typename internal::enable_if<    (Base::SizeAtCompileTime!=1 || !internal::is_convertible<T, Scalar>::value)
+                                                                              && ((!internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0)
+    {
+      // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
+      const bool is_integer_alike = internal::is_valid_index_type<T>::value;
+      EIGEN_UNUSED_VARIABLE(is_integer_alike);
+      EIGEN_STATIC_ASSERT(is_integer_alike,
+                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
+      resize(size);
+    }
+
+    // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient
+    // (case where the scalar type can be implicitly converted)
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
+      m_storage.data()[0] = val0;
+    }
+
+    // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient
+    // (case where the scalar type matches the index type)
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Index& val0,
+                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)
+                                                                  && (internal::is_same<Index,T>::value)
+                                                                  && Base::SizeAtCompileTime==1
+                                                                  && internal::is_convertible<T, Scalar>::value,T*>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
+      m_storage.data()[0] = Scalar(val0);
+    }
+
+    // Initialize a fixed size matrix from a pointer to raw data
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Scalar* data){
+      this->_set_noalias(ConstMapType(data));
+    }
+
+    // Initialize an arbitrary matrix from a dense expression
+    template<typename T, typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const DenseBase<OtherDerived>& other){
+      this->_set_noalias(other);
+    }
+
+    // Initialize an arbitrary matrix from an object convertible to the Derived type.
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Derived& other){
+      this->_set_noalias(other);
+    }
+
+    // Initialize an arbitrary matrix from a generic Eigen expression
+    template<typename T, typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const EigenBase<OtherDerived>& other){
+      this->derived() = other;
+    }
+
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const ReturnByValue<T>& other)
+    {
+      resize(other.rows(), other.cols());
+      other.evalTo(this->derived());
+    }
+
+    template<typename T, typename OtherDerived>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+    {
+      this->derived() = r;
+    }
+
+    // For fixed-size Array<Scalar,...>
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Scalar& val0,
+                                    typename internal::enable_if<    Base::SizeAtCompileTime!=Dynamic
+                                                                  && Base::SizeAtCompileTime!=1
+                                                                  && internal::is_convertible<T, Scalar>::value
+                                                                  && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T>::type* = 0)
+    {
+      Base::setConstant(val0);
+    }
+
+    // For fixed-size Array<Index,...>
+    template<typename T>
+    EIGEN_DEVICE_FUNC
+    EIGEN_STRONG_INLINE void _init1(const Index& val0,
+                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)
+                                                                  && (internal::is_same<Index,T>::value)
+                                                                  && Base::SizeAtCompileTime!=Dynamic
+                                                                  && Base::SizeAtCompileTime!=1
+                                                                  && internal::is_convertible<T, Scalar>::value
+                                                                  && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T*>::type* = 0)
+    {
+      Base::setConstant(val0);
+    }
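The _init1() overloads above resolve the classic one-argument-constructor ambiguity: for a dynamic-size vector the argument is a size, for a 1x1 matrix it is the unique coefficient, for a fixed-size Array it fills every coefficient, and a raw pointer maps the data. A sketch of the resulting behavior (standard Eigen API only):

```cpp
#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::VectorXd v(10);               // dynamic vector: 10 is a size (coeffs uninitialized)
  assert(v.size() == 10);

  Eigen::Matrix<double, 1, 1> s(3.0);  // 1x1 matrix: 3.0 is the unique coefficient
  assert(s(0, 0) == 3.0);

  Eigen::Array33d a(7.0);              // fixed-size array: every coefficient set to 7.0
  assert((a == 7.0).all());

  double raw[4] = {1, 2, 3, 4};
  Eigen::Vector4d w(raw);              // fixed-size matrix initialized from raw data
  assert(w(2) == 3.0);
}
```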
+    template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+    friend struct internal::matrix_swap_impl;
+
+  public:
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal
+      * \brief Override DenseBase::swap() since for dynamic-sized matrices
+      * of same type it is enough to swap the data pointers.
+      */
+    template<typename OtherDerived>
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    void swap(DenseBase<OtherDerived> & other)
+    {
+      enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
+      internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.derived());
+    }
+
+    /** \internal
+      * \brief const version forwarded to DenseBase::swap
+      */
+    template<typename OtherDerived>
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    void swap(DenseBase<OtherDerived> const & other)
+    { Base::swap(other.derived()); }
+
+    EIGEN_DEVICE_FUNC
+    static EIGEN_STRONG_INLINE void _check_template_params()
+    {
+      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (int(Options)&RowMajor)==RowMajor)
+                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (int(Options)&RowMajor)==0)
+                        && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
+                        && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
+                        && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
+                        && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))
+                        && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)
+                        && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)
+                        && (Options & (DontAlign|RowMajor)) == Options),
+        INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+
+    enum { IsPlainObjectBase = 1 };
+#endif
+  public:
+    // These apparently need to be down here for nvcc+icc to prevent duplicate
+    // Map symbol.
+    template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
+    friend class Eigen::Map<Derived, Unaligned>;
+    friend class Eigen::Map<const Derived, Unaligned>;
+#if EIGEN_MAX_ALIGN_BYTES>0
+    // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice.
+    friend class Eigen::Map<Derived, AlignedMax>;
+    friend class Eigen::Map<const Derived, AlignedMax>;
+#endif
+};
+
+namespace internal {
+
+template <typename Derived, typename OtherDerived, bool IsVector>
+struct conservative_resize_like_impl
+{
+  #if EIGEN_HAS_TYPE_TRAITS
+  static const bool IsRelocatable = std::is_trivially_copyable<typename Derived::Scalar>::value;
+  #else
+  static const bool IsRelocatable = !NumTraits<typename Derived::Scalar>::RequireInitialization;
+  #endif
+  static void run(DenseBase<Derived>& _this, Index rows, Index cols)
+  {
+    if (_this.rows() == rows && _this.cols() == cols) return;
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+
+    if ( IsRelocatable
+          && (( Derived::IsRowMajor && _this.cols() == cols) ||  // row-major and we change only the number of rows
+              (!Derived::IsRowMajor && _this.rows() == rows) ))  // column-major and we change only the number of columns
+    {
+      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
+      _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      Derived tmp(rows,cols);
+      const Index common_rows = numext::mini(rows, _this.rows());
+      const Index common_cols = numext::mini(cols, _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),
+    // neither RowsAtCompileTime nor ColsAtCompileTime must be Dynamic. If only one of the
+    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
+    // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
+    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
+
+    if ( IsRelocatable &&
+          (( Derived::IsRowMajor && _this.cols() == other.cols()) ||  // row-major and we change only the number of rows
+           (!Derived::IsRowMajor && _this.rows() == other.rows()) ))  // column-major and we change only the number of columns
+    {
+      const Index new_rows = other.rows() - _this.rows();
+      const Index new_cols = other.cols() - _this.cols();
+      _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
+      if (new_rows>0)
+        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
+      else if (new_cols>0)
+        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      Derived tmp(other);
+      const Index common_rows = numext::mini(tmp.rows(), _this.rows());
+      const Index common_cols = numext::mini(tmp.cols(), _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+};
+
+// Here, the specialization for vectors inherits from the general matrix case
+// to allow calling .conservativeResize(rows,cols) on vectors.
+template <typename Derived, typename OtherDerived>
+struct conservative_resize_like_impl<Derived,OtherDerived,true>
+  : conservative_resize_like_impl<Derived,OtherDerived,false>
+{
+  typedef conservative_resize_like_impl<Derived,OtherDerived,false> Base;
+  using Base::run;
+  using Base::IsRelocatable;
+
+  static void run(DenseBase<Derived>& _this, Index size)
+  {
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
+    if(IsRelocatable)
+      _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+    else
+      Base::run(_this.derived(), new_rows, new_cols);
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    const Index num_new_elements = other.size() - _this.size();
+
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
+    if(IsRelocatable)
+      _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+    else
+      Base::run(_this.derived(), new_rows, new_cols);
+
+    if (num_new_elements > 0)
+      _this.tail(num_new_elements) = other.tail(num_new_elements);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+struct matrix_swap_impl
+{
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    a.base().swap(b);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB>
+struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>
+{
+  EIGEN_DEVICE_FUNC
+  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSESTORAGEBASE_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Product.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Product.h
new file mode 100644
index 000000000..70a6c1063
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Product.h
@@ -0,0 +1,191 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
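Before moving into Product.h, a usage note: the conservative_resize_like_impl logic above is what backs conservativeResize(). When the storage order permits, the buffer is reallocated in place; otherwise a temporary is built and the overlapping block is copied. A short sketch using only standard Eigen API:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 1, 2,
       3, 4;

  // Grow to 3x3: the original 2x2 block is preserved,
  // the new row and column are left uninitialized.
  m.conservativeResize(3, 3);
  m.row(2).setZero();
  m.col(2).setZero();
  std::cout << m << "\n";

  // For vectors, the single-argument form keeps the leading coefficients.
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 4.0);
  v.conservativeResize(3);  // keeps v(0), v(1), v(2)
  std::cout << v.transpose() << "\n";
}
```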
+
+#ifndef EIGEN_PRODUCT_H
+#define EIGEN_PRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl;
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Option>
+struct traits<Product<Lhs, Rhs, Option> >
+{
+  typedef typename remove_all<Lhs>::type LhsCleaned;
+  typedef typename remove_all<Rhs>::type RhsCleaned;
+  typedef traits<LhsCleaned> LhsTraits;
+  typedef traits<RhsCleaned> RhsTraits;
+
+  typedef MatrixXpr XprKind;
+
+  typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
+  typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
+                                                typename RhsTraits::StorageKind,
+                                                internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
+  typedef typename promote_index_type<typename LhsTraits::StorageIndex,
+                                      typename RhsTraits::StorageIndex>::type StorageIndex;
+
+  enum {
+    RowsAtCompileTime    = LhsTraits::RowsAtCompileTime,
+    ColsAtCompileTime    = RhsTraits::ColsAtCompileTime,
+    MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
+
+    // FIXME: only needed by GeneralMatrixMatrixTriangular
+    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
+
+    // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
+    Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit
+          : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
+          : (   ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit))
+             || ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) ) ? RowMajorBit
+          : NoPreferredStorageOrderBit
+  };
+};
+
+} // end namespace internal
+
+/** \class Product
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the product of two arbitrary matrices or vectors
+  *
+  * \tparam _Lhs the type of the left-hand side expression
+  * \tparam _Rhs the type of the right-hand side expression
+  *
+  * This class represents an expression of the product of two arbitrary matrices.
+  *
+  * The other template parameters are:
+  * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct
+  *
+  */
+template<typename _Lhs, typename _Rhs, int Option>
+class Product : public ProductImpl<_Lhs,_Rhs,Option,
+                                   typename internal::product_promote_storage_type<typename internal::traits<_Lhs>::StorageKind,
+                                                                                   typename internal::traits<_Rhs>::StorageKind,
+                                                                                   internal::product_type<_Lhs,_Rhs>::ret>::ret>
+{
+  public:
+
+    typedef _Lhs Lhs;
+    typedef _Rhs Rhs;
+
+    typedef typename ProductImpl<
+        Lhs, Rhs, Option,
+        typename internal::product_promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                                         typename internal::traits<Rhs>::StorageKind,
+                                                         internal::product_type<Lhs,Rhs>::ret>::ret>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
+
+    typedef typename internal::ref_selector<Lhs>::type LhsNested;
+    typedef typename internal::ref_selector<Rhs>::type RhsNested;
+    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
+    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows()
+        && "invalid matrix product"
+        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+    }
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+    Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+    Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const LhsNestedCleaned& lhs() const { return m_lhs; }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+  protected:
+
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs,Rhs>::ret>
+class dense_product_base
+ : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
+{};
+
+/** Conversion to scalar for inner-products */
+template<typename Lhs, typename Rhs, int Option>
+class dense_product_base<Lhs, Rhs, Option, InnerProduct>
+ : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
+{
+  typedef Product<Lhs,Rhs,Option> ProductXpr;
+  typedef typename internal::dense_xpr_base<ProductXpr>::type Base;
+public:
+  using Base::derived;
+  typedef typename Base::Scalar Scalar;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const
+  {
+    return internal::evaluator<ProductXpr>(derived()).coeff(0,0);
+  }
+};
+
+} // namespace internal
+
+// Generic API dispatcher
+template<typename Lhs, typename Rhs, int Option, typename StorageKind>
+class ProductImpl : public internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type
+{
+  public:
+    typedef typename internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type Base;
+};
+
+template<typename Lhs, typename Rhs, int Option>
+class ProductImpl<Lhs,Rhs,Option,Dense>
+  : public internal::dense_product_base<Lhs,Rhs,Option>
+{
+    typedef Product<Lhs, Rhs, Option> Derived;
+
+  public:
+
+    typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+  protected:
+    enum {
+      IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
+                   (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
+      EnableCoeff = IsOneByOne || Option==LazyProduct
+    };
+
+  public:
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const
+    {
+      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
+      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
+
+      return internal::evaluator<Derived>(derived()).coeff(row,col);
+    }
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const
+    {
+      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
+      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
+
+      return internal::evaluator<Derived>(derived()).coeff(i);
+    }
+
+};
+
+} // end namespace Eigen
+ +#endif // EIGEN_PRODUCT_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ProductEvaluators.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ProductEvaluators.h new file mode 100644 index 000000000..8cf294b28 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ProductEvaluators.h @@ -0,0 +1,1179 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2011 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_PRODUCTEVALUATORS_H +#define EIGEN_PRODUCTEVALUATORS_H + +namespace Eigen { + +namespace internal { + +/** \internal + * Evaluator of a product expression. + * Since products require special treatments to handle all possible cases, + * we simply defer the evaluation logic to a product_evaluator class + * which offers more partial specialization possibilities. + * + * \sa class product_evaluator + */ +template +struct evaluator > + : public product_evaluator > +{ + typedef Product XprType; + typedef product_evaluator Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +// Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B" +// TODO we should apply that rule only if that's really helpful +template +struct evaluator_assume_aliasing, + const CwiseNullaryOp, Plain1>, + const Product > > +{ + static const bool value = true; +}; +template +struct evaluator, + const CwiseNullaryOp, Plain1>, + const Product > > + : public evaluator > +{ + typedef CwiseBinaryOp, + const CwiseNullaryOp, Plain1>, + const Product > XprType; + typedef evaluator > Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) + : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs()) + {} +}; + + +template +struct evaluator, DiagIndex> > + : public evaluator, DiagIndex> > +{ + typedef Diagonal, DiagIndex> XprType; + typedef evaluator, DiagIndex> > Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) + : Base(Diagonal, DiagIndex>( + Product(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()), + xpr.index() )) + {} +}; + + +// Helper class to perform a matrix product with the destination at hand. +// Depending on the sizes of the factors, there are different evaluation strategies +// as controlled by internal::product_type. 
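Before the generic_product_impl machinery, it may help to see what these classes do from the user's side: `A*B` only builds a Product expression, and evaluation happens on assignment, where the default evaluator creates a temporary unless `noalias()` promises the destination does not alias the operands. A minimal sketch using only standard Eigen API:

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd C(4, 4);

  C = A * B;            // Product<> expression; aliasing assumed, so a temporary is used
  C.noalias() = A * B;  // no temporary: generic_product_impl::evalTo writes into C directly

  // Inner products convert to a scalar via dense_product_base<..., InnerProduct>.
  Eigen::VectorXd u = Eigen::VectorXd::Random(4);
  double d = u.transpose() * u;  // 1x1 product usable as a plain double

  // lazyProduct forces coefficient-based evaluation (no GEMM kernel, no temporary).
  C = A.lazyProduct(B);
  (void)d;
}
```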
+template< typename Lhs, typename Rhs, + typename LhsShape = typename evaluator_traits::Shape, + typename RhsShape = typename evaluator_traits::Shape, + int ProductType = internal::product_type::value> +struct generic_product_impl; + +template +struct evaluator_assume_aliasing > { + static const bool value = true; +}; + +// This is the default evaluator implementation for products: +// It creates a temporary and call generic_product_impl +template +struct product_evaluator, ProductTag, LhsShape, RhsShape> + : public evaluator::PlainObject> +{ + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef evaluator Base; + enum { + Flags = Base::Flags | EvalBeforeNestingBit + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit product_evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + +// FIXME shall we handle nested_eval here?, +// if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.) +// typedef typename internal::nested_eval::type LhsNested; +// typedef typename internal::nested_eval::type RhsNested; +// typedef typename internal::remove_all::type LhsNestedCleaned; +// typedef typename internal::remove_all::type RhsNestedCleaned; +// +// const LhsNested lhs(xpr.lhs()); +// const RhsNested rhs(xpr.rhs()); +// +// generic_product_impl::evalTo(m_result, lhs, rhs); + + generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); + } + +protected: + PlainObject m_result; +}; + +// The following three shortcuts are enabled only if the scalar types match exactly. +// TODO: we could enable them for different scalar types when the product is not vectorized. + +// Dense = Product +template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> +struct Assignment, internal::assign_op, Dense2Dense, + typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> +{ + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + // FIXME shall we handle nested_eval here? + generic_product_impl::evalTo(dst, src.lhs(), src.rhs()); + } +}; + +// Dense += Product +template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> +struct Assignment, internal::add_assign_op, Dense2Dense, + typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> +{ + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &) + { + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + // FIXME shall we handle nested_eval here? 
+ generic_product_impl::addTo(dst, src.lhs(), src.rhs()); + } +}; + +// Dense -= Product +template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> +struct Assignment, internal::sub_assign_op, Dense2Dense, + typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> +{ + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &) + { + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + // FIXME shall we handle nested_eval here? + generic_product_impl::subTo(dst, src.lhs(), src.rhs()); + } +}; + + +// Dense ?= scalar * Product +// TODO we should apply that rule if that's really helpful +// for instance, this is not good for inner products +template< typename DstXprType, typename Lhs, typename Rhs, typename AssignFunc, typename Scalar, typename ScalarBis, typename Plain> +struct Assignment, const CwiseNullaryOp,Plain>, + const Product >, AssignFunc, Dense2Dense> +{ + typedef CwiseBinaryOp, + const CwiseNullaryOp,Plain>, + const Product > SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func) + { + call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func); + } +}; + +//---------------------------------------- +// Catch "Dense ?= xpr + Product<>" expression to save one temporary +// FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct + +template +struct evaluator_assume_aliasing::Scalar>, const OtherXpr, + const Product >, DenseShape > { + static const bool value = true; +}; + +template +struct evaluator_assume_aliasing::Scalar>, const OtherXpr, + const Product >, DenseShape > { + static const bool value = true; +}; + +template +struct assignment_from_xpr_op_product +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/) + { + call_assignment_no_alias(dst, src.lhs(), Func1()); + call_assignment_no_alias(dst, src.rhs(), Func2()); + } +}; + +#define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP,BINOP,ASSIGN_OP2) \ + template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> \ + struct Assignment, const OtherXpr, \ + const Product >, internal::ASSIGN_OP, Dense2Dense> \ + : assignment_from_xpr_op_product, internal::ASSIGN_OP, internal::ASSIGN_OP2 > \ + {} + +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_sum_op,add_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_sum_op,add_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_sum_op,sub_assign_op); + +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_difference_op,sub_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_difference_op,sub_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_difference_op,add_assign_op); + +//---------------------------------------- + +template +struct generic_product_impl +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + dst.coeffRef(0,0) += 
(lhs.transpose().cwiseProduct(rhs)).sum(); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); } +}; + + +/*********************************************************************** +* Implementation of outer dense * dense vector product +***********************************************************************/ + +// Column major result +template +void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&) +{ + evaluator rhsEval(rhs); + ei_declare_local_nested_eval(Lhs,lhs,Rhs::SizeAtCompileTime,actual_lhs); + // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored + // FIXME not very good if rhs is real and lhs complex while alpha is real too + const Index cols = dst.cols(); + for (Index j=0; j +void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) +{ + evaluator lhsEval(lhs); + ei_declare_local_nested_eval(Rhs,rhs,Lhs::SizeAtCompileTime,actual_rhs); + // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored + // FIXME not very good if lhs is real and rhs complex while alpha is real too + const Index rows = dst.rows(); + for (Index i=0; i +struct generic_product_impl +{ + template struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; + typedef typename Product::Scalar Scalar; + + // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose + struct set { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; + struct add { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; + struct sub { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } }; + struct adds { + Scalar m_scale; + explicit adds(const Scalar& s) : m_scale(s) {} + template void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const { + dst.const_cast_derived() += m_scale * src; + } + }; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major()); + } + +}; + + +// This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo +template +struct generic_product_impl_base +{ + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, 
Scalar(1)); } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); } + +}; + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + typedef typename Product::Scalar Scalar; + enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; + typedef typename internal::remove_all::type>::type MatrixType; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + // Fallback to inner product if both the lhs and rhs is a runtime vector. + if (lhs.rows() == 1 && rhs.cols() == 1) { + dst.coeffRef(0,0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0)); + return; + } + LhsNested actual_lhs(lhs); + RhsNested actual_rhs(rhs); + internal::gemv_dense_selector::HasUsableDirectAccess) + >::run(actual_lhs, actual_rhs, dst, alpha); + } +}; + +template +struct generic_product_impl +{ + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + // Same as: dst.noalias() = lhs.lazyProduct(rhs); + // but easier on the compiler side + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + // dst.noalias() += lhs.lazyProduct(rhs); + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + // dst.noalias() -= lhs.lazyProduct(rhs); + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op()); + } + + // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h + // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. For instance: + // dst {,+,-}= (s1*A)*(B*s2) + // will be rewritten as: + // dst {,+,-}= (s1*s2) * (A.lazyProduct(B)) + // There are at least four benefits of doing so: + // 1 - huge performance gain for heap-allocated matrix types as it save costly allocations. + // 2 - it is faster than simply by-passing the heap allocation through stack allocation. + // 3 - it makes this fallback consistent with the heavy GEMM routine. + // 4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices. + // (see https://stackoverflow.com/questions/54738495) + // For small fixed sizes matrices, howver, the gains are less obvious, it is sometimes x2 faster, but sometimes x3 slower, + // and the behavior depends also a lot on the compiler... This is why this re-writting strategy is currently + // enabled only when falling back from the main GEMM. 
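A quick illustration of the rewrite described in the comment above; this is plain user-visible behavior, and the exact kernel dispatched still depends on internal::product_type:

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();
  Eigen::Matrix3d C = Eigen::Matrix3d::Zero();

  double s1 = 2.0, s2 = 0.5;

  // Both scalar multiples are factored out through blas_traits, so this is
  // evaluated as C += (s1*s2) * A.lazyProduct(B): one scaled product, and no
  // temporaries materialized for the scaled operands.
  C.noalias() += (s1 * A) * (B * s2);
  (void)C;
}
```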
+ template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func &func) + { + enum { + HasScalarFactor = blas_traits::HasScalarFactor || blas_traits::HasScalarFactor, + ConjLhs = blas_traits::NeedToConjugate, + ConjRhs = blas_traits::NeedToConjugate + }; + // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto + // this is important for real*complex_mat + Scalar actualAlpha = combine_scalar_factors(lhs, rhs); + + eval_dynamic_impl(dst, + blas_traits::extract(lhs).template conjugateIf(), + blas_traits::extract(rhs).template conjugateIf(), + func, + actualAlpha, + typename conditional::type()); + } + +protected: + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s /* == 1 */, false_type) + { + EIGEN_UNUSED_VARIABLE(s); + eigen_internal_assert(s==Scalar(1)); + call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s, true_type) + { + call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func); + } +}; + +// This specialization enforces the use of a coefficient-based evaluation strategy +template +struct generic_product_impl + : generic_product_impl {}; + +// Case 2: Evaluate coeff by coeff +// +// This is mostly taken from CoeffBasedProduct.h +// The main difference is that we add an extra argument to the etor_product_*_impl::run() function +// for the inner dimension of the product, because evaluator object do not know their size. + +template +struct etor_product_coeff_impl; + +template +struct etor_product_packet_impl; + +template +struct product_evaluator, ProductTag, DenseShape, DenseShape> + : evaluator_base > +{ + typedef Product XprType; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit product_evaluator(const XprType& xpr) + : m_lhs(xpr.lhs()), + m_rhs(xpr.rhs()), + m_lhsImpl(m_lhs), // FIXME the creation of the evaluator objects should result in a no-op, but check that! + m_rhsImpl(m_rhs), // Moreover, they are only useful for the packet path, so we could completely disable them when not needed, + // or perhaps declare them on the fly on the packet method... We have experiment to check what's best. 
+ m_innerDim(xpr.lhs().cols()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::AddCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); +#if 0 + std::cerr << "LhsOuterStrideBytes= " << LhsOuterStrideBytes << "\n"; + std::cerr << "RhsOuterStrideBytes= " << RhsOuterStrideBytes << "\n"; + std::cerr << "LhsAlignment= " << LhsAlignment << "\n"; + std::cerr << "RhsAlignment= " << RhsAlignment << "\n"; + std::cerr << "CanVectorizeLhs= " << CanVectorizeLhs << "\n"; + std::cerr << "CanVectorizeRhs= " << CanVectorizeRhs << "\n"; + std::cerr << "CanVectorizeInner= " << CanVectorizeInner << "\n"; + std::cerr << "EvalToRowMajor= " << EvalToRowMajor << "\n"; + std::cerr << "Alignment= " << Alignment << "\n"; + std::cerr << "Flags= " << Flags << "\n"; +#endif + } + + // Everything below here is taken from CoeffBasedProduct.h + + typedef typename internal::nested_eval::type LhsNested; + typedef typename internal::nested_eval::type RhsNested; + + typedef typename internal::remove_all::type LhsNestedCleaned; + typedef typename internal::remove_all::type RhsNestedCleaned; + + typedef evaluator LhsEtorType; + typedef evaluator RhsEtorType; + + enum { + RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime, + ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime, + InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime), + MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime, + MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime + }; + + typedef typename find_best_packet::type LhsVecPacketType; + typedef typename find_best_packet::type RhsVecPacketType; + + enum { + + LhsCoeffReadCost = LhsEtorType::CoeffReadCost, + RhsCoeffReadCost = RhsEtorType::CoeffReadCost, + CoeffReadCost = InnerSize==0 ? NumTraits::ReadCost + : InnerSize == Dynamic ? HugeCost + : InnerSize * (NumTraits::MulCost + int(LhsCoeffReadCost) + int(RhsCoeffReadCost)) + + (InnerSize - 1) * NumTraits::AddCost, + + Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, + + LhsFlags = LhsEtorType::Flags, + RhsFlags = RhsEtorType::Flags, + + LhsRowMajor = LhsFlags & RowMajorBit, + RhsRowMajor = RhsFlags & RowMajorBit, + + LhsVecPacketSize = unpacket_traits::size, + RhsVecPacketSize = unpacket_traits::size, + + // Here, we don't care about alignment larger than the usable packet size. + LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))), + RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))), + + SameType = is_same::value, + + CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1), + CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime!=1), + + EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + : (bool(RhsRowMajor) && !CanVectorizeLhs), + + Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & ~RowMajorBit) + | (EvalToRowMajor ? RowMajorBit : 0) + // TODO enable vectorization for mixed types + | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) + | (XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0), + + LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)), + RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)), + + Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,LhsAlignment))!=0 ? 0 : LhsAlignment) + : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,RhsAlignment))!=0 ? 0 : RhsAlignment) + : 0, + + /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside + * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner + * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect + * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. + */ + CanVectorizeInner = SameType + && LhsRowMajor + && (!RhsRowMajor) + && (int(LhsFlags) & int(RhsFlags) & ActualPacketAccessBit) + && (int(InnerSize) % packet_traits::size == 0) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const + { + return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); + } + + /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, + * which is why we don't set the LinearAccessBit. + * TODO: this seems possible when the result is a vector + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const CoeffReturnType coeff(Index index) const + { + const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; + const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0; + return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const PacketType packet(Index row, Index col) const + { + PacketType res; + typedef etor_product_packet_impl PacketImpl; + PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); + return res; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const PacketType packet(Index index) const + { + const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; + const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 
index : 0; + return packet(row,col); + } + +protected: + typename internal::add_const_on_value_type::type m_lhs; + typename internal::add_const_on_value_type::type m_rhs; + + LhsEtorType m_lhsImpl; + RhsEtorType m_rhsImpl; + + // TODO: Get rid of m_innerDim if known at compile time + Index m_innerDim; +}; + +template +struct product_evaluator, LazyCoeffBasedProductMode, DenseShape, DenseShape> + : product_evaluator, CoeffBasedProductMode, DenseShape, DenseShape> +{ + typedef Product XprType; + typedef Product BaseProduct; + typedef product_evaluator Base; + enum { + Flags = Base::Flags | EvalBeforeNestingBit + }; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit product_evaluator(const XprType& xpr) + : Base(BaseProduct(xpr.lhs(),xpr.rhs())) + {} +}; + +/**************************************** +*** Coeff based product, Packet path *** +****************************************/ + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + { + etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); + res = pmadd(pset1(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet(Index(UnrollingIndex-1), col), res); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + { + etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); + res = pmadd(lhs.template packet(row, Index(UnrollingIndex-1)), pset1(rhs.coeff(Index(UnrollingIndex-1), col)), res); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + { + res = pmul(pset1(lhs.coeff(row, Index(0))),rhs.template packet(Index(0), col)); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + { + res = pmul(lhs.template packet(row, Index(0)), pset1(rhs.coeff(Index(0), col))); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) + { + res = pset1(typename unpacket_traits::type(0)); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) + { + res = pset1(typename unpacket_traits::type(0)); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + { + res = pset1(typename unpacket_traits::type(0)); + for(Index i = 0; i < innerDim; ++i) + res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + } +}; + +template +struct etor_product_packet_impl +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + { + res = pset1(typename unpacket_traits::type(0)); + for(Index i = 0; i < innerDim; ++i) + res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); + } +}; + + 
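The etor_product_packet_impl ladder above is the unrolled inner loop of the coefficient-based path; from the user's side it is reached through lazyProduct(), or automatically for small fixed sizes. A sketch of the equivalence (standard Eigen API only):

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::Matrix4f A = Eigen::Matrix4f::Random();
  Eigen::Matrix4f B = Eigen::Matrix4f::Random();

  // Coefficient-based product: each coefficient is the dot product of a row
  // of A with a column of B, computed on demand; the recursion above unrolls
  // the inner dimension (here 4) at compile time.
  Eigen::Matrix4f C = A.lazyProduct(B);

  // Equivalent single-coefficient computation, mirroring the evaluator's coeff():
  float c01 = A.row(0).cwiseProduct(B.col(1).transpose()).sum();
  (void)C; (void)c01;
}
```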
+/*************************************************************************** +* Triangular products +***************************************************************************/ +template +struct triangular_product_impl; + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + triangular_product_impl + ::run(dst, lhs.nestedExpression(), rhs, alpha); + } +}; + +template +struct generic_product_impl +: generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + triangular_product_impl::run(dst, lhs, rhs.nestedExpression(), alpha); + } +}; + + +/*************************************************************************** +* SelfAdjoint products +***************************************************************************/ +template +struct selfadjoint_product_impl; + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC + void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + selfadjoint_product_impl::run(dst, lhs.nestedExpression(), rhs, alpha); + } +}; + +template +struct generic_product_impl +: generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + selfadjoint_product_impl::run(dst, lhs, rhs.nestedExpression(), alpha); + } +}; + + +/*************************************************************************** +* Diagonal products +***************************************************************************/ + +template +struct diagonal_product_evaluator_base + : evaluator_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType Scalar; +public: + enum { + CoeffReadCost = int(NumTraits::MulCost) + int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost), + + MatrixFlags = evaluator::Flags, + DiagFlags = evaluator::Flags, + + _StorageOrder = (Derived::MaxRowsAtCompileTime==1 && Derived::MaxColsAtCompileTime!=1) ? RowMajor + : (Derived::MaxColsAtCompileTime==1 && Derived::MaxRowsAtCompileTime!=1) ? ColMajor + : MatrixFlags & RowMajorBit ? RowMajor : ColMajor, + _SameStorageOrder = _StorageOrder == (MatrixFlags & RowMajorBit ? RowMajor : ColMajor), + + _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft) + ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)), + _SameTypes = is_same::value, + // FIXME currently we need same types, but in the future the next rule should be the one + //_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagFlags)&PacketAccessBit))), + _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) + && _SameTypes + && (_SameStorageOrder || (MatrixFlags&LinearAccessBit)==LinearAccessBit) + && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))), + _LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0, + Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? 
PacketAccessBit : 0), + Alignment = evaluator::Alignment, + + AsScalarProduct = (DiagonalType::SizeAtCompileTime==1) + || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft) + || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight) + }; + + EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag) + : m_diagImpl(diag), m_matImpl(mat) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const + { + if(AsScalarProduct) + return m_diagImpl.coeff(0) * m_matImpl.coeff(idx); + else + return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx); + } + +protected: + template + EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const + { + return internal::pmul(m_matImpl.template packet(row, col), + internal::pset1(m_diagImpl.coeff(id))); + } + + template + EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const + { + enum { + InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, + DiagonalPacketLoadMode = EIGEN_PLAIN_ENUM_MIN(LoadMode,((InnerSize%16) == 0) ? int(Aligned16) : int(evaluator::Alignment)) // FIXME hardcoded 16!! + }; + return internal::pmul(m_matImpl.template packet(row, col), + m_diagImpl.template packet(id)); + } + + evaluator m_diagImpl; + evaluator m_matImpl; +}; + +// diagonal * dense +template +struct product_evaluator, ProductTag, DiagonalShape, DenseShape> + : diagonal_product_evaluator_base, OnTheLeft> +{ + typedef diagonal_product_evaluator_base, OnTheLeft> Base; + using Base::m_diagImpl; + using Base::m_matImpl; + using Base::coeff; + typedef typename Base::Scalar Scalar; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef typename Lhs::DiagonalVectorType DiagonalType; + + + enum { StorageOrder = Base::_StorageOrder }; + + EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) + : Base(xpr.rhs(), xpr.lhs().diagonal()) + { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + { + return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col); + } + +#ifndef EIGEN_GPUCC + template + EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const + { + // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case. + // See also similar calls below. 
+ return this->template packet_impl(row,col, row, + typename internal::conditional::type()); + } + + template + EIGEN_STRONG_INLINE PacketType packet(Index idx) const + { + return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); + } +#endif +}; + +// dense * diagonal +template +struct product_evaluator, ProductTag, DenseShape, DiagonalShape> + : diagonal_product_evaluator_base, OnTheRight> +{ + typedef diagonal_product_evaluator_base, OnTheRight> Base; + using Base::m_diagImpl; + using Base::m_matImpl; + using Base::coeff; + typedef typename Base::Scalar Scalar; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + + enum { StorageOrder = Base::_StorageOrder }; + + EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) + : Base(xpr.lhs(), xpr.rhs().diagonal()) + { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + { + return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col); + } + +#ifndef EIGEN_GPUCC + template + EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const + { + return this->template packet_impl(row,col, col, + typename internal::conditional::type()); + } + + template + EIGEN_STRONG_INLINE PacketType packet(Index idx) const + { + return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); + } +#endif +}; + +/*************************************************************************** +* Products with permutation matrices +***************************************************************************/ + +/** \internal + * \class permutation_matrix_product + * Internal helper class implementing the product between a permutation matrix and a matrix. + * This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h + */ +template +struct permutation_matrix_product; + +template +struct permutation_matrix_product +{ + typedef typename nested_eval::type MatrixType; + typedef typename remove_all::type MatrixTypeCleaned; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) + { + MatrixType mat(xpr); + const Index n = Side==OnTheLeft ? mat.rows() : mat.cols(); + // FIXME we need an is_same for expression that is not sensitive to constness. For instance + // is_same_xpr, Block >::value should be true. + //if(is_same::value && extract_data(dst) == extract_data(mat)) + if(is_same_dense(dst, mat)) + { + // apply the permutation inplace + Matrix mask(perm.size()); + mask.fill(false); + Index r = 0; + while(r < perm.size()) + { + // search for the next seed + while(r=perm.size()) + break; + // we got one, let's follow it until we are back to the seed + Index k0 = r++; + Index kPrev = k0; + mask.coeffRef(k0) = true; + for(Index k=perm.indices().coeff(k0); k!=k0; k=perm.indices().coeff(k)) + { + Block(dst, k) + .swap(Block + (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev)); + + mask.coeffRef(k) = true; + kPrev = k; + } + } + } + else + { + for(Index i = 0; i < n; ++i) + { + Block + (dst, ((Side==OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i) + + = + + Block + (mat, ((Side==OnTheRight) ^ Transposed) ? 
perm.indices().coeff(i) : i); + } + } + } +}; + +template +struct generic_product_impl +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + { + permutation_matrix_product::run(dst, lhs, rhs); + } +}; + +template +struct generic_product_impl +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + { + permutation_matrix_product::run(dst, rhs, lhs); + } +}; + +template +struct generic_product_impl, Rhs, PermutationShape, MatrixShape, ProductTag> +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Inverse& lhs, const Rhs& rhs) + { + permutation_matrix_product::run(dst, lhs.nestedExpression(), rhs); + } +}; + +template +struct generic_product_impl, MatrixShape, PermutationShape, ProductTag> +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Inverse& rhs) + { + permutation_matrix_product::run(dst, rhs.nestedExpression(), lhs); + } +}; + + +/*************************************************************************** +* Products with transpositions matrices +***************************************************************************/ + +// FIXME could we unify Transpositions and Permutation into a single "shape"?? + +/** \internal + * \class transposition_matrix_product + * Internal helper class implementing the product between a permutation matrix and a matrix. + */ +template +struct transposition_matrix_product +{ + typedef typename nested_eval::type MatrixType; + typedef typename remove_all::type MatrixTypeCleaned; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr) + { + MatrixType mat(xpr); + typedef typename TranspositionType::StorageIndex StorageIndex; + const Index size = tr.size(); + StorageIndex j = 0; + + if(!is_same_dense(dst,mat)) + dst = mat; + + for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k +struct generic_product_impl +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + { + transposition_matrix_product::run(dst, lhs, rhs); + } +}; + +template +struct generic_product_impl +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + { + transposition_matrix_product::run(dst, rhs, lhs); + } +}; + + +template +struct generic_product_impl, Rhs, TranspositionsShape, MatrixShape, ProductTag> +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Transpose& lhs, const Rhs& rhs) + { + transposition_matrix_product::run(dst, lhs.nestedExpression(), rhs); + } +}; + +template +struct generic_product_impl, MatrixShape, TranspositionsShape, ProductTag> +{ + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Transpose& rhs) + { + transposition_matrix_product::run(dst, rhs.nestedExpression(), lhs); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_EVALUATORS_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Random.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Random.h new file mode 100644 index 000000000..dab2ac8e9 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Random.h @@ -0,0 +1,218 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
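+// (Illustrative aside on the permutation and transposition product
+// specializations that close ProductEvaluators.h above; a minimal sketch
+// assuming only the public Eigen API, no dense permutation matrix is formed.)
+//
+//   Eigen::PermutationMatrix<Eigen::Dynamic> P(3);
+//   P.indices() << 2, 0, 1;                 // row/column i is sent to position indices(i)
+//   Eigen::MatrixXd A  = Eigen::MatrixXd::Random(3, 3);
+//   Eigen::MatrixXd PA = P * A;             // permutes rows
+//   Eigen::MatrixXd AP = A * P;             // permutes columns
+//   Eigen::MatrixXd B  = P.inverse() * PA;  // Inverse<...> path above; B == A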
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RANDOM_H
+#define EIGEN_RANDOM_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar> struct scalar_random_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
+  inline const Scalar operator() () const { return random<Scalar>(); }
+};
+
+template<typename Scalar>
+struct functor_traits<scalar_random_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
+
+} // end namespace internal
+
+/** \returns a random matrix expression
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * \not_reentrant
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int_int.cpp
+  * Output: \verbinclude MatrixBase_random_int_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.
+  *
+  * \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()
+  */
+template<typename Derived>
+inline const typename DenseBase<Derived>::RandomReturnType
+DenseBase<Derived>::Random(Index rows, Index cols)
+{
+  return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
+}
+
+/** \returns a random vector expression
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  * \not_reentrant
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int.cpp
+  * Output: \verbinclude MatrixBase_random_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random()
+  */
+template<typename Derived>
+inline const typename DenseBase<Derived>::RandomReturnType
+DenseBase<Derived>::Random(Index size)
+{
+  return NullaryExpr(size, internal::scalar_random_op<Scalar>());
+}
+
+/** \returns a fixed-size random matrix or vector expression
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
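+  *
+  * Minimal usage sketch (illustrative, relying only on the semantics documented above):
+  * \code
+  * Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 4); // dynamic size, entries in [-1,1]
+  * Eigen::VectorXf v = Eigen::VectorXf::Random(5);    // dynamic-size vector variant
+  * Eigen::Matrix3i K = Eigen::Matrix3i::Random();     // fixed size, full integer range
+  * \endcode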
+  *
+  * Example: \include MatrixBase_random.cpp
+  * Output: \verbinclude MatrixBase_random.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \not_reentrant
+  *
+  * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)
+  */
+template<typename Derived>
+inline const typename DenseBase<Derived>::RandomReturnType
+DenseBase<Derived>::Random()
+{
+  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
+}
+
+/** Sets all coefficients in this expression to random values.
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * \not_reentrant
+  *
+  * Example: \include MatrixBase_setRandom.cpp
+  * Output: \verbinclude MatrixBase_setRandom.out
+  *
+  * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom()
+{
+  return *this = Random(rows(), cols());
+}
+
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * \only_for_vectors
+  * \not_reentrant
+  *
+  * Example: \include Matrix_setRandom_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int.out
+  *
+  * \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index newSize)
+{
+  resize(newSize);
+  return setRandom();
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to random values.
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * \not_reentrant
+  *
+  * \param rows the new number of rows
+  * \param cols the new number of columns
+  *
+  * Example: \include Matrix_setRandom_int_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int_int.out
+  *
+  * \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
+{
+  resize(rows, cols);
+  return setRandom();
+}
+
+/** Resizes to the given size, changing only the number of columns, and sets all
+  * coefficients in this expression to random values. For the parameter of type
+  * NoChange_t, just pass the special value \c NoChange.
+  *
+  * Numbers are uniformly spread through their whole definition range for integer types,
+  * and in the [-1:1] range for floating point scalar types.
+  *
+  * \not_reentrant
+  *
+  * \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index rows, NoChange_t)
+{
+  return setRandom(rows, cols());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_RANDOM_H
diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Redux.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Redux.h
new file mode 100644
index 000000000..b6790d110
--- /dev/null
+++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Redux.h
@@ -0,0 +1,515 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REDUX_H
+#define EIGEN_REDUX_H
+
+namespace Eigen {
+
+namespace internal {
+
+// TODO
+// * implement other kind of vectorization
+// * factorize code
+
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for vectorization and unrolling
+***************************************************************************/
+
+template<typename Func, typename Evaluator>
+struct redux_traits
+{
+public:
+  typedef typename find_best_packet<typename Evaluator::Scalar,Evaluator::SizeAtCompileTime>::type PacketType;
+  enum {
+    PacketSize = unpacket_traits<PacketType>::size,
+    InnerMaxSize = int(Evaluator::IsRowMajor)
+                 ? Evaluator::MaxColsAtCompileTime
+                 : Evaluator::MaxRowsAtCompileTime,
+    OuterMaxSize = int(Evaluator::IsRowMajor)
+                 ? Evaluator::MaxRowsAtCompileTime
+                 : Evaluator::MaxColsAtCompileTime,
+    SliceVectorizedWork = int(InnerMaxSize)==Dynamic ? Dynamic
+                        : int(OuterMaxSize)==Dynamic ? (int(InnerMaxSize)>=int(PacketSize) ? Dynamic : 0)
+                        : (int(InnerMaxSize)/int(PacketSize)) * int(OuterMaxSize)
+  };
+
+  enum {
+    MightVectorize = (int(Evaluator::Flags)&ActualPacketAccessBit)
+                  && (functor_traits<Func>::PacketAccess),
+    MayLinearVectorize = bool(MightVectorize) && (int(Evaluator::Flags)&LinearAccessBit),
+    MaySliceVectorize  = bool(MightVectorize) && (int(SliceVectorizedWork)==Dynamic || int(SliceVectorizedWork)>=3)
+  };
+
+public:
+  enum {
+    Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
+              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
+                                        : int(DefaultTraversal)
+  };
+
+public:
+  enum {
+    Cost = Evaluator::SizeAtCompileTime == Dynamic ? HugeCost
+         : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) + (Evaluator::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
+    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
+  };
+
+public:
+  enum {
+    Unrolling = Cost <= UnrollingLimit ?
CompleteUnrolling : NoUnrolling + }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() + { + std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl; + std::cerr.setf(std::ios::hex, std::ios::basefield); + EIGEN_DEBUG_VAR(Evaluator::Flags) + std::cerr.unsetf(std::ios::hex); + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(OuterMaxSize) + EIGEN_DEBUG_VAR(SliceVectorizedWork) + EIGEN_DEBUG_VAR(PacketSize) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; + EIGEN_DEBUG_VAR(UnrollingLimit) + std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; + std::cerr << std::endl; + } +#endif +}; + +/*************************************************************************** +* Part 2 : unrollers +***************************************************************************/ + +/*** no vectorization ***/ + +template +struct redux_novec_unroller +{ + enum { + HalfLength = Length/2 + }; + + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func) + { + return func(redux_novec_unroller::run(eval,func), + redux_novec_unroller::run(eval,func)); + } +}; + +template +struct redux_novec_unroller +{ + enum { + outer = Start / Evaluator::InnerSizeAtCompileTime, + inner = Start % Evaluator::InnerSizeAtCompileTime + }; + + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func&) + { + return eval.coeffByOuterInner(outer, inner); + } +}; + +// This is actually dead code and will never be called. It is required +// to prevent false warnings regarding failed inlining though +// for 0 length run() will never be called at all. 
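+// (Illustrative worked example, not in the upstream file: for a fixed-size
+// reduction such as summing an Eigen::Matrix4f, the traits above give, with the
+// default unit costs, Cost = 16*CoeffReadCost + 15*AddCost = 31, which is below
+// the assumed EIGEN_UNROLLING_LIMIT default of 110, so CompleteUnrolling is
+// selected and the unrollers below expand the reduction into a balanced tree of
+// operations, halving Length at each step, with no runtime loop.)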
+template +struct redux_novec_unroller +{ + typedef typename Evaluator::Scalar Scalar; + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); } +}; + +/*** vectorization ***/ + +template +struct redux_vec_unroller +{ + template + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func& func) + { + enum { + PacketSize = unpacket_traits::size, + HalfLength = Length/2 + }; + + return func.packetOp( + redux_vec_unroller::template run(eval,func), + redux_vec_unroller::template run(eval,func) ); + } +}; + +template +struct redux_vec_unroller +{ + template + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func&) + { + enum { + PacketSize = unpacket_traits::size, + index = Start * PacketSize, + outer = index / int(Evaluator::InnerSizeAtCompileTime), + inner = index % int(Evaluator::InnerSizeAtCompileTime), + alignment = Evaluator::Alignment + }; + return eval.template packetByOuterInner(outer, inner); + } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Traversal, + int Unrolling = redux_traits::Unrolling +> +struct redux_impl; + +template +struct redux_impl +{ + typedef typename Evaluator::Scalar Scalar; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE + Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) + { + eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); + Scalar res; + res = eval.coeffByOuterInner(0, 0); + for(Index i = 1; i < xpr.innerSize(); ++i) + res = func(res, eval.coeffByOuterInner(0, i)); + for(Index i = 1; i < xpr.outerSize(); ++i) + for(Index j = 0; j < xpr.innerSize(); ++j) + res = func(res, eval.coeffByOuterInner(i, j)); + return res; + } +}; + +template +struct redux_impl + : redux_novec_unroller +{ + typedef redux_novec_unroller Base; + typedef typename Evaluator::Scalar Scalar; + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE + Scalar run(const Evaluator &eval, const Func& func, const XprType& /*xpr*/) + { + return Base::run(eval,func); + } +}; + +template +struct redux_impl +{ + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketScalar; + + template + static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) + { + const Index size = xpr.size(); + + const Index packetSize = redux_traits::PacketSize; + const int packetAlignment = unpacket_traits::alignment; + enum { + alignment0 = (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits::AlignedOnScalar)) ? 
int(packetAlignment) : int(Unaligned), + alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Evaluator::Alignment) + }; + const Index alignedStart = internal::first_default_aligned(xpr); + const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize); + const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize); + const Index alignedEnd2 = alignedStart + alignedSize2; + const Index alignedEnd = alignedStart + alignedSize; + Scalar res; + if(alignedSize) + { + PacketScalar packet_res0 = eval.template packet(alignedStart); + if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop + { + PacketScalar packet_res1 = eval.template packet(alignedStart+packetSize); + for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize) + { + packet_res0 = func.packetOp(packet_res0, eval.template packet(index)); + packet_res1 = func.packetOp(packet_res1, eval.template packet(index+packetSize)); + } + + packet_res0 = func.packetOp(packet_res0,packet_res1); + if(alignedEnd>alignedEnd2) + packet_res0 = func.packetOp(packet_res0, eval.template packet(alignedEnd2)); + } + res = func.predux(packet_res0); + + for(Index index = 0; index < alignedStart; ++index) + res = func(res,eval.coeff(index)); + + for(Index index = alignedEnd; index < size; ++index) + res = func(res,eval.coeff(index)); + } + else // too small to vectorize anything. + // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. + { + res = eval.coeff(0); + for(Index index = 1; index < size; ++index) + res = func(res,eval.coeff(index)); + } + + return res; + } +}; + +// NOTE: for SliceVectorizedTraversal we simply bypass unrolling +template +struct redux_impl +{ + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketType; + + template + EIGEN_DEVICE_FUNC static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) + { + eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); + const Index innerSize = xpr.innerSize(); + const Index outerSize = xpr.outerSize(); + enum { + packetSize = redux_traits::PacketSize + }; + const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; + Scalar res; + if(packetedInnerSize) + { + PacketType packet_res = eval.template packet(0,0); + for(Index j=0; j(j,i)); + + res = func.predux(packet_res); + for(Index j=0; j::run(eval, func, xpr); + } + + return res; + } +}; + +template +struct redux_impl +{ + typedef typename Evaluator::Scalar Scalar; + + typedef typename redux_traits::PacketType PacketType; + enum { + PacketSize = redux_traits::PacketSize, + Size = Evaluator::SizeAtCompileTime, + VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize) + }; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE + Scalar run(const Evaluator &eval, const Func& func, const XprType &xpr) + { + EIGEN_ONLY_USED_FOR_DEBUG(xpr) + eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); + if (VectorizedSize > 0) { + Scalar res = func.predux(redux_vec_unroller::template run(eval,func)); + if (VectorizedSize != Size) + res = func(res,redux_novec_unroller::run(eval,func)); + return res; + } + else { + return redux_novec_unroller::run(eval,func); + } + } +}; + +// evaluator adaptor +template +class redux_evaluator : public internal::evaluator<_XprType> +{ + typedef internal::evaluator<_XprType> Base; +public: + typedef _XprType XprType; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit 
redux_evaluator(const XprType &xpr) : Base(xpr) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + + enum { + MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = XprType::MaxColsAtCompileTime, + // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator + Flags = Base::Flags & ~DirectAccessBit, + IsRowMajor = XprType::IsRowMajor, + SizeAtCompileTime = XprType::SizeAtCompileTime, + InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + CoeffReturnType coeffByOuterInner(Index outer, Index inner) const + { return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketType packetByOuterInner(Index outer, Index inner) const + { return Base::template packet(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); } + +}; + +} // end namespace internal + +/*************************************************************************** +* Part 4 : public API +***************************************************************************/ + + +/** \returns the result of a full redux operation on the whole matrix or vector using \a func + * + * The template parameter \a BinaryOp is the type of the functor \a func which must be + * an associative operator. Both current C++98 and C++11 functor styles are handled. + * + * \warning the matrix must be not empty, otherwise an assertion is triggered. + * + * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::redux(const Func& func) const +{ + eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix"); + + typedef typename internal::redux_evaluator ThisEvaluator; + ThisEvaluator thisEval(derived()); + + // The initial expression is passed to the reducer as an additional argument instead of + // passing it as a member of redux_evaluator to help + return internal::redux_impl::run(thisEval, func, derived()); +} + +/** \returns the minimum of all coefficients of \c *this. + * In case \c *this contains NaN, NaNPropagation determines the behavior: + * NaNPropagation == PropagateFast : undefined + * NaNPropagation == PropagateNaN : result is NaN + * NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN + * \warning the matrix must be not empty, otherwise an assertion is triggered. + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::minCoeff() const +{ + return derived().redux(Eigen::internal::scalar_min_op()); +} + +/** \returns the maximum of all coefficients of \c *this. + * In case \c *this contains NaN, NaNPropagation determines the behavior: + * NaNPropagation == PropagateFast : undefined + * NaNPropagation == PropagateNaN : result is NaN + * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN + * \warning the matrix must be not empty, otherwise an assertion is triggered. 
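+  *
+  * Illustrative sketch of the generic entry point (redux() accepts any
+  * associative binary functor, including C++11 lambdas, as documented above):
+  * \code
+  * Eigen::MatrixXf m = Eigen::MatrixXf::Random(3, 3);
+  * float total = m.redux([](float a, float b) { return a + b; }); // same as m.sum()
+  * float hi    = m.maxCoeff();                                    // NaN-free data here
+  * \endcode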
+ */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::maxCoeff() const +{ + return derived().redux(Eigen::internal::scalar_max_op()); +} + +/** \returns the sum of all coefficients of \c *this + * + * If \c *this is empty, then the value 0 is returned. + * + * \sa trace(), prod(), mean() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::sum() const +{ + if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) + return Scalar(0); + return derived().redux(Eigen::internal::scalar_sum_op()); +} + +/** \returns the mean of all coefficients of *this +* +* \sa trace(), prod(), sum() +*/ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::mean() const +{ +#ifdef __INTEL_COMPILER + #pragma warning push + #pragma warning ( disable : 2259 ) +#endif + return Scalar(derived().redux(Eigen::internal::scalar_sum_op())) / Scalar(this->size()); +#ifdef __INTEL_COMPILER + #pragma warning pop +#endif +} + +/** \returns the product of all coefficients of *this + * + * Example: \include MatrixBase_prod.cpp + * Output: \verbinclude MatrixBase_prod.out + * + * \sa sum(), mean(), trace() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::prod() const +{ + if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) + return Scalar(1); + return derived().redux(Eigen::internal::scalar_product_op()); +} + +/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. + * + * \c *this can be any matrix, not necessarily square. + * + * \sa diagonal(), sum() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar +MatrixBase::trace() const +{ + return derived().diagonal().sum(); +} + +} // end namespace Eigen + +#endif // EIGEN_REDUX_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Ref.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Ref.h new file mode 100644 index 000000000..c2a37eadb --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Ref.h @@ -0,0 +1,381 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
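+// (Illustrative aside on the reduction API that Redux.h finished defining above,
+// assuming only the documented empty-matrix conventions.)
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(2, 5);
+//   double s = A.sum();    // 0 for an empty matrix
+//   double p = A.prod();   // 1 for an empty matrix
+//   double m = A.mean();   // sum() / size()
+//   double t = A.trace();  // diagonal().sum(); A need not be square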
+ +#ifndef EIGEN_REF_H +#define EIGEN_REF_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : public traits > +{ + typedef _PlainObjectType PlainObjectType; + typedef _StrideType StrideType; + enum { + Options = _Options, + Flags = traits >::Flags | NestByRefBit, + Alignment = traits >::Alignment + }; + + template struct match { + enum { + IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime, + HasDirectAccess = internal::has_direct_access::ret, + StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), + InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) + || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) + || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), + OuterStrideMatch = IsVectorAtCompileTime + || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime), + // NOTE, this indirection of evaluator::Alignment is needed + // to workaround a very strange bug in MSVC related to the instantiation + // of has_*ary_operator in evaluator. + // This line is surprisingly very sensitive. For instance, simply adding parenthesis + // as "DerivedAlignment = (int(evaluator::Alignment))," will make MSVC fail... + DerivedAlignment = int(evaluator::Alignment), + AlignmentMatch = (int(traits::Alignment)==int(Unaligned)) || (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should be replaced by the required alignment + ScalarTypeMatch = internal::is_same::value, + MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch + }; + typedef typename internal::conditional::type type; + }; + +}; + +template +struct traits > : public traits {}; + +} + +template class RefBase + : public MapBase +{ + typedef typename internal::traits::PlainObjectType PlainObjectType; + typedef typename internal::traits::StrideType StrideType; + +public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(RefBase) + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const + { + return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const + { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : IsVectorAtCompileTime ? this->size() + : int(Flags)&RowMajorBit ? this->cols() + : this->rows(); + } + + EIGEN_DEVICE_FUNC RefBase() + : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime), + // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values: + m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime, + StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime) + {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase) + +protected: + + typedef Stride StrideBase; + + // Resolves inner stride if default 0. + static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveInnerStride(Index inner) { + return inner == 0 ? 1 : inner; + } + + // Resolves outer stride if default 0. 
+ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols, bool isVectorAtCompileTime, bool isRowMajor) { + return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer; + } + + // Returns true if construction is valid, false if there is a stride mismatch, + // and fails if there is a size mismatch. + template + EIGEN_DEVICE_FUNC bool construct(Expression& expr) + { + // Check matrix sizes. If this is a compile-time vector, we do allow + // implicitly transposing. + EIGEN_STATIC_ASSERT( + EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression) + // If it is a vector, the transpose sizes might match. + || ( PlainObjectType::IsVectorAtCompileTime + && ((int(PlainObjectType::RowsAtCompileTime)==Eigen::Dynamic + || int(Expression::ColsAtCompileTime)==Eigen::Dynamic + || int(PlainObjectType::RowsAtCompileTime)==int(Expression::ColsAtCompileTime)) + && (int(PlainObjectType::ColsAtCompileTime)==Eigen::Dynamic + || int(Expression::RowsAtCompileTime)==Eigen::Dynamic + || int(PlainObjectType::ColsAtCompileTime)==int(Expression::RowsAtCompileTime)))), + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES + ) + + // Determine runtime rows and columns. + Index rows = expr.rows(); + Index cols = expr.cols(); + if(PlainObjectType::RowsAtCompileTime==1) + { + eigen_assert(expr.rows()==1 || expr.cols()==1); + rows = 1; + cols = expr.size(); + } + else if(PlainObjectType::ColsAtCompileTime==1) + { + eigen_assert(expr.rows()==1 || expr.cols()==1); + rows = expr.size(); + cols = 1; + } + // Verify that the sizes are valid. + eigen_assert( + (PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows)); + eigen_assert( + (PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols)); + + + // If this is a vector, we might be transposing, which means that stride should swap. + const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows()); + // If the storage format differs, we also need to swap the stride. + const bool row_major = ((PlainObjectType::Flags)&RowMajorBit) != 0; + const bool expr_row_major = (Expression::Flags&RowMajorBit) != 0; + const bool storage_differs = (row_major != expr_row_major); + + const bool swap_stride = (transpose != storage_differs); + + // Determine expr's actual strides, resolving any defaults if zero. + const Index expr_inner_actual = resolveInnerStride(expr.innerStride()); + const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, + expr.outerStride(), + expr.rows(), + expr.cols(), + Expression::IsVectorAtCompileTime != 0, + expr_row_major); + + // If this is a column-major row vector or row-major column vector, the inner-stride + // is arbitrary, so set it to either the compile-time inner stride or 1. + const bool row_vector = (rows == 1); + const bool col_vector = (cols == 1); + const Index inner_stride = + ( (!row_major && row_vector) || (row_major && col_vector) ) ? + ( StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1) + : swap_stride ? expr_outer_actual : expr_inner_actual; + + // If this is a column-major column vector or row-major row vector, the outer-stride + // is arbitrary, so set it to either the compile-time outer stride or vector size. + const Index outer_stride = + ( (!row_major && col_vector) || (row_major && row_vector) ) ? + ( StrideType::OuterStrideAtCompileTime > 0 ? 
Index(StrideType::OuterStrideAtCompileTime) : rows * cols * inner_stride) + : swap_stride ? expr_inner_actual : expr_outer_actual; + + // Check if given inner/outer strides are compatible with compile-time strides. + const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) + || (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride); + if (!inner_valid) { + return false; + } + + const bool outer_valid = (StrideType::OuterStrideAtCompileTime == Dynamic) + || (resolveOuterStride( + inner_stride, + Index(StrideType::OuterStrideAtCompileTime), + rows, cols, PlainObjectType::IsVectorAtCompileTime != 0, + row_major) + == outer_stride); + if (!outer_valid) { + return false; + } + + ::new (static_cast(this)) Base(expr.data(), rows, cols); + ::new (&m_stride) StrideBase( + (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride, + (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride ); + return true; + } + + StrideBase m_stride; +}; + +/** \class Ref + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing expression + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. + * The default is \c #Unaligned. + * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), + * but accepts a variable outer stride (leading dimension). + * This can be overridden by specifying strides. + * The type passed here must be a specialization of the Stride template, see examples below. + * + * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies. + * A Ref<> object can represent either a const expression or a l-value: + * \code + * // in-out argument: + * void foo1(Ref x); + * + * // read-only const argument: + * void foo2(const Ref& x); + * \endcode + * + * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered. + * By default, a Ref can reference any dense vector expression of float having a contiguous memory layout. + * Likewise, a Ref can reference any column-major dense matrix expression of float whose column's elements are contiguously stored with + * the possibility to have a constant space in-between each column, i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) + * can be greater than the number of rows. + * + * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function. + * Here are some examples: + * \code + * MatrixXf A; + * VectorXf a; + * foo1(a.head()); // OK + * foo1(A.col()); // OK + * foo1(A.row()); // Compilation error because here innerstride!=1 + * foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object + * foo2(A.row().transpose()); // The row is copied into a contiguous temporary + * foo2(2*a); // The expression is evaluated into a temporary + * foo2(A.col().segment(2,4)); // No temporary + * \endcode + * + * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters. 
+ * Here is an example accepting an innerstride!=1: + * \code + * // in-out argument: + * void foo3(Ref > x); + * foo3(A.row()); // OK + * \endcode + * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more + * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overload internally calling a + * template function, e.g.: + * \code + * // in the .h: + * void foo(const Ref& A); + * void foo(const Ref >& A); + * + * // in the .cpp: + * template void foo_impl(const TypeOfA& A) { + * ... // crazy code goes here + * } + * void foo(const Ref& A) { foo_impl(A); } + * void foo(const Ref >& A) { foo_impl(A); } + * \endcode + * + * See also the following stackoverflow questions for further references: + * - Correct usage of the Eigen::Ref<> class + * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ +template class Ref + : public RefBase > +{ + private: + typedef internal::traits Traits; + template + EIGEN_DEVICE_FUNC inline Ref(const PlainObjectBase& expr, + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0); + public: + + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC inline Ref(PlainObjectBase& expr, + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) + { + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + // Construction must pass since we will not create temprary storage in the non-const case. + const bool success = Base::construct(expr.derived()); + EIGEN_UNUSED_VARIABLE(success) + eigen_assert(success); + } + template + EIGEN_DEVICE_FUNC inline Ref(const DenseBase& expr, + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) + #else + /** Implicit constructor from any dense expression */ + template + inline Ref(DenseBase& expr) + #endif + { + EIGEN_STATIC_ASSERT(bool(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + // Construction must pass since we will not create temporary storage in the non-const case. 
+ const bool success = Base::construct(expr.const_cast_derived()); + EIGEN_UNUSED_VARIABLE(success) + eigen_assert(success); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref) + +}; + +// this is the const ref version +template class Ref + : public RefBase > +{ + typedef internal::traits Traits; + public: + + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + + template + EIGEN_DEVICE_FUNC inline Ref(const DenseBase& expr, + typename internal::enable_if::ScalarTypeMatch),Derived>::type* = 0) + { +// std::cout << match_helper::HasDirectAccess << "," << match_helper::OuterStrideMatch << "," << match_helper::InnerStrideMatch << "\n"; +// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; +// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; + construct(expr.derived(), typename Traits::template match::type()); + } + + EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) { + // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy + } + + template + EIGEN_DEVICE_FUNC inline Ref(const RefBase& other) { + construct(other.derived(), typename Traits::template match::type()); + } + + protected: + + template + EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type) + { + // Check if we can use the underlying expr's storage directly, otherwise call the copy version. + if (!Base::construct(expr)) { + construct(expr, internal::false_type()); + } + } + + template + EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) + { + internal::call_assignment_no_alias(m_object,expr,internal::assign_op()); + Base::construct(m_object); + } + + protected: + TPlainObjectType m_object; +}; + +} // end namespace Eigen + +#endif // EIGEN_REF_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Replicate.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Replicate.h new file mode 100644 index 000000000..ab5be7e64 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Replicate.h @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REPLICATE_H +#define EIGEN_REPLICATE_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + enum { + RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic + ? Dynamic + : RowFactor * MatrixType::RowsAtCompileTime, + ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic + ? Dynamic + : ColFactor * MatrixType::ColsAtCompileTime, + //FIXME we don't propagate the max sizes !!! + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1 + : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 + : (MatrixType::Flags & RowMajorBit) ? 
1 : 0, + + // FIXME enable DirectAccess with negative strides? + Flags = IsRowMajor ? RowMajorBit : 0 + }; +}; +} + +/** + * \class Replicate + * \ingroup Core_Module + * + * \brief Expression of the multiple replication of a matrix or vector + * + * \tparam MatrixType the type of the object we are replicating + * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic. + * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic. + * + * This class represents an expression of the multiple replication of a matrix or vector. + * It is the return type of DenseBase::replicate() and most of the time + * this is the only way it is used. + * + * \sa DenseBase::replicate() + */ +template class Replicate + : public internal::dense_xpr_base< Replicate >::type +{ + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) + typedef typename internal::remove_all::type NestedExpression; + + template + EIGEN_DEVICE_FUNC + inline explicit Replicate(const OriginalMatrixType& matrix) + : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) + { + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); + } + + template + EIGEN_DEVICE_FUNC + inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) + : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) + { + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } + + EIGEN_DEVICE_FUNC + const _MatrixTypeNested& nestedExpression() const + { + return m_matrix; + } + + protected: + MatrixTypeNested m_matrix; + const internal::variable_if_dynamic m_rowFactor; + const internal::variable_if_dynamic m_colFactor; +}; + +/** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate.cpp + * Output: \verbinclude MatrixBase_replicate.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate + */ +template +template +EIGEN_DEVICE_FUNC const Replicate +DenseBase::replicate() const +{ + return Replicate(derived()); +} + +/** + * \return an expression of the replication of each column (or row) of \c *this + * + * Example: \include DirectionWise_replicate_int.cpp + * Output: \verbinclude DirectionWise_replicate_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ +template +EIGEN_DEVICE_FUNC const typename VectorwiseOp::ReplicateReturnType +VectorwiseOp::replicate(Index factor) const +{ + return typename VectorwiseOp::ReplicateReturnType + (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); +} + +} // end namespace Eigen + +#endif // EIGEN_REPLICATE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reshaped.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reshaped.h new file mode 100644 index 
000000000..52de73b6f --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reshaped.h @@ -0,0 +1,454 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2017 Gael Guennebaud +// Copyright (C) 2014 yoco +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RESHAPED_H +#define EIGEN_RESHAPED_H + +namespace Eigen { + +/** \class Reshaped + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size reshape + * + * \tparam XprType the type of the expression in which we are taking a reshape + * \tparam Rows the number of rows of the reshape we are taking at compile time (optional) + * \tparam Cols the number of columns of the reshape we are taking at compile time (optional) + * \tparam Order can be ColMajor or RowMajor, default is ColMajor. + * + * This class represents an expression of either a fixed-size or dynamic-size reshape. + * It is the return type of DenseBase::reshaped(NRowsType,NColsType) and + * most of the time this is the only way it is used. + * + * However, in C++98, if you want to directly maniputate reshaped expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. In C++11, it is advised to use the \em auto + * keyword for such use cases. + * + * Here is an example illustrating the dynamic case: + * \include class_Reshaped.cpp + * Output: \verbinclude class_Reshaped.out + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedReshaped.cpp + * Output: \verbinclude class_FixedReshaped.out + * + * \sa DenseBase::reshaped(NRowsType,NColsType) + */ + +namespace internal { + +template +struct traits > : traits +{ + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + enum{ + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, + RowsAtCompileTime = Rows, + ColsAtCompileTime = Cols, + MaxRowsAtCompileTime = Rows, + MaxColsAtCompileTime = Cols, + XpxStorageOrder = ((int(traits::Flags) & RowMajorBit) == RowMajorBit) ? RowMajor : ColMajor, + ReshapedStorageOrder = (RowsAtCompileTime == 1 && ColsAtCompileTime != 1) ? RowMajor + : (ColsAtCompileTime == 1 && RowsAtCompileTime != 1) ? ColMajor + : XpxStorageOrder, + HasSameStorageOrderAsXprType = (ReshapedStorageOrder == XpxStorageOrder), + InnerSize = (ReshapedStorageOrder==int(RowMajor)) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsXprType + ? int(inner_stride_at_compile_time::ret) + : Dynamic, + OuterStrideAtCompileTime = Dynamic, + + HasDirectAccess = internal::has_direct_access::ret + && (Order==int(XpxStorageOrder)) + && ((evaluator::Flags&LinearAccessBit)==LinearAccessBit), + + MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits::size) == 0) + && (InnerStrideAtCompileTime == 1) + ? PacketAccessBit : 0, + //MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = (ReshapedStorageOrder==int(RowMajor)) ? 
RowMajorBit : 0, + FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0, + Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | MaskPacketAccessBit), + + Flags = (Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit | FlagsDirectAccessBit) + }; +}; + +template class ReshapedImpl_dense; + +} // end namespace internal + +template class ReshapedImpl; + +template class Reshaped + : public ReshapedImpl::StorageKind> +{ + typedef ReshapedImpl::StorageKind> Impl; + public: + //typedef typename Impl::Base Base; + typedef Impl Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Reshaped) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reshaped) + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC + inline Reshaped(XprType& xpr) + : Impl(xpr) + { + EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) + eigen_assert(Rows * Cols == xpr.rows() * xpr.cols()); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC + inline Reshaped(XprType& xpr, + Index reshapeRows, Index reshapeCols) + : Impl(xpr, reshapeRows, reshapeCols) + { + eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==reshapeRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==reshapeCols)); + eigen_assert(reshapeRows * reshapeCols == xpr.rows() * xpr.cols()); + } +}; + +// The generic default implementation for dense reshape simply forward to the internal::ReshapedImpl_dense +// that must be specialized for direct and non-direct access... +template +class ReshapedImpl + : public internal::ReshapedImpl_dense >::HasDirectAccess> +{ + typedef internal::ReshapedImpl_dense >::HasDirectAccess> Impl; + public: + typedef Impl Base; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl) + EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr) : Impl(xpr) {} + EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr, Index reshapeRows, Index reshapeCols) + : Impl(xpr, reshapeRows, reshapeCols) {} +}; + +namespace internal { + +/** \internal Internal implementation of dense Reshaped in the general case. 
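+  * (Illustrative aside: this expression is what the public reshaped() API
+  * returns, assuming the Eigen 3.4 entry points.)
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 6);
+  * auto B = A.reshaped(2, 12);                  // dynamic sizes, column-major order
+  * auto C = A.reshaped<Eigen::RowMajor>(8, 3);  // row-major element traversal
+  * Eigen::VectorXd v = A.reshaped();            // flattened linear view
+  * \endcode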
*/ +template +class ReshapedImpl_dense + : public internal::dense_xpr_base >::type +{ + typedef Reshaped ReshapedType; + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) + + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef typename internal::remove_all::type NestedExpression; + + class InnerIterator; + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC + inline ReshapedImpl_dense(XprType& xpr) + : m_xpr(xpr), m_rows(Rows), m_cols(Cols) + {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC + inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) + : m_xpr(xpr), m_rows(nRows), m_cols(nCols) + {} + + EIGEN_DEVICE_FUNC Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC Index cols() const { return m_cols; } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \sa MapBase::data() */ + EIGEN_DEVICE_FUNC inline const Scalar* data() const; + EIGEN_DEVICE_FUNC inline Index innerStride() const; + EIGEN_DEVICE_FUNC inline Index outerStride() const; + #endif + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + typename internal::remove_reference::type& + nestedExpression() { return m_xpr; } + + protected: + + MatrixTypeNested m_xpr; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; +}; + + +/** \internal Internal implementation of dense Reshaped in the direct access case. */ +template +class ReshapedImpl_dense + : public MapBase > +{ + typedef Reshaped ReshapedType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC + inline ReshapedImpl_dense(XprType& xpr) + : Base(xpr.data()), m_xpr(xpr) + {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC + inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) + : Base(xpr.data(), nRows, nCols), + m_xpr(xpr) + {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& nestedExpression() const + { + return m_xpr; + } + + EIGEN_DEVICE_FUNC + XprType& nestedExpression() { return m_xpr; } + + /** \sa MapBase::innerStride() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index innerStride() const + { + return m_xpr.innerStride(); + } + + /** \sa MapBase::outerStride() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index outerStride() const + { + return ((Flags&RowMajorBit)==RowMajorBit) ? this->cols() : this->rows(); + } + + protected: + + XprTypeNested m_xpr; +}; + +// Evaluators +template struct reshaped_evaluator; + +template +struct evaluator > + : reshaped_evaluator >::HasDirectAccess> +{ + typedef Reshaped XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types + typedef typename packet_traits::type PacketScalar; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + HasDirectAccess = traits::HasDirectAccess, + +// RowsAtCompileTime = traits::RowsAtCompileTime, +// ColsAtCompileTime = traits::ColsAtCompileTime, +// MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, +// MaxColsAtCompileTime = traits::MaxColsAtCompileTime, +// +// InnerStrideAtCompileTime = traits::HasSameStorageOrderAsXprType +// ? 
int(inner_stride_at_compile_time::ret) +// : Dynamic, +// OuterStrideAtCompileTime = Dynamic, + + FlagsLinearAccessBit = (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1 || HasDirectAccess) ? LinearAccessBit : 0, + FlagsRowMajorBit = (traits::ReshapedStorageOrder==int(RowMajor)) ? RowMajorBit : 0, + FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0, + Flags0 = evaluator::Flags & (HereditaryBits & ~RowMajorBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit | FlagsDirectAccessBit, + + PacketAlignment = unpacket_traits::alignment, + Alignment = evaluator::Alignment + }; + typedef reshaped_evaluator reshaped_evaluator_type; + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : reshaped_evaluator_type(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } +}; + +template +struct reshaped_evaluator + : evaluator_base > +{ + typedef Reshaped XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of index computations */, + + Flags = (evaluator::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)), + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + typedef std::pair RowCol; + + inline RowCol index_remap(Index rowId, Index colId) const + { + if(Order==ColMajor) + { + const Index nth_elem_idx = colId * m_xpr.rows() + rowId; + return RowCol(nth_elem_idx % m_xpr.nestedExpression().rows(), + nth_elem_idx / m_xpr.nestedExpression().rows()); + } + else + { + const Index nth_elem_idx = colId + rowId * m_xpr.cols(); + return RowCol(nth_elem_idx / m_xpr.nestedExpression().cols(), + nth_elem_idx % m_xpr.nestedExpression().cols()); + } + } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index rowId, Index colId) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const + { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeff(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, + Rows == 1 ? index : 0); + return m_argImpl.coeffRef(row_col.first, row_col.second); + + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, + Rows == 1 ? index : 0); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC + inline const CoeffReturnType coeff(Index index) const + { + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, + Rows == 1 ? 
index : 0); + return m_argImpl.coeff(row_col.first, row_col.second); + } +#if 0 + EIGEN_DEVICE_FUNC + template + inline PacketScalar packet(Index rowId, Index colId) const + { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.template packet(row_col.first, row_col.second); + + } + + template + EIGEN_DEVICE_FUNC + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) + { + const RowCol row_col = index_remap(rowId, colId); + m_argImpl.const_cast_derived().template writePacket + (row_col.first, row_col.second, val); + } + + template + EIGEN_DEVICE_FUNC + inline PacketScalar packet(Index index) const + { + const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + return m_argImpl.template packet(row_col.first, row_col.second); + } + + template + EIGEN_DEVICE_FUNC + inline void writePacket(Index index, const PacketScalar& val) + { + const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + return m_argImpl.template packet(row_col.first, row_col.second, val); + } +#endif +protected: + + evaluator m_argImpl; + const XprType& m_xpr; + +}; + +template +struct reshaped_evaluator +: mapbase_evaluator, + typename Reshaped::PlainObject> +{ + typedef Reshaped XprType; + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) + : mapbase_evaluator(xpr) + { + // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime + eigen_assert(((internal::UIntPtr(xpr.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator::Alignment)) == 0) && "data is not aligned"); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_RESHAPED_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ReturnByValue.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ReturnByValue.h new file mode 100644 index 000000000..4dad13ea1 --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/ReturnByValue.h @@ -0,0 +1,119 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// Copyright (C) 2009-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RETURNBYVALUE_H +#define EIGEN_RETURNBYVALUE_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : public traits::ReturnType> +{ + enum { + // We're disabling the DirectAccess because e.g. the constructor of + // the Block-with-DirectAccess expression requires to have a coeffRef method. + // Also, we don't want to have to implement the stride stuff. + Flags = (traits::ReturnType>::Flags + | EvalBeforeNestingBit) & ~DirectAccessBit + }; +}; + +/* The ReturnByValue object doesn't even have a coeff() method. + * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. + * So internal::nested always gives the plain return matrix type. + * + * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? 
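For reference, a minimal sketch (illustrative only, not part of the patch) of how the Reshaped expression implemented above is reached through the public DenseBase::reshaped() API:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix<double, 4, 3> m;
  m <<  1,  2,  3,
        4,  5,  6,
        7,  8,  9,
       10, 11, 12;
  // reshaped(rows, cols) returns a Reshaped<> view; coefficients are
  // traversed in column-major order by default and no data is copied.
  std::cout << m.reshaped(2, 6) << "\n\n";
  // With no arguments, reshaped() views all coefficients as a column vector.
  std::cout << m.reshaped().transpose() << "\n";
}
```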
+ * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators + */ +template +struct nested_eval, n, PlainObject> +{ + typedef typename traits::ReturnType type; +}; + +} // end namespace internal + +/** \class ReturnByValue + * \ingroup Core_Module + * + */ +template class ReturnByValue + : public internal::dense_xpr_base< ReturnByValue >::type, internal::no_assignment_operator +{ + public: + typedef typename internal::traits::ReturnType ReturnType; + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) + + template + EIGEN_DEVICE_FUNC + inline void evalTo(Dest& dst) const + { static_cast(this)->evalTo(dst); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return static_cast(this)->rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return static_cast(this)->cols(); } + +#ifndef EIGEN_PARSED_BY_DOXYGEN +#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT + class Unusable{ + Unusable(const Unusable&) {} + Unusable& operator=(const Unusable&) {return *this;} + }; + const Unusable& coeff(Index) const { return *reinterpret_cast(this); } + const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } + Unusable& coeffRef(Index) { return *reinterpret_cast(this); } + Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } +#undef Unusable +#endif +}; + +template +template +EIGEN_DEVICE_FUNC Derived& DenseBase::operator=(const ReturnByValue& other) +{ + other.evalTo(derived()); + return derived(); +} + +namespace internal { + +// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that +// when a ReturnByValue expression is assigned, the evaluator is not constructed. +// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world + +template +struct evaluator > + : public evaluator::ReturnType> +{ + typedef ReturnByValue XprType; + typedef typename internal::traits::ReturnType PlainObject; + typedef evaluator Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + xpr.evalTo(m_result); + } + +protected: + PlainObject m_result; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_RETURNBYVALUE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reverse.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reverse.h new file mode 100644 index 000000000..28cdd76ac --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Reverse.h @@ -0,0 +1,217 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2009 Ricard Marxer +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
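A sketch of the ReturnByValue protocol just added (illustrative, not part of Eigen or this patch): a custom expression derives from ReturnByValue, declares its ReturnType in internal::traits, and implements evalTo(), which DenseBase::operator= invokes on assignment. The FillConst type below is hypothetical.

```cpp
#include <Eigen/Core>
#include <iostream>

template<typename Scalar> struct FillConst;

namespace Eigen { namespace internal {
template<typename Scalar>
struct traits<FillConst<Scalar> > {
  typedef Matrix<Scalar, Dynamic, Dynamic> ReturnType;
};
} }

// Hypothetical expression: fills its destination with a constant, and is
// only evaluated when assigned to a dense object (via evalTo()).
template<typename Scalar>
struct FillConst : public Eigen::ReturnByValue<FillConst<Scalar> > {
  FillConst(Eigen::Index r, Eigen::Index c, Scalar v) : m_r(r), m_c(c), m_v(v) {}
  template<typename Dest> void evalTo(Dest& dst) const { dst.setConstant(m_r, m_c, m_v); }
  Eigen::Index rows() const { return m_r; }
  Eigen::Index cols() const { return m_c; }
  Eigen::Index m_r, m_c;
  Scalar m_v;
};

int main() {
  Eigen::MatrixXd m;
  m = FillConst<double>(2, 3, 1.5);  // DenseBase::operator= calls evalTo() here
  std::cout << m << "\n";
}
```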
+ +#ifndef EIGEN_REVERSE_H +#define EIGEN_REVERSE_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : traits +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit) + }; +}; + +template struct reverse_packet_cond +{ + static inline PacketType run(const PacketType& x) { return preverse(x); } +}; + +template struct reverse_packet_cond +{ + static inline PacketType run(const PacketType& x) { return x; } +}; + +} // end namespace internal + +/** \class Reverse + * \ingroup Core_Module + * + * \brief Expression of the reverse of a vector or matrix + * + * \tparam MatrixType the type of the object of which we are taking the reverse + * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections + * + * This class represents an expression of the reverse of a vector. + * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::reverse(), VectorwiseOp::reverse() + */ +template class Reverse + : public internal::dense_xpr_base< Reverse >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) + typedef typename internal::remove_all::type NestedExpression; + using Base::IsRowMajor; + + protected: + enum { + PacketSize = internal::packet_traits::size, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1, + ReversePacket = (Direction == BothDirections) + || ((Direction == Vertical) && IsColMajor) + || ((Direction == Horizontal) && IsRowMajor) + }; + typedef internal::reverse_packet_cond reverse_packet; + public: + + EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + EIGEN_DEVICE_FUNC inline Index innerStride() const + { + return -m_matrix.innerStride(); + } + + EIGEN_DEVICE_FUNC const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + + protected: + typename MatrixType::Nested m_matrix; +}; + +/** \returns an expression of the reverse of *this. + * + * Example: \include MatrixBase_reverse.cpp + * Output: \verbinclude MatrixBase_reverse.out + * + */ +template +EIGEN_DEVICE_FUNC inline typename DenseBase::ReverseReturnType +DenseBase::reverse() +{ + return ReverseReturnType(derived()); +} + + +//reverse const overload moved DenseBase.h due to a CUDA compiler bug + +/** This is the "in place" version of reverse: it reverses \c *this. 
+ * + * In most cases it is probably better to simply use the reversed expression + * of a matrix. However, when reversing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional benefits: + * - less error prone: doing the same operation with .reverse() requires special care: + * \code m = m.reverse().eval(); \endcode + * - this API enables reverse operations without the need for a temporary + * - it allows future optimizations (cache friendliness, etc.) + * + * \sa VectorwiseOp::reverseInPlace(), reverse() */ +template +EIGEN_DEVICE_FUNC inline void DenseBase::reverseInPlace() +{ + if(cols()>rows()) + { + Index half = cols()/2; + leftCols(half).swap(rightCols(half).reverse()); + if((cols()%2)==1) + { + Index half2 = rows()/2; + col(half).head(half2).swap(col(half).tail(half2).reverse()); + } + } + else + { + Index half = rows()/2; + topRows(half).swap(bottomRows(half).reverse()); + if((rows()%2)==1) + { + Index half2 = cols()/2; + row(half).head(half2).swap(row(half).tail(half2).reverse()); + } + } +} + +namespace internal { + +template +struct vectorwise_reverse_inplace_impl; + +template<> +struct vectorwise_reverse_inplace_impl +{ + template + static void run(ExpressionType &xpr) + { + const int HalfAtCompileTime = ExpressionType::RowsAtCompileTime==Dynamic?Dynamic:ExpressionType::RowsAtCompileTime/2; + Index half = xpr.rows()/2; + xpr.topRows(fix(half)) + .swap(xpr.bottomRows(fix(half)).colwise().reverse()); + } +}; + +template<> +struct vectorwise_reverse_inplace_impl +{ + template + static void run(ExpressionType &xpr) + { + const int HalfAtCompileTime = ExpressionType::ColsAtCompileTime==Dynamic?Dynamic:ExpressionType::ColsAtCompileTime/2; + Index half = xpr.cols()/2; + xpr.leftCols(fix(half)) + .swap(xpr.rightCols(fix(half)).rowwise().reverse()); + } +}; + +} // end namespace internal + +/** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this. + * + * In most cases it is probably better to simply use the reversed expression + * of a matrix. However, when reversing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional benefits: + * - less error prone: doing the same operation with .reverse() requires special care: + * \code m = m.reverse().eval(); \endcode + * - this API enables reverse operations without the need for a temporary + * + * \sa DenseBase::reverseInPlace(), reverse() */ +template +EIGEN_DEVICE_FUNC void VectorwiseOp::reverseInPlace() +{ + internal::vectorwise_reverse_inplace_impl::run(m_matrix); +} + +} // end namespace Eigen + +#endif // EIGEN_REVERSE_H diff --git a/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Select.h b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Select.h new file mode 100644 index 000000000..7c86bf87c --- /dev/null +++ b/src/reference/ai_poc/anomaly_det/Eigen/src/Core/Select.h @@ -0,0 +1,164 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
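A short usage sketch (illustrative, not part of the patch) of the Reverse expression and the in-place variants defined above:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.reverse()           << "\n\n";  // both directions reversed
  std::cout << m.rowwise().reverse() << "\n\n";  // each row reversed
  m.reverseInPlace();  // no temporary, unlike m = m.reverse().eval()
  std::cout << m << "\n";
}
```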
+ +#ifndef EIGEN_SELECT_H +#define EIGEN_SELECT_H + +namespace Eigen { + +/** \class Select + * \ingroup Core_Module + * + * \brief Expression of a coefficient wise version of the C++ ternary operator ?: + * + * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix + * \param ThenMatrixType the type of the \em then expression + * \param ElseMatrixType the type of the \em else expression + * + * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. + * It is the return type of DenseBase::select() and most of the time this is the only way it is used. + * + * \sa DenseBase::select(const DenseBase&, const DenseBase&) const + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename traits::Scalar Scalar; + typedef Dense StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ConditionMatrixType::Nested ConditionMatrixNested; + typedef typename ThenMatrixType::Nested ThenMatrixNested; + typedef typename ElseMatrixType::Nested ElseMatrixNested; + enum { + RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, + ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, + Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit + }; +}; +} + +template +class Select : public internal::dense_xpr_base< Select >::type, + internal::no_assignment_operator +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Select) + + inline EIGEN_DEVICE_FUNC + Select(const ConditionMatrixType& a_conditionMatrix, + const ThenMatrixType& a_thenMatrix, + const ElseMatrixType& a_elseMatrix) + : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix) + { + eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows()); + eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols()); + } + + inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); } + inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); } + + inline EIGEN_DEVICE_FUNC + const Scalar coeff(Index i, Index j) const + { + if (m_condition.coeff(i,j)) + return m_then.coeff(i,j); + else + return m_else.coeff(i,j); + } + + inline EIGEN_DEVICE_FUNC + const Scalar coeff(Index i) const + { + if (m_condition.coeff(i)) + return m_then.coeff(i); + else + return m_else.coeff(i); + } + + inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const + { + return m_condition; + } + + inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const + { + return m_then; + } + + inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const + { + return m_else; + } + + protected: + typename ConditionMatrixType::Nested m_condition; + typename ThenMatrixType::Nested m_then; + typename ElseMatrixType::Nested m_else; +}; + + +/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j) + * if \c *this(i,j), and \a elseMatrix(i,j) otherwise. 
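A usage sketch for the Select expression (mirroring the MatrixBase_select.cpp snippet the documentation below refers to; illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXi m(3, 3);
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  // Coefficient-wise ternary: negate entries >= 5, keep the others.
  m = (m.array() >= 5).select(-m, m);
  std::cout << m << "\n";
}
```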
+ * + * Example: \include MatrixBase_select.cpp + * Output: \verbinclude MatrixBase_select.out + * + * \sa class Select + */ +template +template +inline const Select +DenseBase::select(const DenseBase& thenMatrix, + const DenseBase& elseMatrix) const +{ + return Select(derived(), thenMatrix.derived(), elseMatrix.derived()); +} + +/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with + * the \em else expression being a scalar value. + * + * \sa DenseBase::select(const DenseBase&, const DenseBase&) const, class Select + */ +template +template +inline const Select +DenseBase::select(const DenseBase& thenMatrix, + const typename ThenDerived::Scalar& elseScalar) const +{ + return Select( + derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar)); +} + +/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with + * the \em then expression being a scalar value. + * + * \sa DenseBase::select(const DenseBase&, const DenseBase&) const, class Select + */ +template +template +inline const Select +DenseBase::select(const typename ElseDerived::Scalar& thenScalar, + const DenseBase& elseMatrix) const +{ + return Select( + derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SELECT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfAdjointView.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfAdjointView.h new file mode 100644 index 000000000..b2e51f37a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfAdjointView.h @@ -0,0 +1,352 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELFADJOINTMATRIX_H +#define EIGEN_SELFADJOINTMATRIX_H + +namespace Eigen { + +/** \class SelfAdjointView + * \ingroup Core_Module + * + * + * \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix + * + * \param MatrixType the type of the dense matrix storing the coefficients + * \param TriangularPart can be either \c #Lower or \c #Upper + * + * This class is an expression of a sefladjoint matrix from a triangular part of a matrix + * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView() + * and most of the time this is the only way that it is used. + * + * \sa class TriangularBase, MatrixBase::selfadjointView() + */ + +namespace internal { +template +struct traits > : traits +{ + typedef typename ref_selector::non_const_type MatrixTypeNested; + typedef typename remove_all::type MatrixTypeNestedCleaned; + typedef MatrixType ExpressionType; + typedef typename MatrixType::PlainObject FullMatrixType; + enum { + Mode = UpLo | SelfAdjoint, + FlagsLvalueBit = is_lvalue::value ? 
LvalueBit : 0, + Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits|FlagsLvalueBit) + & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)) // FIXME these flags should be preserved + }; +}; +} + + +template class SelfAdjointView + : public TriangularBase > +{ + public: + + typedef _MatrixType MatrixType; + typedef TriangularBase Base; + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNestedCleaned MatrixTypeNestedCleaned; + typedef MatrixTypeNestedCleaned NestedExpression; + + /** \brief The type of coefficients in this matrix */ + typedef typename internal::traits::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename internal::remove_all::type MatrixConjugateReturnType; + + enum { + Mode = internal::traits::Mode, + Flags = internal::traits::Flags, + TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0) + }; + typedef typename MatrixType::PlainObject PlainObject; + + EIGEN_DEVICE_FUNC + explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix) + { + EIGEN_STATIC_ASSERT(UpLo==Lower || UpLo==Upper,SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY); + } + + EIGEN_DEVICE_FUNC + inline Index rows() const { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC + inline Index cols() const { return m_matrix.cols(); } + EIGEN_DEVICE_FUNC + inline Index outerStride() const { return m_matrix.outerStride(); } + EIGEN_DEVICE_FUNC + inline Index innerStride() const { return m_matrix.innerStride(); } + + /** \sa MatrixBase::coeff() + * \warning the coordinates must fit into the referenced triangular part + */ + EIGEN_DEVICE_FUNC + inline Scalar coeff(Index row, Index col) const + { + Base::check_coordinates_internal(row, col); + return m_matrix.coeff(row, col); + } + + /** \sa MatrixBase::coeffRef() + * \warning the coordinates must fit into the referenced triangular part + */ + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index row, Index col) + { + EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView); + Base::check_coordinates_internal(row, col); + return m_matrix.coeffRef(row, col); + } + + /** \internal */ + EIGEN_DEVICE_FUNC + const MatrixTypeNestedCleaned& _expression() const { return m_matrix; } + + EIGEN_DEVICE_FUNC + const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } + EIGEN_DEVICE_FUNC + MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; } + + /** Efficient triangular matrix times vector/matrix product */ + template + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase& rhs) const + { + return Product(*this, rhs.derived()); + } + + /** Efficient vector/matrix times triangular matrix product */ + template friend + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase& lhs, const SelfAdjointView& rhs) + { + return Product(lhs.derived(),rhs); + } + + friend EIGEN_DEVICE_FUNC + const SelfAdjointView + operator*(const Scalar& s, const SelfAdjointView& mat) + { + return (s*mat.nestedExpression()).template selfadjointView(); + } + + /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this: + * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$ + * \returns a reference to \c *this + * + * The vectors \a u and \c v \b must be column vectors, however they can be + * a adjoint expression without any overhead. Only the meaningful triangular + * part of the matrix is updated, the rest is left unchanged. 
+ * + * \sa rankUpdate(const MatrixBase&, Scalar) + */ + template + EIGEN_DEVICE_FUNC + SelfAdjointView& rankUpdate(const MatrixBase& u, const MatrixBase& v, const Scalar& alpha = Scalar(1)); + + /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: + * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. + * + * \returns a reference to \c *this + * + * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply + * call this function with u.adjoint(). + * + * \sa rankUpdate(const MatrixBase&, const MatrixBase&, Scalar) + */ + template + EIGEN_DEVICE_FUNC + SelfAdjointView& rankUpdate(const MatrixBase& u, const Scalar& alpha = Scalar(1)); + + /** \returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular part + * + * The parameter \a TriMode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper, + * \c #Lower, \c #StrictlyLower, \c #UnitLower. + * + * If \c TriMode references the same triangular part than \c *this, then this method simply return a \c TriangularView of the nested expression, + * otherwise, the nested expression is first transposed, thus returning a \c TriangularView> object. + * + * \sa MatrixBase::triangularView(), class TriangularView + */ + template + EIGEN_DEVICE_FUNC + typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), + TriangularView, + TriangularView >::type + triangularView() const + { + typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::ConstTransposeReturnType>::type tmp1(m_matrix); + typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::AdjointReturnType>::type tmp2(tmp1); + return typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), + TriangularView, + TriangularView >::type(tmp2); + } + + typedef SelfAdjointView ConjugateReturnType; + /** \sa MatrixBase::conjugate() const */ + EIGEN_DEVICE_FUNC + inline const ConjugateReturnType conjugate() const + { return ConjugateReturnType(m_matrix.conjugate()); } + + typedef SelfAdjointView AdjointReturnType; + /** \sa MatrixBase::adjoint() const */ + EIGEN_DEVICE_FUNC + inline const AdjointReturnType adjoint() const + { return AdjointReturnType(m_matrix.adjoint()); } + + typedef SelfAdjointView TransposeReturnType; + /** \sa MatrixBase::transpose() */ + EIGEN_DEVICE_FUNC + inline TransposeReturnType transpose() + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + typename MatrixType::TransposeReturnType tmp(m_matrix); + return TransposeReturnType(tmp); + } + + typedef SelfAdjointView ConstTransposeReturnType; + /** \sa MatrixBase::transpose() const */ + EIGEN_DEVICE_FUNC + inline const ConstTransposeReturnType transpose() const + { + return ConstTransposeReturnType(m_matrix.transpose()); + } + + /** \returns a const expression of the main diagonal of the matrix \c *this + * + * This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator. 
+ * + * \sa MatrixBase::diagonal(), class Diagonal */ + EIGEN_DEVICE_FUNC + typename MatrixType::ConstDiagonalReturnType diagonal() const + { + return typename MatrixType::ConstDiagonalReturnType(m_matrix); + } + +/////////// Cholesky module /////////// + + const LLT llt() const; + const LDLT ldlt() const; + +/////////// Eigenvalue module /////////// + + /** Real part of #Scalar */ + typedef typename NumTraits::Real RealScalar; + /** Return type of eigenvalues() */ + typedef Matrix::ColsAtCompileTime, 1> EigenvaluesReturnType; + + EIGEN_DEVICE_FUNC + EigenvaluesReturnType eigenvalues() const; + EIGEN_DEVICE_FUNC + RealScalar operatorNorm() const; + + protected: + MatrixTypeNested m_matrix; +}; + + +// template +// internal::selfadjoint_matrix_product_returntype > +// operator*(const MatrixBase& lhs, const SelfAdjointView& rhs) +// { +// return internal::matrix_selfadjoint_product_returntype >(lhs.derived(),rhs); +// } + +// selfadjoint to dense matrix + +namespace internal { + +// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.> +// in the future selfadjoint-ness should be defined by the expression traits +// such that Transpose > is valid. (currently TriangularBase::transpose() is overloaded to make it work) +template +struct evaluator_traits > +{ + typedef typename storage_kind_to_evaluator_kind::Kind Kind; + typedef SelfAdjointShape Shape; +}; + +template +class triangular_dense_assignment_kernel + : public generic_dense_assignment_kernel +{ +protected: + typedef generic_dense_assignment_kernel Base; + typedef typename Base::DstXprType DstXprType; + typedef typename Base::SrcXprType SrcXprType; + using Base::m_dst; + using Base::m_src; + using Base::m_functor; +public: + + typedef typename Base::DstEvaluatorType DstEvaluatorType; + typedef typename Base::SrcEvaluatorType SrcEvaluatorType; + typedef typename Base::Scalar Scalar; + typedef typename Base::AssignmentTraits AssignmentTraits; + + + EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) + : Base(dst, src, func, dstExpr) + {} + + EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) + { + eigen_internal_assert(row!=col); + Scalar tmp = m_src.coeff(row,col); + m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp); + m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp)); + } + + EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) + { + Base::assignCoeff(id,id); + } + + EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index) + { eigen_internal_assert(false && "should never be called"); } +}; + +} // end namespace internal + +/*************************************************************************** +* Implementation of MatrixBase methods +***************************************************************************/ + +/** This is the const version of MatrixBase::selfadjointView() */ +template +template +typename MatrixBase::template ConstSelfAdjointViewReturnType::Type +MatrixBase::selfadjointView() const +{ + return typename ConstSelfAdjointViewReturnType::Type(derived()); +} + +/** \returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the current matrix + * + * The parameter \a UpLo can be either \c #Upper or \c #Lower + * + * Example: \include MatrixBase_selfadjointView.cpp + * Output: \verbinclude MatrixBase_selfadjointView.out + * + * \sa class SelfAdjointView + */ +template +template +typename MatrixBase::template 
SelfAdjointViewReturnType::Type +MatrixBase::selfadjointView() +{ + return typename SelfAdjointViewReturnType::Type(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINTMATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h new file mode 100644 index 000000000..7c89c2e23 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h @@ -0,0 +1,47 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELFCWISEBINARYOP_H +#define EIGEN_SELFCWISEBINARYOP_H + +namespace Eigen { + +// TODO generalize the scalar type of 'other' + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator*=(const Scalar& other) +{ + internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator+=(const Scalar& other) +{ + internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator-=(const Scalar& other) +{ + internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator/=(const Scalar& other) +{ + internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_SELFCWISEBINARYOP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Solve.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Solve.h new file mode 100644 index 000000000..a8daea511 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Solve.h @@ -0,0 +1,188 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SOLVE_H +#define EIGEN_SOLVE_H + +namespace Eigen { + +template class SolveImpl; + +/** \class Solve + * \ingroup Core_Module + * + * \brief Pseudo expression representing a solving operation + * + * \tparam Decomposition the type of the matrix or decomposion object + * \tparam Rhstype the type of the right-hand side + * + * This class represents an expression of A.solve(B) + * and most of the time this is the only way it is used. 
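A usage sketch (illustrative, not part of the patch) for the SelfAdjointView added above: only the named triangle is referenced, rankUpdate() performs s += alpha * u * u^T, and the view feeds directly into the Cholesky solvers:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd s = Eigen::MatrixXd::Zero(3, 3);
  // Only the lower triangle of s is read and written: s += a * a^T.
  s.selfadjointView<Eigen::Lower>().rankUpdate(a);
  // Solve s * x = b through the Cholesky factorization of the view.
  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
  Eigen::VectorXd x = s.selfadjointView<Eigen::Lower>().llt().solve(b);
  std::cout << x.transpose() << "\n";
}
```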
+ * + */ +namespace internal { + +// this solve_traits class permits to determine the evaluation type with respect to storage kind (Dense vs Sparse) +template struct solve_traits; + +template +struct solve_traits +{ + typedef typename make_proper_matrix_type::type PlainObject; +}; + +template +struct traits > + : traits::StorageKind>::PlainObject> +{ + typedef typename solve_traits::StorageKind>::PlainObject PlainObject; + typedef typename promote_index_type::type StorageIndex; + typedef traits BaseTraits; + enum { + Flags = BaseTraits::Flags & RowMajorBit, + CoeffReadCost = HugeCost + }; +}; + +} + + +template +class Solve : public SolveImpl::StorageKind> +{ +public: + typedef typename internal::traits::PlainObject PlainObject; + typedef typename internal::traits::StorageIndex StorageIndex; + + Solve(const Decomposition &dec, const RhsType &rhs) + : m_dec(dec), m_rhs(rhs) + {} + + EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } + EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } + + EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } + EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } + +protected: + const Decomposition &m_dec; + const RhsType &m_rhs; +}; + + +// Specialization of the Solve expression for dense results +template +class SolveImpl + : public MatrixBase > +{ + typedef Solve Derived; + +public: + + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + +private: + + Scalar coeff(Index row, Index col) const; + Scalar coeff(Index i) const; +}; + +// Generic API dispatcher +template +class SolveImpl : public internal::generic_xpr_base, MatrixXpr, StorageKind>::type +{ + public: + typedef typename internal::generic_xpr_base, MatrixXpr, StorageKind>::type Base; +}; + +namespace internal { + +// Evaluator of Solve -> eval into a temporary +template +struct evaluator > + : public evaluator::PlainObject> +{ + typedef Solve SolveType; + typedef typename SolveType::PlainObject PlainObject; + typedef evaluator Base; + + enum { Flags = Base::Flags | EvalBeforeNestingBit }; + + EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve) + : m_result(solve.rows(), solve.cols()) + { + ::new (static_cast(this)) Base(m_result); + solve.dec()._solve_impl(solve.rhs(), m_result); + } + +protected: + PlainObject m_result; +}; + +// Specialization for "dst = dec.solve(rhs)" +// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere +template +struct Assignment, internal::assign_op, Dense2Dense> +{ + typedef Solve SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + src.dec()._solve_impl(src.rhs(), dst); + } +}; + +// Specialization for "dst = dec.transpose().solve(rhs)" +template +struct Assignment,RhsType>, internal::assign_op, Dense2Dense> +{ + typedef Solve,RhsType> SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + src.dec().nestedExpression().template _solve_impl_transposed(src.rhs(), dst); + } +}; + +// Specialization for "dst = dec.adjoint().solve(rhs)" +template +struct Assignment, const Transpose >,RhsType>, + internal::assign_op, 
Dense2Dense> +{ + typedef Solve, const Transpose >,RhsType> SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed(src.rhs(), dst); + } +}; + +} // end namepsace internal + +} // end namespace Eigen + +#endif // EIGEN_SOLVE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolveTriangular.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolveTriangular.h new file mode 100644 index 000000000..4652e2e19 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolveTriangular.h @@ -0,0 +1,235 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SOLVETRIANGULAR_H +#define EIGEN_SOLVETRIANGULAR_H + +namespace Eigen { + +namespace internal { + +// Forward declarations: +// The following two routines are implemented in the products/TriangularSolver*.h files +template +struct triangular_solve_vector; + +template +struct triangular_solve_matrix; + +// small helper struct extracting some traits on the underlying solver operation +template +class trsolve_traits +{ + private: + enum { + RhsIsVectorAtCompileTime = (Side==OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime)==1 + }; + public: + enum { + Unrolling = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8) + ? CompleteUnrolling : NoUnrolling, + RhsVectors = RhsIsVectorAtCompileTime ? 1 : Dynamic + }; +}; + +template::Unrolling, + int RhsVectors = trsolve_traits::RhsVectors + > +struct triangular_solver_selector; + +template +struct triangular_solver_selector +{ + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef blas_traits LhsProductTraits; + typedef typename LhsProductTraits::ExtractType ActualLhsType; + typedef Map, Aligned> MappedRhs; + static void run(const Lhs& lhs, Rhs& rhs) + { + ActualLhsType actualLhs = LhsProductTraits::extract(lhs); + + // FIXME find a way to allow an inner stride if packet_traits::size==1 + + bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1; + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(), + (useRhsDirectly ? 
rhs.data() : 0)); + + if(!useRhsDirectly) + MappedRhs(actualRhs,rhs.size()) = rhs; + + triangular_solve_vector + ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs); + + if(!useRhsDirectly) + rhs = MappedRhs(actualRhs, rhs.size()); + } +}; + +// the rhs is a matrix +template +struct triangular_solver_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef blas_traits LhsProductTraits; + typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; + + static void run(const Lhs& lhs, Rhs& rhs) + { + typename internal::add_const_on_value_type::type actualLhs = LhsProductTraits::extract(lhs); + + const Index size = lhs.rows(); + const Index othersize = Side==OnTheLeft? rhs.cols() : rhs.rows(); + + typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar, + Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType; + + BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false); + + triangular_solve_matrix + ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking); + } +}; + +/*************************************************************************** +* meta-unrolling implementation +***************************************************************************/ + +template +struct triangular_solver_unroller; + +template +struct triangular_solver_unroller { + enum { + IsLower = ((Mode&Lower)==Lower), + DiagIndex = IsLower ? LoopIndex : Size - LoopIndex - 1, + StartIndex = IsLower ? 0 : DiagIndex+1 + }; + static void run(const Lhs& lhs, Rhs& rhs) + { + if (LoopIndex>0) + rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex).template segment(StartIndex).transpose() + .cwiseProduct(rhs.template segment(StartIndex)).sum(); + + if(!(Mode & UnitDiag)) + rhs.coeffRef(DiagIndex) /= lhs.coeff(DiagIndex,DiagIndex); + + triangular_solver_unroller::run(lhs,rhs); + } +}; + +template +struct triangular_solver_unroller { + static void run(const Lhs&, Rhs&) {} +}; + +template +struct triangular_solver_selector { + static void run(const Lhs& lhs, Rhs& rhs) + { triangular_solver_unroller::run(lhs,rhs); } +}; + +template +struct triangular_solver_selector { + static void run(const Lhs& lhs, Rhs& rhs) + { + Transpose trLhs(lhs); + Transpose trRhs(rhs); + + triangular_solver_unroller,Transpose, + ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag), + 0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs); + } +}; + +} // end namespace internal + +/*************************************************************************** +* TriangularView methods +***************************************************************************/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void TriangularViewImpl::solveInPlace(const MatrixBase& _other) const +{ + OtherDerived& other = _other.const_cast_derived(); + eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) ); + eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); + // If solving for a 0x0 matrix, nothing to do, simply return. 
+ if (derived().cols() == 0) + return; + + enum { copy = (internal::traits::Flags & RowMajorBit) && OtherDerived::IsVectorAtCompileTime && OtherDerived::SizeAtCompileTime!=1}; + typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; + OtherCopy otherCopy(other); + + internal::triangular_solver_selector::type, + Side, Mode>::run(derived().nestedExpression(), otherCopy); + + if (copy) + other = otherCopy; +} + +template +template +const internal::triangular_solve_retval,Other> +TriangularViewImpl::solve(const MatrixBase& other) const +{ + return internal::triangular_solve_retval(derived(), other.derived()); +} +#endif + +namespace internal { + + +template +struct traits > +{ + typedef typename internal::plain_matrix_type_column_major::type ReturnType; +}; + +template struct triangular_solve_retval + : public ReturnByValue > +{ + typedef typename remove_all::type RhsNestedCleaned; + typedef ReturnByValue Base; + + triangular_solve_retval(const TriangularType& tri, const Rhs& rhs) + : m_triangularMatrix(tri), m_rhs(rhs) + {} + + inline Index rows() const { return m_rhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } + + template inline void evalTo(Dest& dst) const + { + if(!is_same_dense(dst,m_rhs)) + dst = m_rhs; + m_triangularMatrix.template solveInPlace(dst); + } + + protected: + const TriangularType& m_triangularMatrix; + typename Rhs::Nested m_rhs; +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SOLVETRIANGULAR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolverBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolverBase.h new file mode 100644 index 000000000..8a4adc229 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/SolverBase.h @@ -0,0 +1,130 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SOLVERBASE_H +#define EIGEN_SOLVERBASE_H + +namespace Eigen { + +namespace internal { + + + +} // end namespace internal + +/** \class SolverBase + * \brief A base class for matrix decomposition and solvers + * + * \tparam Derived the actual type of the decomposition/solver. + * + * Any matrix decomposition inheriting this base class provide the following API: + * + * \code + * MatrixType A, b, x; + * DecompositionType dec(A); + * x = dec.solve(b); // solve A * x = b + * x = dec.transpose().solve(b); // solve A^T * x = b + * x = dec.adjoint().solve(b); // solve A' * x = b + * \endcode + * + * \warning Currently, any other usage of transpose() and adjoint() are not supported and will produce compilation errors. 
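A usage sketch (illustrative only) of the triangular solvers implemented above, reached through the public TriangularView API:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Random() gives a generically well-conditioned diagonal for this sketch.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(3);
  // Back-substitution against the upper triangular part of A only.
  Eigen::VectorXd x = A.triangularView<Eigen::Upper>().solve(b);
  // In-place variant: b is overwritten with the solution.
  A.triangularView<Eigen::Upper>().solveInPlace(b);
  std::cout << (x - b).norm() << "\n";  // ~0
}
```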
+ * + * \sa class PartialPivLU, class FullPivLU + */ +template +class SolverBase : public EigenBase +{ + public: + + typedef EigenBase Base; + typedef typename internal::traits::Scalar Scalar; + typedef Scalar CoeffReturnType; + + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime>::ret), + IsVectorAtCompileTime = internal::traits::MaxRowsAtCompileTime == 1 + || internal::traits::MaxColsAtCompileTime == 1 + }; + + /** Default constructor */ + SolverBase() + {} + + ~SolverBase() + {} + + using Base::derived; + + /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); + return Solve(derived(), b.derived()); + } + + /** \internal the return type of transpose() */ + typedef typename internal::add_const >::type ConstTransposeReturnType; + /** \returns an expression of the transposed of the factored matrix. + * + * A typical usage is to solve for the transposed problem A^T x = b: + * \code x = dec.transpose().solve(b); \endcode + * + * \sa adjoint(), solve() + */ + inline ConstTransposeReturnType transpose() const + { + return ConstTransposeReturnType(derived()); + } + + /** \internal the return type of adjoint() */ + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, ConstTransposeReturnType>, + ConstTransposeReturnType + >::type AdjointReturnType; + /** \returns an expression of the adjoint of the factored matrix + * + * A typical usage is to solve for the adjoint problem A' x = b: + * \code x = dec.adjoint().solve(b); \endcode + * + * For real scalar types, this function is equivalent to transpose(). + * + * \sa transpose(), solve() + */ + inline AdjointReturnType adjoint() const + { + return AdjointReturnType(derived().transpose()); + } + + protected: +}; + +namespace internal { + +template +struct generic_xpr_base +{ + typedef SolverBase type; + +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SOLVERBASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/StableNorm.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/StableNorm.h new file mode 100644 index 000000000..88c8d9890 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/StableNorm.h @@ -0,0 +1,221 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
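A sketch of the SolverBase API documented above (illustrative, not part of the patch); PartialPivLU is one decomposition deriving from it:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(3);
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x  = lu.solve(b);              // solve A   * x = b
  Eigen::VectorXd xt = lu.transpose().solve(b);  // solve A^T * x = b
  Eigen::VectorXd xa = lu.adjoint().solve(b);    // same as transpose() for real scalars
  std::cout << (A * x - b).norm() << "\n";       // ~0
}
```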
+ +#ifndef EIGEN_STABLENORM_H +#define EIGEN_STABLENORM_H + +namespace Eigen { + +namespace internal { + +template +inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) +{ + Scalar maxCoeff = bl.cwiseAbs().maxCoeff(); + + if(maxCoeff>scale) + { + ssq = ssq * numext::abs2(scale/maxCoeff); + Scalar tmp = Scalar(1)/maxCoeff; + if(tmp > NumTraits::highest()) + { + invScale = NumTraits::highest(); + scale = Scalar(1)/invScale; + } + else if(maxCoeff>NumTraits::highest()) // we got a INF + { + invScale = Scalar(1); + scale = maxCoeff; + } + else + { + scale = maxCoeff; + invScale = tmp; + } + } + else if(maxCoeff!=maxCoeff) // we got a NaN + { + scale = maxCoeff; + } + + // TODO if the maxCoeff is much much smaller than the current scale, + // then we can neglect this sub vector + if(scale>Scalar(0)) // if scale==0, then bl is 0 + ssq += (bl*invScale).squaredNorm(); +} + +template +inline typename NumTraits::Scalar>::Real +blueNorm_impl(const EigenBase& _vec) +{ + typedef typename Derived::RealScalar RealScalar; + using std::pow; + using std::sqrt; + using std::abs; + const Derived& vec(_vec.derived()); + static bool initialized = false; + static RealScalar b1, b2, s1m, s2m, rbig, relerr; + if(!initialized) + { + int ibeta, it, iemin, iemax, iexp; + RealScalar eps; + // This program calculates the machine-dependent constants + // bl, b2, slm, s2m, relerr overfl + // from the "basic" machine-dependent numbers + // nbig, ibeta, it, iemin, iemax, rbig. + // The following define the basic machine-dependent constants. + // For portability, the PORT subprograms "ilmaeh" and "rlmach" + // are used. For any specific computer, each of the assignment + // statements can be replaced + ibeta = std::numeric_limits::radix; // base for floating-point numbers + it = std::numeric_limits::digits; // number of base-beta digits in mantissa + iemin = std::numeric_limits::min_exponent; // minimum exponent + iemax = std::numeric_limits::max_exponent; // maximum exponent + rbig = (std::numeric_limits::max)(); // largest floating-point number + + iexp = -((1-iemin)/2); + b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange + iexp = (iemax + 1 - it)/2; + b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange + + iexp = (2-iemin)/2; + s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range + iexp = - ((iemax+it)/2); + s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range + + eps = RealScalar(pow(double(ibeta), 1-it)); + relerr = sqrt(eps); // tolerance for neglecting asml + initialized = true; + } + Index n = vec.size(); + RealScalar ab2 = b2 / RealScalar(n); + RealScalar asml = RealScalar(0); + RealScalar amed = RealScalar(0); + RealScalar abig = RealScalar(0); + for(typename Derived::InnerIterator it(vec, 0); it; ++it) + { + RealScalar ax = abs(it.value()); + if(ax > ab2) abig += numext::abs2(ax*s2m); + else if(ax < b1) asml += numext::abs2(ax*s1m); + else amed += numext::abs2(ax); + } + if(amed!=amed) + return amed; // we got a NaN + if(abig > RealScalar(0)) + { + abig = sqrt(abig); + if(abig > rbig) // overflow, or *this contains INF values + return abig; // return INF + if(amed > RealScalar(0)) + { + abig = abig/s2m; + amed = sqrt(amed); + } + else + return abig/s2m; + } + else if(asml > RealScalar(0)) + { + if (amed > RealScalar(0)) + { + abig = sqrt(amed); + amed = sqrt(asml) / s1m; + } + else + return sqrt(asml)/s1m; + } + 
else + return sqrt(amed); + asml = numext::mini(abig, amed); + abig = numext::maxi(abig, amed); + if(asml <= abig*relerr) + return abig; + else + return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig)); +} + +} // end namespace internal + +/** \returns the \em l2 norm of \c *this avoiding underflow and overflow. + * This version use a blockwise two passes algorithm: + * 1 - find the absolute largest coefficient \c s + * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way + * + * For architecture/scalar types supporting vectorization, this version + * is faster than blueNorm(). Otherwise the blueNorm() is much faster. + * + * \sa norm(), blueNorm(), hypotNorm() + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::stableNorm() const +{ + using std::sqrt; + using std::abs; + const Index blockSize = 4096; + RealScalar scale(0); + RealScalar invScale(1); + RealScalar ssq(0); // sum of square + + typedef typename internal::nested_eval::type DerivedCopy; + typedef typename internal::remove_all::type DerivedCopyClean; + const DerivedCopy copy(derived()); + + enum { + CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit) + || (int(internal::evaluator::Alignment)>0) // FIXME Alignment)>0 might not be enough + ) && (blockSize*sizeof(Scalar)*20) // if we cannot allocate on the stack, then let's not bother about this optimization + }; + typedef typename internal::conditional, internal::evaluator::Alignment>, + typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper; + Index n = size(); + + if(n==1) + return abs(this->coeff(0)); + + Index bi = internal::first_default_aligned(copy); + if (bi>0) + internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale); + for (; bi +inline typename NumTraits::Scalar>::Real +MatrixBase::blueNorm() const +{ + return internal::blueNorm_impl(*this); +} + +/** \returns the \em l2 norm of \c *this avoiding undeflow and overflow. + * This version use a concatenation of hypot() calls, and it is very slow. + * + * \sa norm(), stableNorm() + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::hypotNorm() const +{ + return this->cwiseAbs().redux(internal::scalar_hypot_op()); +} + +} // end namespace Eigen + +#endif // EIGEN_STABLENORM_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Stride.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Stride.h new file mode 100644 index 000000000..513742f34 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Stride.h @@ -0,0 +1,111 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STRIDE_H +#define EIGEN_STRIDE_H + +namespace Eigen { + +/** \class Stride + * \ingroup Core_Module + * + * \brief Holds strides information for Map + * + * This class holds the strides information for mapping arrays with strides with class Map. + * + * It holds two values: the inner stride and the outer stride. 
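An illustrative sketch (not part of the patch) of when the norms above matter: with coefficients near the overflow threshold, the naive norm() overflows while the rescaling algorithms do not:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // (1e300)^2 exceeds the double range, so squaredNorm() becomes inf.
  Eigen::VectorXd v = Eigen::VectorXd::Constant(4, 1e300);
  std::cout << v.norm()       << "\n";  // inf: overflow in squaredNorm()
  std::cout << v.stableNorm() << "\n";  // 2e300, two-pass rescaling
  std::cout << v.blueNorm()   << "\n";  // 2e300, Blue's algorithm
}
```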
+ * + * The inner stride is the pointer increment between two consecutive entries within a given row of a + * row-major matrix or within a given column of a column-major matrix. + * + * The outer stride is the pointer increment between two consecutive rows of a row-major matrix or + * between two consecutive columns of a column-major matrix. + * + * These two values can be passed either at compile-time as template parameters, or at runtime as + * arguments to the constructor. + * + * Indeed, this class takes two template parameters: + * \tparam _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime. + * \tparam _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime. + * + * Here is an example: + * \include Map_general_stride.cpp + * Output: \verbinclude Map_general_stride.out + * + * \sa class InnerStride, class OuterStride, \ref TopicStorageOrders + */ +template +class Stride +{ + public: + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + enum { + InnerStrideAtCompileTime = _InnerStrideAtCompileTime, + OuterStrideAtCompileTime = _OuterStrideAtCompileTime + }; + + /** Default constructor, for use when strides are fixed at compile time */ + EIGEN_DEVICE_FUNC + Stride() + : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime) + { + eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic); + } + + /** Constructor allowing to pass the strides at runtime */ + EIGEN_DEVICE_FUNC + Stride(Index outerStride, Index innerStride) + : m_outer(outerStride), m_inner(innerStride) + { + eigen_assert(innerStride>=0 && outerStride>=0); + } + + /** Copy constructor */ + EIGEN_DEVICE_FUNC + Stride(const Stride& other) + : m_outer(other.outer()), m_inner(other.inner()) + {} + + /** \returns the outer stride */ + EIGEN_DEVICE_FUNC + inline Index outer() const { return m_outer.value(); } + /** \returns the inner stride */ + EIGEN_DEVICE_FUNC + inline Index inner() const { return m_inner.value(); } + + protected: + internal::variable_if_dynamic m_outer; + internal::variable_if_dynamic m_inner; +}; + +/** \brief Convenience specialization of Stride to specify only an inner stride + * See class Map for some examples */ +template +class InnerStride : public Stride<0, Value> +{ + typedef Stride<0, Value> Base; + public: + EIGEN_DEVICE_FUNC InnerStride() : Base() {} + EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code +}; + +/** \brief Convenience specialization of Stride to specify only an outer stride + * See class Map for some examples */ +template +class OuterStride : public Stride +{ + typedef Stride Base; + public: + EIGEN_DEVICE_FUNC OuterStride() : Base() {} + EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code +}; + +} // end namespace Eigen + +#endif // EIGEN_STRIDE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Swap.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Swap.h new file mode 100644 index 000000000..d70200918 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Swap.h @@ -0,0 +1,67 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
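A sketch of passing runtime strides to Map (along the lines of the Map_general_stride example referenced above; illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  double data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> DynStride;
  // Column-major 3x2 view: entries within a column sit 2 slots apart
  // (inner stride), consecutive columns start 6 slots apart (outer stride).
  Eigen::Map<Eigen::MatrixXd, 0, DynStride> m(data, 3, 2, DynStride(6, 2));
  std::cout << m << "\n";  // columns: (1,3,5) and (7,9,11)
}
```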
+// +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SWAP_H +#define EIGEN_SWAP_H + +namespace Eigen { + +namespace internal { + +// Overload default assignPacket behavior for swapping them +template +class generic_dense_assignment_kernel, Specialized> + : public generic_dense_assignment_kernel, BuiltIn> +{ +protected: + typedef generic_dense_assignment_kernel, BuiltIn> Base; + using Base::m_dst; + using Base::m_src; + using Base::m_functor; + +public: + typedef typename Base::Scalar Scalar; + typedef typename Base::DstXprType DstXprType; + typedef swap_assign_op Functor; + + EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr) + : Base(dst, src, func, dstExpr) + {} + + template + void assignPacket(Index row, Index col) + { + PacketType tmp = m_src.template packet(row,col); + const_cast(m_src).template writePacket(row,col, m_dst.template packet(row,col)); + m_dst.template writePacket(row,col,tmp); + } + + template + void assignPacket(Index index) + { + PacketType tmp = m_src.template packet(index); + const_cast(m_src).template writePacket(index, m_dst.template packet(index)); + m_dst.template writePacket(index,tmp); + } + + // TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael) + template + void assignPacketByOuterInner(Index outer, Index inner) + { + Index row = Base::rowIndexByOuterInner(outer, inner); + Index col = Base::colIndexByOuterInner(outer, inner); + assignPacket(row, col); + } +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SWAP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpose.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpose.h new file mode 100644 index 000000000..960dc4510 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpose.h @@ -0,0 +1,405 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2009-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TRANSPOSE_H +#define EIGEN_TRANSPOSE_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename ref_selector::type MatrixTypeNested; + typedef typename remove_reference::type MatrixTypeNestedPlain; + enum { + RowsAtCompileTime = MatrixType::ColsAtCompileTime, + ColsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + FlagsLvalueBit = is_lvalue::value ? 
LvalueBit : 0, + Flags0 = traits::Flags & ~(LvalueBit | NestByRefBit), + Flags1 = Flags0 | FlagsLvalueBit, + Flags = Flags1 ^ RowMajorBit, + InnerStrideAtCompileTime = inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = outer_stride_at_compile_time::ret + }; +}; +} + +template class TransposeImpl; + +/** \class Transpose + * \ingroup Core_Module + * + * \brief Expression of the transpose of a matrix + * + * \tparam MatrixType the type of the object of which we are taking the transpose + * + * This class represents an expression of the transpose of a matrix. + * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::transpose(), MatrixBase::adjoint() + */ +template class Transpose + : public TransposeImpl::StorageKind> +{ + public: + + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + + typedef typename TransposeImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose) + typedef typename internal::remove_all::type NestedExpression; + + EIGEN_DEVICE_FUNC + explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) + + EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_matrix; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + typename internal::remove_reference::type& + nestedExpression() { return m_matrix; } + + /** \internal */ + void resize(Index nrows, Index ncols) { + m_matrix.resize(ncols,nrows); + } + + protected: + typename internal::ref_selector::non_const_type m_matrix; +}; + +namespace internal { + +template::ret> +struct TransposeImpl_base +{ + typedef typename dense_xpr_base >::type type; +}; + +template +struct TransposeImpl_base +{ + typedef typename dense_xpr_base >::type type; +}; + +} // end namespace internal + +// Generic API dispatcher +template +class TransposeImpl + : public internal::generic_xpr_base >::type +{ +public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +template class TransposeImpl + : public internal::TransposeImpl_base::type +{ + public: + + typedef typename internal::TransposeImpl_base::type Base; + using Base::coeffRef; + EIGEN_DENSE_PUBLIC_INTERFACE(Transpose) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl) + + EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); } + EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); } + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); } + EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); } + + // FIXME: shall we keep the const version of coeffRef? 
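// [Editor's note] A small sketch, not part of the vendored sources, of the
// Transpose expression defined above: transpose() copies nothing, it only
// remaps (row, col) accesses, and it is writable when the nested expression
// is an lvalue (the FlagsLvalueBit machinery above).
#include <Eigen/Dense>
int main() {
  Eigen::Matrix2d A;
  A << 1, 2,
       3, 4;
  Eigen::Matrix2d B = A.transpose();  // evaluated on assignment: {{1,3},{2,4}}
  A.transpose()(0, 1) = 9;            // lvalue access: writes A(1,0)
  return (B(0, 1) == 3 && A(1, 0) == 9) ? 0 : 1;
}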
+ EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index rowId, Index colId) const + { + return derived().nestedExpression().coeffRef(colId, rowId); + } + + EIGEN_DEVICE_FUNC + inline const Scalar& coeffRef(Index index) const + { + return derived().nestedExpression().coeffRef(index); + } + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TransposeImpl) +}; + +/** \returns an expression of the transpose of *this. + * + * Example: \include MatrixBase_transpose.cpp + * Output: \verbinclude MatrixBase_transpose.out + * + * \warning If you want to replace a matrix by its own transpose, do \b NOT do this: + * \code + * m = m.transpose(); // bug!!! caused by aliasing effect + * \endcode + * Instead, use the transposeInPlace() method: + * \code + * m.transposeInPlace(); + * \endcode + * which gives Eigen good opportunities for optimization, or alternatively you can also do: + * \code + * m = m.transpose().eval(); + * \endcode + * + * \sa transposeInPlace(), adjoint() */ +template +inline Transpose +DenseBase::transpose() +{ + return TransposeReturnType(derived()); +} + +/** This is the const version of transpose(). + * + * Make sure you read the warning for transpose() ! + * + * \sa transposeInPlace(), adjoint() */ +template +inline typename DenseBase::ConstTransposeReturnType +DenseBase::transpose() const +{ + return ConstTransposeReturnType(derived()); +} + +/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this. + * + * Example: \include MatrixBase_adjoint.cpp + * Output: \verbinclude MatrixBase_adjoint.out + * + * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this: + * \code + * m = m.adjoint(); // bug!!! caused by aliasing effect + * \endcode + * Instead, use the adjointInPlace() method: + * \code + * m.adjointInPlace(); + * \endcode + * which gives Eigen good opportunities for optimization, or alternatively you can also do: + * \code + * m = m.adjoint().eval(); + * \endcode + * + * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */ +template +inline const typename MatrixBase::AdjointReturnType +MatrixBase::adjoint() const +{ + return AdjointReturnType(this->transpose()); +} + +/*************************************************************************** +* "in place" transpose implementation +***************************************************************************/ + +namespace internal { + +template::size)) + && (internal::evaluator::Flags&PacketAccessBit) > +struct inplace_transpose_selector; + +template +struct inplace_transpose_selector { // square matrix + static void run(MatrixType& m) { + m.matrix().template triangularView().swap(m.matrix().transpose()); + } +}; + +// TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only. 
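// [Editor's note] The aliasing pitfall warned about in the transpose() and
// adjoint() documentation above, shown concretely; this is an illustrative
// sketch, not part of the vendored sources. Only the last three statements
// are safe.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Random();
  // m = m.transpose();        // WRONG: reads and writes m at once (aliasing)
  m = m.transpose().eval();    // OK: evaluates into a temporary first
  m.transposeInPlace();        // OK and cheaper: swaps coefficients in place
  Eigen::Matrix3cd c = Eigen::Matrix3cd::Random();
  c.adjointInPlace();          // same idea for the conjugate transpose
  return 0;
}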
+template +struct inplace_transpose_selector { // PacketSize x PacketSize + static void run(MatrixType& m) { + typedef typename MatrixType::Scalar Scalar; + typedef typename internal::packet_traits::type Packet; + const Index PacketSize = internal::packet_traits::size; + const Index Alignment = internal::evaluator::Alignment; + PacketBlock A; + for (Index i=0; i(i,0); + internal::ptranspose(A); + for (Index i=0; i(m.rowIndexByOuterInner(i,0), m.colIndexByOuterInner(i,0), A.packet[i]); + } +}; + +template +struct inplace_transpose_selector { // non square matrix + static void run(MatrixType& m) { + if (m.rows()==m.cols()) + m.matrix().template triangularView().swap(m.matrix().transpose()); + else + m = m.transpose().eval(); + } +}; + +} // end namespace internal + +/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose. + * Thus, doing + * \code + * m.transposeInPlace(); + * \endcode + * has the same effect on m as doing + * \code + * m = m.transpose().eval(); + * \endcode + * and is faster and also safer because in the latter line of code, forgetting the eval() results + * in a bug caused by \ref TopicAliasing "aliasing". + * + * Notice however that this method is only useful if you want to replace a matrix by its own transpose. + * If you just need the transpose of a matrix, use transpose(). + * + * \note if the matrix is not square, then \c *this must be a resizable matrix. + * This excludes (non-square) fixed-size matrices, block-expressions and maps. + * + * \sa transpose(), adjoint(), adjointInPlace() */ +template +inline void DenseBase::transposeInPlace() +{ + eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic)) + && "transposeInPlace() called on a non-square non-resizable matrix"); + internal::inplace_transpose_selector::run(derived()); +} + +/*************************************************************************** +* "in place" adjoint implementation +***************************************************************************/ + +/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose. + * Thus, doing + * \code + * m.adjointInPlace(); + * \endcode + * has the same effect on m as doing + * \code + * m = m.adjoint().eval(); + * \endcode + * and is faster and also safer because in the latter line of code, forgetting the eval() results + * in a bug caused by aliasing. + * + * Notice however that this method is only useful if you want to replace a matrix by its own adjoint. + * If you just need the adjoint of a matrix, use adjoint(). + * + * \note if the matrix is not square, then \c *this must be a resizable matrix. + * This excludes (non-square) fixed-size matrices, block-expressions and maps. + * + * \sa transpose(), adjoint(), transposeInPlace() */ +template +inline void MatrixBase::adjointInPlace() +{ + derived() = adjoint().eval(); +} + +#ifndef EIGEN_NO_DEBUG + +// The following is to detect aliasing problems in most common cases. 
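// [Editor's note] An illustrative sketch (not part of the vendored sources)
// of the transposeInPlace() behavior documented above: it also handles
// non-square matrices, but only when *this is resizable; fixed-size
// non-square matrices, blocks, and maps must use m = m.transpose().eval().
#include <Eigen/Dense>
int main() {
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  m.transposeInPlace();  // m is now 3x2
  return (m.rows() == 3 && m(2, 1) == 6) ? 0 : 1;
}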
+ +namespace internal { + +template +struct check_transpose_aliasing_compile_time_selector +{ + enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed }; +}; + +template +struct check_transpose_aliasing_compile_time_selector > +{ + enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed + || bool(blas_traits::IsTransposed) != DestIsTransposed + }; +}; + +template +struct check_transpose_aliasing_run_time_selector +{ + static bool run(const Scalar* dest, const OtherDerived& src) + { + return (bool(blas_traits::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src)); + } +}; + +template +struct check_transpose_aliasing_run_time_selector > +{ + static bool run(const Scalar* dest, const CwiseBinaryOp& src) + { + return ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs()))) + || ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs()))); + } +}; + +// the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing, +// is because when the condition controlling the assert is known at compile time, ICC emits a warning. +// This is actually a good warning: in expressions that don't have any transposing, the condition is +// known at compile time to be false, and using that, we can avoid generating the code of the assert again +// and again for all these expressions that don't need it. + +template::IsTransposed,OtherDerived>::ret + > +struct checkTransposeAliasing_impl +{ + static void run(const Derived& dst, const OtherDerived& other) + { + eigen_assert((!check_transpose_aliasing_run_time_selector + ::IsTransposed,OtherDerived> + ::run(extract_data(dst), other)) + && "aliasing detected during transposition, use transposeInPlace() " + "or evaluate the rhs into a temporary using .eval()"); + + } +}; + +template +struct checkTransposeAliasing_impl +{ + static void run(const Derived&, const OtherDerived&) + { + } +}; + +template +void check_for_aliasing(const Dst &dst, const Src &src) +{ + internal::checkTransposeAliasing_impl::run(dst, src); +} + +} // end namespace internal + +#endif // EIGEN_NO_DEBUG + +} // end namespace Eigen + +#endif // EIGEN_TRANSPOSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpositions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpositions.h new file mode 100644 index 000000000..86da5af59 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Transpositions.h @@ -0,0 +1,407 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
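// [Editor's note] A hedged sketch, not part of the vendored sources, of the
// debug-mode aliasing detection implemented just above: when EIGEN_NO_DEBUG
// is not defined, Eigen compares the destination pointer against the data of
// a transposed right-hand side and aborts with "aliasing detected during
// transposition" instead of silently producing garbage.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d a = Eigen::Matrix3d::Random();
  // a = a.transpose();              // debug build: runtime assertion failure
  Eigen::Matrix3d b = a.transpose(); // distinct destination: no aliasing
  return b.isApprox(a.transpose()) ? 0 : 1;
}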
+ +#ifndef EIGEN_TRANSPOSITIONS_H +#define EIGEN_TRANSPOSITIONS_H + +namespace Eigen { + +template +class TranspositionsBase +{ + typedef internal::traits Traits; + + public: + + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** Copies the \a other transpositions into \c *this */ + template + Derived& operator=(const TranspositionsBase& other) + { + indices() = other.indices(); + return derived(); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Derived& operator=(const TranspositionsBase& other) + { + indices() = other.indices(); + return derived(); + } + #endif + + /** \returns the number of transpositions */ + Index size() const { return indices().size(); } + /** \returns the number of rows of the equivalent permutation matrix */ + Index rows() const { return indices().size(); } + /** \returns the number of columns of the equivalent permutation matrix */ + Index cols() const { return indices().size(); } + + /** Direct access to the underlying index vector */ + inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); } + /** Direct access to the underlying index vector */ + inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); } + /** Direct access to the underlying index vector */ + inline const StorageIndex& operator()(Index i) const { return indices()(i); } + /** Direct access to the underlying index vector */ + inline StorageIndex& operator()(Index i) { return indices()(i); } + /** Direct access to the underlying index vector */ + inline const StorageIndex& operator[](Index i) const { return indices()(i); } + /** Direct access to the underlying index vector */ + inline StorageIndex& operator[](Index i) { return indices()(i); } + + /** const version of indices(). */ + const IndicesType& indices() const { return derived().indices(); } + /** \returns a reference to the stored array representing the transpositions. */ + IndicesType& indices() { return derived().indices(); } + + /** Resizes to given size. */ + inline void resize(Index newSize) + { + indices().resize(newSize); + } + + /** Sets \c *this to represent an identity transformation */ + void setIdentity() + { + for(StorageIndex i = 0; i < indices().size(); ++i) + coeffRef(i) = i; + } + + // FIXME: do we want such methods ? + // might be useful when the target matrix expression is complex, e.g.: + // object.matrix().block(..,..,..,..)
= trans * object.matrix().block(..,..,..,..); + /* + template + void applyForwardToRows(MatrixType& mat) const + { + for(Index k=0 ; k + void applyBackwardToRows(MatrixType& mat) const + { + for(Index k=size()-1 ; k>=0 ; --k) + if(m_indices(k)!=k) + mat.row(k).swap(mat.row(m_indices(k))); + } + */ + + /** \returns the inverse transformation */ + inline Transpose inverse() const + { return Transpose(derived()); } + + /** \returns the transpose transformation */ + inline Transpose transpose() const + { return Transpose(derived()); } + + protected: +}; + +namespace internal { +template +struct traits > + : traits > +{ + typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; + typedef TranspositionsStorage StorageKind; +}; +} + +/** \class Transpositions + * \ingroup Core_Module + * + * \brief Represents a sequence of transpositions (row/column interchange) + * + * \tparam SizeAtCompileTime the number of transpositions, or Dynamic + * \tparam MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. + * + * This class represents a permutation transformation as a sequence of \em n transpositions + * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices. + * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges + * the rows \c i and \c indices[i] of the matrix \c M. + * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange. + * + * Compared to the class PermutationMatrix, such a sequence of transpositions is what is + * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place. + * + * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example: + * \code + * Transpositions tr; + * MatrixXf mat; + * mat = tr * mat; + * \endcode + * In this example, we detect that the matrix appears on both sides, and so the transpositions + * are applied in-place without any temporary or extra copy. + * + * \sa class PermutationMatrix + */ + +template +class Transpositions : public TranspositionsBase > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; + + inline Transpositions() {} + + /** Copy constructor. */ + template + inline Transpositions(const TranspositionsBase& other) + : m_indices(other.indices()) {} + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** Standard copy constructor. Defined only to prevent a default copy constructor + * from hiding the other templated constructor */ + inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {} + #endif + + /** Generic constructor from expression of the transposition indices. */ + template + explicit inline Transpositions(const MatrixBase& indices) : m_indices(indices) + {} + + /** Copies the \a other transpositions into \c *this */ + template + Transpositions& operator=(const TranspositionsBase& other) + { + return Base::operator=(other); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=.
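// [Editor's note] An illustrative sketch, not part of the vendored sources,
// of the Transpositions semantics documented above: entry k of the index
// vector means "swap row k with row indices[k]", applied in sequence, which
// is exactly the compact form that pivoting decompositions produce.
#include <Eigen/Dense>
int main() {
  Eigen::Transpositions<Eigen::Dynamic> tr(3);
  tr.setIdentity();
  tr.coeffRef(0) = 2;                  // T0 swaps rows 0 and 2
  Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
  Eigen::Matrix3d p = tr * m;          // row interchange (applied on the left)
  Eigen::Matrix3d q = m * tr;          // column interchange (on the right)
  // inverse() applies the transpositions in reverse order, undoing p.
  return ((tr.inverse() * p).isApprox(m) && q.isApprox(p)) ? 0 : 1;
}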
+ */ + Transpositions& operator=(const Transpositions& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** Constructs an uninitialized permutation matrix of given size. + */ + inline Transpositions(Index size) : m_indices(size) + {} + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the transpositions. */ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + + +namespace internal { +template +struct traits,_PacketAccess> > + : traits > +{ + typedef Map, _PacketAccess> IndicesType; + typedef _StorageIndex StorageIndex; + typedef TranspositionsStorage StorageKind; +}; +} + +template +class Map,PacketAccess> + : public TranspositionsBase,PacketAccess> > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; + + explicit inline Map(const StorageIndex* indicesPtr) + : m_indices(indicesPtr) + {} + + inline Map(const StorageIndex* indicesPtr, Index size) + : m_indices(indicesPtr,size) + {} + + /** Copies the \a other transpositions into \c *this */ + template + Map& operator=(const TranspositionsBase& other) + { + return Base::operator=(other); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + + /** \returns a reference to the stored array representing the transpositions. */ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + +namespace internal { +template +struct traits > + : traits > +{ + typedef TranspositionsStorage StorageKind; +}; +} + +template +class TranspositionsWrapper + : public TranspositionsBase > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; + + explicit inline TranspositionsWrapper(IndicesType& indices) + : m_indices(indices) + {} + + /** Copies the \a other transpositions into \c *this */ + template + TranspositionsWrapper& operator=(const TranspositionsBase& other) + { + return Base::operator=(other); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + TranspositionsWrapper& operator=(const TranspositionsWrapper& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + + /** \returns a reference to the stored array representing the transpositions. */ + IndicesType& indices() { return m_indices; } + + protected: + + typename IndicesType::Nested m_indices; +}; + + + +/** \returns the \a matrix with the \a transpositions applied to the columns. 
+ */ +template +EIGEN_DEVICE_FUNC +const Product +operator*(const MatrixBase &matrix, + const TranspositionsBase& transpositions) +{ + return Product + (matrix.derived(), transpositions.derived()); +} + +/** \returns the \a matrix with the \a transpositions applied to the rows. + */ +template +EIGEN_DEVICE_FUNC +const Product +operator*(const TranspositionsBase &transpositions, + const MatrixBase& matrix) +{ + return Product + (transpositions.derived(), matrix.derived()); +} + +// Template partial specialization for transposed/inverse transpositions + +namespace internal { + +template +struct traits > > + : traits +{}; + +} // end namespace internal + +template +class Transpose > +{ + typedef TranspositionsDerived TranspositionType; + typedef typename TranspositionType::IndicesType IndicesType; + public: + + explicit Transpose(const TranspositionType& t) : m_transpositions(t) {} + + Index size() const { return m_transpositions.size(); } + Index rows() const { return m_transpositions.size(); } + Index cols() const { return m_transpositions.size(); } + + /** \returns the \a matrix with the inverse transpositions applied to the columns. + */ + template friend + const Product + operator*(const MatrixBase& matrix, const Transpose& trt) + { + return Product(matrix.derived(), trt); + } + + /** \returns the \a matrix with the inverse transpositions applied to the rows. + */ + template + const Product + operator*(const MatrixBase& matrix) const + { + return Product(*this, matrix.derived()); + } + + const TranspositionType& nestedExpression() const { return m_transpositions; } + + protected: + const TranspositionType& m_transpositions; +}; + +} // end namespace Eigen + +#endif // EIGEN_TRANSPOSITIONS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/TriangularMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/TriangularMatrix.h new file mode 100644 index 000000000..9abb7e31a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/TriangularMatrix.h @@ -0,0 +1,985 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TRIANGULARMATRIX_H +#define EIGEN_TRIANGULARMATRIX_H + +namespace Eigen { + +namespace internal { + +template struct triangular_solve_retval; + +} + +/** \class TriangularBase + * \ingroup Core_Module + * + * \brief Base class for triangular part in a matrix + */ +template class TriangularBase : public EigenBase +{ + public: + + enum { + Mode = internal::traits::Mode, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. 
\sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime>::ret) + + }; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + typedef typename internal::traits::FullMatrixType DenseMatrixType; + typedef DenseMatrixType DenseType; + typedef Derived const& Nested; + + EIGEN_DEVICE_FUNC + inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } + + EIGEN_DEVICE_FUNC + inline Index rows() const { return derived().rows(); } + EIGEN_DEVICE_FUNC + inline Index cols() const { return derived().cols(); } + EIGEN_DEVICE_FUNC + inline Index outerStride() const { return derived().outerStride(); } + EIGEN_DEVICE_FUNC + inline Index innerStride() const { return derived().innerStride(); } + + // dummy resize function + void resize(Index rows, Index cols) + { + EIGEN_UNUSED_VARIABLE(rows); + EIGEN_UNUSED_VARIABLE(cols); + eigen_assert(rows==this->rows() && cols==this->cols()); + } + + EIGEN_DEVICE_FUNC + inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); } + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); } + + /** \see MatrixBase::copyCoeff(row,col) + */ + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other) + { + derived().coeffRef(row, col) = other.coeff(row, col); + } + + EIGEN_DEVICE_FUNC + inline Scalar operator()(Index row, Index col) const + { + check_coordinates(row, col); + return coeff(row,col); + } + EIGEN_DEVICE_FUNC + inline Scalar& operator()(Index row, Index col) + { + check_coordinates(row, col); + return coeffRef(row,col); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + EIGEN_DEVICE_FUNC + inline const Derived& derived() const { return *static_cast(this); } + EIGEN_DEVICE_FUNC + inline Derived& derived() { return *static_cast(this); } + #endif // not EIGEN_PARSED_BY_DOXYGEN + + template + EIGEN_DEVICE_FUNC + void evalTo(MatrixBase &other) const; + template + EIGEN_DEVICE_FUNC + void evalToLazy(MatrixBase &other) const; + + EIGEN_DEVICE_FUNC + DenseMatrixType toDenseMatrix() const + { + DenseMatrixType res(rows(), cols()); + evalToLazy(res); + return res; + } + + protected: + + void check_coordinates(Index row, Index col) const + { + EIGEN_ONLY_USED_FOR_DEBUG(row); + EIGEN_ONLY_USED_FOR_DEBUG(col); + eigen_assert(col>=0 && col=0 && row=row) + || (mode==Lower && col<=row) + || ((mode==StrictlyUpper || mode==UnitUpper) && col>row) + || ((mode==StrictlyLower || mode==UnitLower) && col +struct traits > : traits +{ + typedef typename ref_selector::non_const_type MatrixTypeNested; + typedef typename remove_reference::type MatrixTypeNestedNonRef; + typedef typename remove_all::type MatrixTypeNestedCleaned; + typedef typename MatrixType::PlainObject FullMatrixType; + typedef MatrixType ExpressionType; + enum { + Mode = _Mode, + FlagsLvalueBit = is_lvalue::value ? 
LvalueBit : 0, + Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) + }; +}; +} + +template class TriangularViewImpl; + +template class TriangularView + : public TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > +{ + public: + + typedef TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > Base; + typedef typename internal::traits::Scalar Scalar; + typedef _MatrixType MatrixType; + + protected: + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNestedNonRef MatrixTypeNestedNonRef; + + typedef typename internal::remove_all::type MatrixConjugateReturnType; + + public: + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::MatrixTypeNestedCleaned NestedExpression; + + enum { + Mode = _Mode, + Flags = internal::traits::Flags, + TransposeMode = (Mode & Upper ? Lower : 0) + | (Mode & Lower ? Upper : 0) + | (Mode & (UnitDiag)) + | (Mode & (ZeroDiag)), + IsVectorAtCompileTime = false + }; + + EIGEN_DEVICE_FUNC + explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix) + {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TriangularView) + + /** \copydoc EigenBase::rows() */ + EIGEN_DEVICE_FUNC + inline Index rows() const { return m_matrix.rows(); } + /** \copydoc EigenBase::cols() */ + EIGEN_DEVICE_FUNC + inline Index cols() const { return m_matrix.cols(); } + + /** \returns a const reference to the nested expression */ + EIGEN_DEVICE_FUNC + const NestedExpression& nestedExpression() const { return m_matrix; } + + /** \returns a reference to the nested expression */ + EIGEN_DEVICE_FUNC + NestedExpression& nestedExpression() { return m_matrix; } + + typedef TriangularView ConjugateReturnType; + /** \sa MatrixBase::conjugate() const */ + EIGEN_DEVICE_FUNC + inline const ConjugateReturnType conjugate() const + { return ConjugateReturnType(m_matrix.conjugate()); } + + typedef TriangularView AdjointReturnType; + /** \sa MatrixBase::adjoint() const */ + EIGEN_DEVICE_FUNC + inline const AdjointReturnType adjoint() const + { return AdjointReturnType(m_matrix.adjoint()); } + + typedef TriangularView TransposeReturnType; + /** \sa MatrixBase::transpose() */ + EIGEN_DEVICE_FUNC + inline TransposeReturnType transpose() + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + typename MatrixType::TransposeReturnType tmp(m_matrix); + return TransposeReturnType(tmp); + } + + typedef TriangularView ConstTransposeReturnType; + /** \sa MatrixBase::transpose() const */ + EIGEN_DEVICE_FUNC + inline const ConstTransposeReturnType transpose() const + { + return ConstTransposeReturnType(m_matrix.transpose()); + } + + template + EIGEN_DEVICE_FUNC + inline const Solve + solve(const MatrixBase& other) const + { return Solve(*this, other.derived()); } + + // workaround MSVC ICE + #if EIGEN_COMP_MSVC + template + EIGEN_DEVICE_FUNC + inline const internal::triangular_solve_retval + solve(const MatrixBase& other) const + { return Base::template solve(other); } + #else + using Base::solve; + #endif + + /** \returns a selfadjoint view of the referenced triangular part which must be either \c #Upper or \c #Lower. 
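// [Editor's note] A hedged sketch, not part of the vendored sources, of the
// TriangularView defined above: the view only *references* one triangular
// half of A (transpose() flips Upper to Lower via TransposeMode), and solve()
// performs a back substitution that never reads below the diagonal. The
// shifted-identity matrix is just an assumption to keep the diagonal away
// from zero.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d A =
      2.0 * Eigen::Matrix3d::Identity() + Eigen::Matrix3d::Random();
  Eigen::Vector3d b = Eigen::Vector3d::Ones();
  Eigen::Vector3d x = A.triangularView<Eigen::Upper>().solve(b);
  return (A.triangularView<Eigen::Upper>() * x).isApprox(b) ? 0 : 1;
}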
+ * + * This is a shortcut for \code this->nestedExpression().selfadjointView<(*this)::Mode>() \endcode + * \sa MatrixBase::selfadjointView() */ + EIGEN_DEVICE_FUNC + SelfAdjointView selfadjointView() + { + EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR); + return SelfAdjointView(m_matrix); + } + + /** This is the const version of selfadjointView() */ + EIGEN_DEVICE_FUNC + const SelfAdjointView selfadjointView() const + { + EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR); + return SelfAdjointView(m_matrix); + } + + + /** \returns the determinant of the triangular matrix + * \sa MatrixBase::determinant() */ + EIGEN_DEVICE_FUNC + Scalar determinant() const + { + if (Mode & UnitDiag) + return 1; + else if (Mode & ZeroDiag) + return 0; + else + return m_matrix.diagonal().prod(); + } + + protected: + + MatrixTypeNested m_matrix; +}; + +/** \ingroup Core_Module + * + * \brief Base class for a triangular part in a \b dense matrix + * + * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated. + * It extends class TriangularView with additional methods which available for dense expressions only. + * + * \sa class TriangularView, MatrixBase::triangularView() + */ +template class TriangularViewImpl<_MatrixType,_Mode,Dense> + : public TriangularBase > +{ + public: + + typedef TriangularView<_MatrixType, _Mode> TriangularViewType; + typedef TriangularBase Base; + typedef typename internal::traits::Scalar Scalar; + + typedef _MatrixType MatrixType; + typedef typename MatrixType::PlainObject DenseMatrixType; + typedef DenseMatrixType PlainObject; + + public: + using Base::evalToLazy; + using Base::derived; + + typedef typename internal::traits::StorageKind StorageKind; + + enum { + Mode = _Mode, + Flags = internal::traits::Flags + }; + + /** \returns the outer-stride of the underlying dense matrix + * \sa DenseCoeffsBase::outerStride() */ + EIGEN_DEVICE_FUNC + inline Index outerStride() const { return derived().nestedExpression().outerStride(); } + /** \returns the inner-stride of the underlying dense matrix + * \sa DenseCoeffsBase::innerStride() */ + EIGEN_DEVICE_FUNC + inline Index innerStride() const { return derived().nestedExpression().innerStride(); } + + /** \sa MatrixBase::operator+=() */ + template + EIGEN_DEVICE_FUNC + TriangularViewType& operator+=(const DenseBase& other) { + internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op()); + return derived(); + } + /** \sa MatrixBase::operator-=() */ + template + EIGEN_DEVICE_FUNC + TriangularViewType& operator-=(const DenseBase& other) { + internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op()); + return derived(); + } + + /** \sa MatrixBase::operator*=() */ + EIGEN_DEVICE_FUNC + TriangularViewType& operator*=(const typename internal::traits::Scalar& other) { return *this = derived().nestedExpression() * other; } + /** \sa DenseBase::operator/=() */ + EIGEN_DEVICE_FUNC + TriangularViewType& operator/=(const typename internal::traits::Scalar& other) { return *this = derived().nestedExpression() / other; } + + /** \sa MatrixBase::fill() */ + EIGEN_DEVICE_FUNC + void fill(const Scalar& value) { setConstant(value); } + /** \sa MatrixBase::setConstant() */ + EIGEN_DEVICE_FUNC + TriangularViewType& setConstant(const Scalar& value) + { return *this = MatrixType::Constant(derived().rows(), derived().cols(), value); } + /** \sa MatrixBase::setZero() */ + EIGEN_DEVICE_FUNC + 
TriangularViewType& setZero() { return setConstant(Scalar(0)); } + /** \sa MatrixBase::setOnes() */ + EIGEN_DEVICE_FUNC + TriangularViewType& setOnes() { return setConstant(Scalar(1)); } + + /** \sa MatrixBase::coeff() + * \warning the coordinates must fit into the referenced triangular part + */ + EIGEN_DEVICE_FUNC + inline Scalar coeff(Index row, Index col) const + { + Base::check_coordinates_internal(row, col); + return derived().nestedExpression().coeff(row, col); + } + + /** \sa MatrixBase::coeffRef() + * \warning the coordinates must fit into the referenced triangular part + */ + EIGEN_DEVICE_FUNC + inline Scalar& coeffRef(Index row, Index col) + { + EIGEN_STATIC_ASSERT_LVALUE(TriangularViewType); + Base::check_coordinates_internal(row, col); + return derived().nestedExpression().coeffRef(row, col); + } + + /** Assigns a triangular matrix to a triangular part of a dense matrix */ + template + EIGEN_DEVICE_FUNC + TriangularViewType& operator=(const TriangularBase& other); + + /** Shortcut for \code *this = other.triangularView<(*this)::Mode>() \endcode */ + template + EIGEN_DEVICE_FUNC + TriangularViewType& operator=(const MatrixBase& other); + +#ifndef EIGEN_PARSED_BY_DOXYGEN + EIGEN_DEVICE_FUNC + TriangularViewType& operator=(const TriangularViewImpl& other) + { return *this = other.derived().nestedExpression(); } + + /** \deprecated */ + template + EIGEN_DEVICE_FUNC + void lazyAssign(const TriangularBase& other); + + /** \deprecated */ + template + EIGEN_DEVICE_FUNC + void lazyAssign(const MatrixBase& other); +#endif + + /** Efficient triangular matrix times vector/matrix product */ + template + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase& rhs) const + { + return Product(derived(), rhs.derived()); + } + + /** Efficient vector/matrix times triangular matrix product */ + template friend + EIGEN_DEVICE_FUNC + const Product + operator*(const MatrixBase& lhs, const TriangularViewImpl& rhs) + { + return Product(lhs.derived(),rhs.derived()); + } + + /** \returns the product of the inverse of \c *this with \a other, \a *this being triangular. + * + * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if + * \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if + * \a Side==OnTheRight. + * + * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft + * + * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the + * diagonal must be non zero). It works as a backward (resp. forward) substitution if \c *this + * is an upper (resp. lower) triangular matrix. + * + * Example: \include Triangular_solve.cpp + * Output: \verbinclude Triangular_solve.out + * + * This function returns an expression of the inverse-multiply and can work in-place if it is assigned + * to the same matrix or vector \a other. + * + * For users coming from BLAS, this function (and more specifically solveInPlace()) offers + * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines. + * + * \sa TriangularView::solveInPlace() + */ + template + EIGEN_DEVICE_FUNC + inline const internal::triangular_solve_retval + solve(const MatrixBase& other) const; + + /** "in-place" version of TriangularView::solve() where the result is written in \a other + * + * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. + * This function will const_cast it, so constness isn't honored here.
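// [Editor's note] An illustrative sketch, not part of the vendored sources,
// of the two Side variants of the triangular solve documented above, plus the
// in-place form; the shifted-identity matrix is an assumption to keep the
// diagonal nonzero.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d A =
      2.0 * Eigen::Matrix3d::Identity() + Eigen::Matrix3d::Random();
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();
  auto L = A.triangularView<Eigen::Lower>();
  Eigen::Matrix3d X1 = L.solve(B);                     // solves L * X1 = B
  Eigen::Matrix3d X2 = L.solve<Eigen::OnTheRight>(B);  // solves X2 * L = B
  L.solveInPlace(B);                                   // B <- inverse(L) * B
  return (B.isApprox(X1) && !X2.isApprox(X1)) ? 0 : 1;
}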
+ * + * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft + * + * See TriangularView::solve() for the details. + */ + template + EIGEN_DEVICE_FUNC + void solveInPlace(const MatrixBase& other) const; + + template + EIGEN_DEVICE_FUNC + void solveInPlace(const MatrixBase& other) const + { return solveInPlace(other); } + + /** Swaps the coefficients of the common triangular parts of two matrices */ + template + EIGEN_DEVICE_FUNC +#ifdef EIGEN_PARSED_BY_DOXYGEN + void swap(TriangularBase &other) +#else + void swap(TriangularBase const & other) +#endif + { + EIGEN_STATIC_ASSERT_LVALUE(OtherDerived); + call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); + } + + /** \deprecated + * Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */ + template + EIGEN_DEVICE_FUNC + void swap(MatrixBase const & other) + { + EIGEN_STATIC_ASSERT_LVALUE(OtherDerived); + call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const { + if(!internal::is_same_dense(dst,rhs)) + dst = rhs; + this->solveInPlace(dst); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha, bool beta); + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(TriangularViewImpl) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TriangularViewImpl) + +}; + +/*************************************************************************** +* Implementation of triangular evaluation/assignment +***************************************************************************/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN +// FIXME should we keep that possibility +template +template +inline TriangularView& +TriangularViewImpl::operator=(const MatrixBase& other) +{ + internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op()); + return derived(); +} + +// FIXME should we keep that possibility +template +template +void TriangularViewImpl::lazyAssign(const MatrixBase& other) +{ + internal::call_assignment_no_alias(derived(), other.template triangularView()); +} + + + +template +template +inline TriangularView& +TriangularViewImpl::operator=(const TriangularBase& other) +{ + eigen_assert(Mode == int(OtherDerived::Mode)); + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +void TriangularViewImpl::lazyAssign(const TriangularBase& other) +{ + eigen_assert(Mode == int(OtherDerived::Mode)); + internal::call_assignment_no_alias(derived(), other.derived()); +} +#endif + +/*************************************************************************** +* Implementation of TriangularBase methods +***************************************************************************/ + +/** Assigns a triangular or selfadjoint matrix to a dense matrix. + * If the matrix is triangular, the opposite part is set to zero.
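// [Editor's note] A hedged sketch, not part of the vendored sources, of the
// evalTo() behavior stated just above: materializing a triangular view into
// a dense matrix zeroes everything outside the selected part.
#include <Eigen/Dense>
#include <iostream>
int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Constant(7.0);
  Eigen::Matrix3d D = A.triangularView<Eigen::StrictlyUpper>().toDenseMatrix();
  std::cout << D << "\n";  // 7s strictly above the diagonal, 0s elsewhere
  return 0;
}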
*/ +template +template +void TriangularBase::evalTo(MatrixBase &other) const +{ + evalToLazy(other.derived()); +} + +/*************************************************************************** +* Implementation of TriangularView methods +***************************************************************************/ + +/*************************************************************************** +* Implementation of MatrixBase methods +***************************************************************************/ + +/** + * \returns an expression of a triangular view extracted from the current matrix + * + * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper, + * \c #Lower, \c #StrictlyLower, \c #UnitLower. + * + * Example: \include MatrixBase_triangularView.cpp + * Output: \verbinclude MatrixBase_triangularView.out + * + * \sa class TriangularView + */ +template +template +typename MatrixBase::template TriangularViewReturnType::Type +MatrixBase::triangularView() +{ + return typename TriangularViewReturnType::Type(derived()); +} + +/** This is the const version of MatrixBase::triangularView() */ +template +template +typename MatrixBase::template ConstTriangularViewReturnType::Type +MatrixBase::triangularView() const +{ + return typename ConstTriangularViewReturnType::Type(derived()); +} + +/** \returns true if *this is approximately equal to an upper triangular matrix, + * within the precision given by \a prec. + * + * \sa isLowerTriangular() + */ +template +bool MatrixBase::isUpperTriangular(const RealScalar& prec) const +{ + RealScalar maxAbsOnUpperPart = static_cast(-1); + for(Index j = 0; j < cols(); ++j) + { + Index maxi = numext::mini(j, rows()-1); + for(Index i = 0; i <= maxi; ++i) + { + RealScalar absValue = numext::abs(coeff(i,j)); + if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; + } + } + RealScalar threshold = maxAbsOnUpperPart * prec; + for(Index j = 0; j < cols(); ++j) + for(Index i = j+1; i < rows(); ++i) + if(numext::abs(coeff(i, j)) > threshold) return false; + return true; +} + +/** \returns true if *this is approximately equal to a lower triangular matrix, + * within the precision given by \a prec. + * + * \sa isUpperTriangular() + */ +template +bool MatrixBase::isLowerTriangular(const RealScalar& prec) const +{ + RealScalar maxAbsOnLowerPart = static_cast(-1); + for(Index j = 0; j < cols(); ++j) + for(Index i = j; i < rows(); ++i) + { + RealScalar absValue = numext::abs(coeff(i,j)); + if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; + } + RealScalar threshold = maxAbsOnLowerPart * prec; + for(Index j = 1; j < cols(); ++j) + { + Index maxi = numext::mini(j, rows()-1); + for(Index i = 0; i < maxi; ++i) + if(numext::abs(coeff(i, j)) > threshold) return false; + } + return true; +} + + +/*************************************************************************** +**************************************************************************** +* Evaluators and Assignment of triangular expressions +*************************************************************************** +***************************************************************************/ + +namespace internal { + + +// TODO currently a triangular expression has the form TriangularView<.,.> +// in the future triangular-ness should be defined by the expression traits +// such that Transpose > is valid. 
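// [Editor's note] An illustrative sketch, not part of the vendored sources,
// of the structure predicates above: the off-triangular magnitudes are
// compared against prec times the largest coefficient of the triangular part,
// so tiny numerical noise does not flip the answer.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  m.triangularView<Eigen::Upper>() = Eigen::Matrix3d::Constant(1.0);
  bool ok1 = m.isUpperTriangular();  // exactly upper triangular: true
  m(2, 0) = 1e-20;                   // noise far below the default precision
  bool ok2 = m.isUpperTriangular();  // still true: 1e-20 <= 1.0 * prec
  return (ok1 && ok2) ? 0 : 1;
}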
(currently TriangularBase::transpose() is overloaded to make it work) +template +struct evaluator_traits > +{ + typedef typename storage_kind_to_evaluator_kind::Kind Kind; + typedef typename glue_shapes::Shape, TriangularShape>::type Shape; +}; + +template +struct unary_evaluator, IndexBased> + : evaluator::type> +{ + typedef TriangularView XprType; + typedef evaluator::type> Base; + unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {} +}; + +// Additional assignment kinds: +struct Triangular2Triangular {}; +struct Triangular2Dense {}; +struct Dense2Triangular {}; + + +template struct triangular_assignment_loop; + + +/** \internal Specialization of the dense assignment kernel for triangular matrices. + * The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions. + * \tparam UpLo must be either Lower or Upper + * \tparam Mode must be either 0, UnitDiag, ZeroDiag, or SelfAdjoint + */ +template +class triangular_dense_assignment_kernel : public generic_dense_assignment_kernel +{ +protected: + typedef generic_dense_assignment_kernel Base; + typedef typename Base::DstXprType DstXprType; + typedef typename Base::SrcXprType SrcXprType; + using Base::m_dst; + using Base::m_src; + using Base::m_functor; +public: + + typedef typename Base::DstEvaluatorType DstEvaluatorType; + typedef typename Base::SrcEvaluatorType SrcEvaluatorType; + typedef typename Base::Scalar Scalar; + typedef typename Base::AssignmentTraits AssignmentTraits; + + + EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) + : Base(dst, src, func, dstExpr) + {} + +#ifdef EIGEN_INTERNAL_DEBUGGING + EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) + { + eigen_internal_assert(row!=col); + Base::assignCoeff(row,col); + } +#else + using Base::assignCoeff; +#endif + + EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) + { + if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1)); + else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0)); + else if(Mode==0) Base::assignCoeff(id,id); + } + + EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col) + { + eigen_internal_assert(row!=col); + if(SetOpposite) + m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0)); + } +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func) +{ + typedef evaluator DstEvaluatorType; + typedef evaluator SrcEvaluatorType; + + SrcEvaluatorType srcEvaluator(src); + + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + DstEvaluatorType dstEvaluator(dst); + + typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite, + DstEvaluatorType,SrcEvaluatorType,Functor> Kernel; + Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); + + enum { + unroll = DstXprType::SizeAtCompileTime != Dynamic + && SrcEvaluatorType::CoeffReadCost < HugeCost + && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT + }; + + triangular_assignment_loop::run(kernel); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src) +{ + 
call_triangular_assignment_loop(dst, src, internal::assign_op()); +} + +template<> struct AssignmentKind { typedef Triangular2Triangular Kind; }; +template<> struct AssignmentKind { typedef Triangular2Dense Kind; }; +template<> struct AssignmentKind { typedef Dense2Triangular Kind; }; + + +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) + { + eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode)); + + call_triangular_assignment_loop(dst, src, func); + } +}; + +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) + { + call_triangular_assignment_loop(dst, src, func); + } +}; + +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) + { + call_triangular_assignment_loop(dst, src, func); + } +}; + + +template +struct triangular_assignment_loop +{ + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? + typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + + enum { + col = (UnrollCount-1) / DstXprType::RowsAtCompileTime, + row = (UnrollCount-1) % DstXprType::RowsAtCompileTime + }; + + typedef typename Kernel::Scalar Scalar; + + EIGEN_DEVICE_FUNC + static inline void run(Kernel &kernel) + { + triangular_assignment_loop::run(kernel); + + if(row==col) + kernel.assignDiagonalCoeff(row); + else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row +struct triangular_assignment_loop +{ + EIGEN_DEVICE_FUNC + static inline void run(Kernel &) {} +}; + + + +// TODO: experiment with a recursive assignment procedure splitting the current +// triangular part into one rectangular and two triangular parts. 
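// [Editor's note] A hedged sketch, not part of the vendored sources, of what
// the Dense2Triangular assignment kind above enables at the user level: a
// dense (or product) expression can be written into just one triangular part,
// and coefficients outside the selected part are never touched.
#include <Eigen/Dense>
int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();
  Eigen::Matrix3d C = Eigen::Matrix3d::Zero();
  C.triangularView<Eigen::Lower>()  = A * B;  // only the lower part is computed
  C.triangularView<Eigen::Lower>() += A * B;  // rank-update style accumulation
  return C(0, 2) == 0.0 ? 0 : 1;              // the upper part was never written
}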
+ + +template +struct triangular_assignment_loop +{ + typedef typename Kernel::Scalar Scalar; + EIGEN_DEVICE_FUNC + static inline void run(Kernel &kernel) + { + for(Index j = 0; j < kernel.cols(); ++j) + { + Index maxi = numext::mini(j, kernel.rows()); + Index i = 0; + if (((Mode&Lower) && SetOpposite) || (Mode&Upper)) + { + for(; i < maxi; ++i) + if(Mode&Upper) kernel.assignCoeff(i, j); + else kernel.assignOppositeCoeff(i, j); + } + else + i = maxi; + + if(i +template +void TriangularBase::evalToLazy(MatrixBase &other) const +{ + other.derived().resize(this->rows(), this->cols()); + internal::call_triangular_assignment_loop(other.derived(), derived().nestedExpression()); +} + +namespace internal { + +// Triangular = Product +template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> +struct Assignment, internal::assign_op::Scalar>, Dense2Triangular> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + dst._assignProduct(src, 1, 0); + } +}; + +// Triangular += Product +template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> +struct Assignment, internal::add_assign_op::Scalar>, Dense2Triangular> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &) + { + dst._assignProduct(src, 1, 1); + } +}; + +// Triangular -= Product +template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> +struct Assignment, internal::sub_assign_op::Scalar>, Dense2Triangular> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &) + { + dst._assignProduct(src, -1, 1); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULARMATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorBlock.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorBlock.h new file mode 100644 index 000000000..d72fbf7e9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorBlock.h @@ -0,0 +1,96 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_VECTORBLOCK_H +#define EIGEN_VECTORBLOCK_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : public traits::Flags & RowMajorBit ? 1 : Size, + traits::Flags & RowMajorBit ? Size : 1> > +{ +}; +} + +/** \class VectorBlock + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size sub-vector + * + * \tparam VectorType the type of the object in which we are taking a sub-vector + * \tparam Size size of the sub-vector we are taking at compile time (optional) + * + * This class represents an expression of either a fixed-size or dynamic-size sub-vector. 
+ * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment(Index) and + * most of the time this is the only way it is used. + * + * However, if you want to directly maniputate sub-vector expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. + * + * Here is an example illustrating the dynamic case: + * \include class_VectorBlock.cpp + * Output: \verbinclude class_VectorBlock.out + * + * \note Even though this expression has dynamic size, in the case where \a VectorType + * has fixed size, this expression inherits a fixed maximal size which means that evaluating + * it does not cause a dynamic memory allocation. + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedVectorBlock.cpp + * Output: \verbinclude class_FixedVectorBlock.out + * + * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index) + */ +template class VectorBlock + : public Block::Flags & RowMajorBit ? 1 : Size, + internal::traits::Flags & RowMajorBit ? Size : 1> +{ + typedef Block::Flags & RowMajorBit ? 1 : Size, + internal::traits::Flags & RowMajorBit ? Size : 1> Base; + enum { + IsColVector = !(internal::traits::Flags & RowMajorBit) + }; + public: + EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock) + + using Base::operator=; + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC + inline VectorBlock(VectorType& vector, Index start, Index size) + : Base(vector, + IsColVector ? start : 0, IsColVector ? 0 : start, + IsColVector ? size : 1, IsColVector ? 1 : size) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); + } + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC + inline VectorBlock(VectorType& vector, Index start) + : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_VECTORBLOCK_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorwiseOp.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorwiseOp.h new file mode 100644 index 000000000..4fe267e9f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/VectorwiseOp.h @@ -0,0 +1,695 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARTIAL_REDUX_H +#define EIGEN_PARTIAL_REDUX_H + +namespace Eigen { + +/** \class PartialReduxExpr + * \ingroup Core_Module + * + * \brief Generic expression of a partially reduxed matrix + * + * \tparam MatrixType the type of the matrix we are applying the redux operation + * \tparam MemberOp type of the member functor + * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal) + * + * This class represents an expression of a partial redux operator of a matrix. + * It is the return type of some VectorwiseOp functions, + * and most of the time this is the only way it is used. 
+ * + * \sa class VectorwiseOp + */ + +template< typename MatrixType, typename MemberOp, int Direction> +class PartialReduxExpr; + +namespace internal { +template +struct traits > + : traits +{ + typedef typename MemberOp::result_type Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename MatrixType::Scalar InputScalar; + enum { + RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime, + ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime, + Flags = RowsAtCompileTime == 1 ? RowMajorBit : 0, + TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime + }; +}; +} + +template< typename MatrixType, typename MemberOp, int Direction> +class PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr >::type, + internal::no_assignment_operator +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr) + + EIGEN_DEVICE_FUNC + explicit PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_DEVICE_FUNC + Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); } + EIGEN_DEVICE_FUNC + Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); } + + EIGEN_DEVICE_FUNC + typename MatrixType::Nested nestedExpression() const { return m_matrix; } + + EIGEN_DEVICE_FUNC + const MemberOp& functor() const { return m_functor; } + + protected: + typename MatrixType::Nested m_matrix; + const MemberOp m_functor; +}; + +#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \ + template \ + struct member_##MEMBER { \ + EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \ + typedef ResultType result_type; \ + template struct Cost \ + { enum { value = COST }; }; \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ + ResultType operator()(const XprType& mat) const \ + { return mat.MEMBER(); } \ + } + +namespace internal { + +EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits >::Cost ); +EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits::AddCost + NumTraits::MulCost); +EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits::MulCost); + +template +struct member_lpnorm { + typedef ResultType result_type; + template struct Cost + { enum { value = (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost }; }; + EIGEN_DEVICE_FUNC member_lpnorm() {} + template + EIGEN_DEVICE_FUNC inline ResultType operator()(const XprType& mat) const + { return mat.template lpNorm
<p>
(); } +}; + +template +struct member_redux { + typedef typename result_of< + BinaryOp(const Scalar&,const Scalar&) + >::type result_type; + template struct Cost + { enum { value = (Size-1) * functor_traits::Cost }; }; + EIGEN_DEVICE_FUNC explicit member_redux(const BinaryOp func) : m_functor(func) {} + template + EIGEN_DEVICE_FUNC inline result_type operator()(const DenseBase& mat) const + { return mat.redux(m_functor); } + const BinaryOp m_functor; +}; +} + +/** \class VectorwiseOp + * \ingroup Core_Module + * + * \brief Pseudo expression providing partial reduction operations + * + * \tparam ExpressionType the type of the object on which to do partial reductions + * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal) + * + * This class represents a pseudo expression with partial reduction features. + * It is the return type of DenseBase::colwise() and DenseBase::rowwise() + * and most of the time this is the only way it is used. + * + * Example: \include MatrixBase_colwise.cpp + * Output: \verbinclude MatrixBase_colwise.out + * + * \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr + */ +template class VectorwiseOp +{ + public: + + typedef typename ExpressionType::Scalar Scalar; + typedef typename ExpressionType::RealScalar RealScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + typedef typename internal::ref_selector::non_const_type ExpressionTypeNested; + typedef typename internal::remove_all::type ExpressionTypeNestedCleaned; + + template class Functor, + typename Scalar_=Scalar> struct ReturnType + { + typedef PartialReduxExpr, + Direction + > Type; + }; + + template struct ReduxReturnType + { + typedef PartialReduxExpr, + Direction + > Type; + }; + + enum { + isVertical = (Direction==Vertical) ? 1 : 0, + isHorizontal = (Direction==Horizontal) ? 1 : 0 + }; + + protected: + + typedef typename internal::conditional::type SubVector; + /** \internal + * \returns the i-th subvector according to the \c Direction */ + EIGEN_DEVICE_FUNC + SubVector subVector(Index i) + { + return SubVector(m_matrix.derived(),i); + } + + /** \internal + * \returns the number of subvectors in the direction \c Direction */ + EIGEN_DEVICE_FUNC + Index subVectors() const + { return isVertical?m_matrix.cols():m_matrix.rows(); } + + template struct ExtendedType { + typedef Replicate Type; + }; + + /** \internal + * Replicates a vector to match the size of \c *this */ + template + EIGEN_DEVICE_FUNC + typename ExtendedType::Type + extendedTo(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxColsAtCompileTime==1), + YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED) + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1), + YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED) + return typename ExtendedType::Type + (other.derived(), + isVertical ? 1 : m_matrix.rows(), + isHorizontal ? 
1 : m_matrix.cols()); + } + + template struct OppositeExtendedType { + typedef Replicate Type; + }; + + /** \internal + * Replicates a vector in the opposite direction to match the size of \c *this */ + template + EIGEN_DEVICE_FUNC + typename OppositeExtendedType::Type + extendedToOpposite(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxColsAtCompileTime==1), + YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED) + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxRowsAtCompileTime==1), + YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED) + return typename OppositeExtendedType::Type + (other.derived(), + isHorizontal ? 1 : m_matrix.rows(), + isVertical ? 1 : m_matrix.cols()); + } + + public: + EIGEN_DEVICE_FUNC + explicit inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {} + + /** \internal */ + EIGEN_DEVICE_FUNC + inline const ExpressionType& _expression() const { return m_matrix; } + + /** \returns a row or column vector expression of \c *this reduxed by \a func + * + * The template parameter \a BinaryOp is the type of the functor + * of the custom redux operator. Note that func must be an associative operator. + * + * \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise() + */ + template + EIGEN_DEVICE_FUNC + const typename ReduxReturnType::Type + redux(const BinaryOp& func = BinaryOp()) const + { return typename ReduxReturnType::Type(_expression(), internal::member_redux(func)); } + + typedef typename ReturnType::Type MinCoeffReturnType; + typedef typename ReturnType::Type MaxCoeffReturnType; + typedef typename ReturnType::Type SquaredNormReturnType; + typedef typename ReturnType::Type NormReturnType; + typedef typename ReturnType::Type BlueNormReturnType; + typedef typename ReturnType::Type StableNormReturnType; + typedef typename ReturnType::Type HypotNormReturnType; + typedef typename ReturnType::Type SumReturnType; + typedef typename ReturnType::Type MeanReturnType; + typedef typename ReturnType::Type AllReturnType; + typedef typename ReturnType::Type AnyReturnType; + typedef PartialReduxExpr, Direction> CountReturnType; + typedef typename ReturnType::Type ProdReturnType; + typedef Reverse ConstReverseReturnType; + typedef Reverse ReverseReturnType; + + template struct LpNormReturnType { + typedef PartialReduxExpr,Direction> Type; + }; + + /** \returns a row (or column) vector expression of the smallest coefficient + * of each column (or row) of the referenced expression. + * + * \warning the result is undefined if \c *this contains NaN. + * + * Example: \include PartialRedux_minCoeff.cpp + * Output: \verbinclude PartialRedux_minCoeff.out + * + * \sa DenseBase::minCoeff() */ + EIGEN_DEVICE_FUNC + const MinCoeffReturnType minCoeff() const + { return MinCoeffReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the largest coefficient + * of each column (or row) of the referenced expression. + * + * \warning the result is undefined if \c *this contains NaN. + * + * Example: \include PartialRedux_maxCoeff.cpp + * Output: \verbinclude PartialRedux_maxCoeff.out + * + * \sa DenseBase::maxCoeff() */ + EIGEN_DEVICE_FUNC + const MaxCoeffReturnType maxCoeff() const + { return MaxCoeffReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the squared norm + * of each column (or row) of the referenced expression. + * This is a vector with real entries, even if the original matrix has complex entries. 
+ * + * Example: \include PartialRedux_squaredNorm.cpp + * Output: \verbinclude PartialRedux_squaredNorm.out + * + * \sa DenseBase::squaredNorm() */ + EIGEN_DEVICE_FUNC + const SquaredNormReturnType squaredNorm() const + { return SquaredNormReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression. + * This is a vector with real entries, even if the original matrix has complex entries. + * + * Example: \include PartialRedux_norm.cpp + * Output: \verbinclude PartialRedux_norm.out + * + * \sa DenseBase::norm() */ + EIGEN_DEVICE_FUNC + const NormReturnType norm() const + { return NormReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression. + * This is a vector with real entries, even if the original matrix has complex entries. + * + * Example: \include PartialRedux_norm.cpp + * Output: \verbinclude PartialRedux_norm.out + * + * \sa DenseBase::norm() */ + template + EIGEN_DEVICE_FUNC + const typename LpNormReturnType
<p>
::Type lpNorm() const + { return typename LpNormReturnType
<p>
::Type(_expression()); } + + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression, using + * Blue's algorithm. + * This is a vector with real entries, even if the original matrix has complex entries. + * + * \sa DenseBase::blueNorm() */ + EIGEN_DEVICE_FUNC + const BlueNormReturnType blueNorm() const + { return BlueNormReturnType(_expression()); } + + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression, avoiding + * underflow and overflow. + * This is a vector with real entries, even if the original matrix has complex entries. + * + * \sa DenseBase::stableNorm() */ + EIGEN_DEVICE_FUNC + const StableNormReturnType stableNorm() const + { return StableNormReturnType(_expression()); } + + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression, avoiding + * underflow and overflow using a concatenation of hypot() calls. + * This is a vector with real entries, even if the original matrix has complex entries. + * + * \sa DenseBase::hypotNorm() */ + EIGEN_DEVICE_FUNC + const HypotNormReturnType hypotNorm() const + { return HypotNormReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the sum + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_sum.cpp + * Output: \verbinclude PartialRedux_sum.out + * + * \sa DenseBase::sum() */ + EIGEN_DEVICE_FUNC + const SumReturnType sum() const + { return SumReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the mean + * of each column (or row) of the referenced expression. + * + * \sa DenseBase::mean() */ + EIGEN_DEVICE_FUNC + const MeanReturnType mean() const + { return MeanReturnType(_expression()); } + + /** \returns a row (or column) vector expression representing + * whether \b all coefficients of each respective column (or row) are \c true. + * This expression can be assigned to a vector with entries of type \c bool. + * + * \sa DenseBase::all() */ + EIGEN_DEVICE_FUNC + const AllReturnType all() const + { return AllReturnType(_expression()); } + + /** \returns a row (or column) vector expression representing + * whether \b at \b least one coefficient of each respective column (or row) is \c true. + * This expression can be assigned to a vector with entries of type \c bool. + * + * \sa DenseBase::any() */ + EIGEN_DEVICE_FUNC + const AnyReturnType any() const + { return AnyReturnType(_expression()); } + + /** \returns a row (or column) vector expression representing + * the number of \c true coefficients of each respective column (or row). + * This expression can be assigned to a vector whose entries have the same type as is used to + * index entries of the original matrix; for dense matrices, this is \c std::ptrdiff_t . + * + * Example: \include PartialRedux_count.cpp + * Output: \verbinclude PartialRedux_count.out + * + * \sa DenseBase::count() */ + EIGEN_DEVICE_FUNC + const CountReturnType count() const + { return CountReturnType(_expression()); } + + /** \returns a row (or column) vector expression of the product + * of each column (or row) of the referenced expression. 
+ * + * Example: \include PartialRedux_prod.cpp + * Output: \verbinclude PartialRedux_prod.out + * + * \sa DenseBase::prod() */ + EIGEN_DEVICE_FUNC + const ProdReturnType prod() const + { return ProdReturnType(_expression()); } + + + /** \returns a matrix expression + * where each column (or row) are reversed. + * + * Example: \include Vectorwise_reverse.cpp + * Output: \verbinclude Vectorwise_reverse.out + * + * \sa DenseBase::reverse() */ + EIGEN_DEVICE_FUNC + const ConstReverseReturnType reverse() const + { return ConstReverseReturnType( _expression() ); } + + /** \returns a writable matrix expression + * where each column (or row) are reversed. + * + * \sa reverse() const */ + EIGEN_DEVICE_FUNC + ReverseReturnType reverse() + { return ReverseReturnType( _expression() ); } + + typedef Replicate ReplicateReturnType; + EIGEN_DEVICE_FUNC + const ReplicateReturnType replicate(Index factor) const; + + /** + * \return an expression of the replication of each column (or row) of \c *this + * + * Example: \include DirectionWise_replicate.cpp + * Output: \verbinclude DirectionWise_replicate.out + * + * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate + */ + // NOTE implemented here because of sunstudio's compilation errors + // isVertical*Factor+isHorizontal instead of (isVertical?Factor:1) to handle CUDA bug with ternary operator + template const Replicate + EIGEN_DEVICE_FUNC + replicate(Index factor = Factor) const + { + return Replicate + (_expression(),isVertical?factor:1,isHorizontal?factor:1); + } + +/////////// Artithmetic operators /////////// + + /** Copies the vector \a other to each subvector of \c *this */ + template + EIGEN_DEVICE_FUNC + ExpressionType& operator=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME + return const_cast(m_matrix = extendedTo(other.derived())); + } + + /** Adds the vector \a other to each subvector of \c *this */ + template + EIGEN_DEVICE_FUNC + ExpressionType& operator+=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return const_cast(m_matrix += extendedTo(other.derived())); + } + + /** Substracts the vector \a other to each subvector of \c *this */ + template + EIGEN_DEVICE_FUNC + ExpressionType& operator-=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return const_cast(m_matrix -= extendedTo(other.derived())); + } + + /** Multiples each subvector of \c *this by the vector \a other */ + template + EIGEN_DEVICE_FUNC + ExpressionType& operator*=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + m_matrix *= extendedTo(other.derived()); + return const_cast(m_matrix); + } + + /** Divides each subvector of \c *this by the vector \a other */ + template + EIGEN_DEVICE_FUNC + ExpressionType& operator/=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + m_matrix /= extendedTo(other.derived()); + return const_cast(m_matrix); + } + + /** Returns the expression of the sum of the vector \a other to each subvector of \c 
*this */ + template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + operator+(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix + extendedTo(other.derived()); + } + + /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */ + template + EIGEN_DEVICE_FUNC + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + operator-(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix - extendedTo(other.derived()); + } + + /** Returns the expression where each subvector is the product of the vector \a other + * by the corresponding subvector of \c *this */ + template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + EIGEN_DEVICE_FUNC + operator*(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix * extendedTo(other.derived()); + } + + /** Returns the expression where each subvector is the quotient of the corresponding + * subvector of \c *this by the vector \a other */ + template + EIGEN_DEVICE_FUNC + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + operator/(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix / extendedTo(other.derived()); + } + + /** \returns an expression where each column (or row) of the referenced matrix are normalized. + * The referenced matrix is \b not modified. + * \sa MatrixBase::normalized(), normalize() + */ + EIGEN_DEVICE_FUNC + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename OppositeExtendedType::Type>::Type> + normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); } + + + /** Normalize in-place each row or columns of the referenced matrix. + * \sa MatrixBase::normalize(), normalized() + */ + EIGEN_DEVICE_FUNC void normalize() { + m_matrix = this->normalized(); + } + + EIGEN_DEVICE_FUNC inline void reverseInPlace(); + +/////////// Geometry module /////////// + + typedef Homogeneous HomogeneousReturnType; + EIGEN_DEVICE_FUNC + HomogeneousReturnType homogeneous() const; + + typedef typename ExpressionType::PlainObject CrossReturnType; + template + EIGEN_DEVICE_FUNC + const CrossReturnType cross(const MatrixBase& other) const; + + enum { + HNormalized_Size = Direction==Vertical ? internal::traits::RowsAtCompileTime + : internal::traits::ColsAtCompileTime, + HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1 + }; + typedef Block::RowsAtCompileTime), + Direction==Horizontal ? int(HNormalized_SizeMinusOne) + : int(internal::traits::ColsAtCompileTime)> + HNormalized_Block; + typedef Block::RowsAtCompileTime), + Direction==Horizontal ? 
1 : int(internal::traits::ColsAtCompileTime)> + HNormalized_Factors; + typedef CwiseBinaryOp::Scalar>, + const HNormalized_Block, + const Replicate > + HNormalizedReturnType; + + EIGEN_DEVICE_FUNC + const HNormalizedReturnType hnormalized() const; + + protected: + ExpressionTypeNested m_matrix; +}; + +//const colwise moved to DenseBase.h due to CUDA compiler bug + + +/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations + * + * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ +template +inline typename DenseBase::ColwiseReturnType +DenseBase::colwise() +{ + return ColwiseReturnType(derived()); +} + +//const rowwise moved to DenseBase.h due to CUDA compiler bug + + +/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations + * + * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ +template +inline typename DenseBase::RowwiseReturnType +DenseBase::rowwise() +{ + return RowwiseReturnType(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_PARTIAL_REDUX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Visitor.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Visitor.h new file mode 100644 index 000000000..54c1883d9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/Visitor.h @@ -0,0 +1,273 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
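// Editor's note: illustration only, not part of the vendored diff. Before the
// Visitor.h implementation begins, a minimal sketch of the VectorwiseOp API
// defined in the file above: colwise()/rowwise() return the pseudo-expression
// whose partial reductions and broadcasting operators are declared there.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;

  // Partial reductions: one scalar per column (or per row).
  Eigen::RowVectorXd colSums = m.colwise().sum();   // [5 7 9]
  Eigen::VectorXd rowNorms = m.rowwise().norm();

  // Broadcasting (the extendedTo() path): add a vector to every column.
  Eigen::Vector2d v(10, 20);
  m.colwise() += v;

  std::cout << colSums << "\n" << rowNorms.transpose() << "\n" << m << "\n";
  return 0;
}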
+ +#ifndef EIGEN_VISITOR_H +#define EIGEN_VISITOR_H + +namespace Eigen { + +namespace internal { + +template +struct visitor_impl +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + EIGEN_DEVICE_FUNC + static inline void run(const Derived &mat, Visitor& visitor) + { + visitor_impl::run(mat, visitor); + visitor(mat.coeff(row, col), row, col); + } +}; + +template +struct visitor_impl +{ + EIGEN_DEVICE_FUNC + static inline void run(const Derived &mat, Visitor& visitor) + { + return visitor.init(mat.coeff(0, 0), 0, 0); + } +}; + +template +struct visitor_impl +{ + EIGEN_DEVICE_FUNC + static inline void run(const Derived& mat, Visitor& visitor) + { + visitor.init(mat.coeff(0,0), 0, 0); + for(Index i = 1; i < mat.rows(); ++i) + visitor(mat.coeff(i, 0), i, 0); + for(Index j = 1; j < mat.cols(); ++j) + for(Index i = 0; i < mat.rows(); ++i) + visitor(mat.coeff(i, j), i, j); + } +}; + +// evaluator adaptor +template +class visitor_evaluator +{ +public: + EIGEN_DEVICE_FUNC + explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + RowsAtCompileTime = XprType::RowsAtCompileTime, + CoeffReadCost = internal::evaluator::CoeffReadCost + }; + + EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); } + EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); } + EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const + { return m_evaluator.coeff(row, col); } + +protected: + internal::evaluator m_evaluator; + const XprType &m_xpr; +}; +} // end namespace internal + +/** Applies the visitor \a visitor to the whole coefficients of the matrix or vector. + * + * The template parameter \a Visitor is the type of the visitor and provides the following interface: + * \code + * struct MyVisitor { + * // called for the first coefficient + * void init(const Scalar& value, Index i, Index j); + * // called for all other coefficients + * void operator() (const Scalar& value, Index i, Index j); + * }; + * \endcode + * + * \note compared to one or two \em for \em loops, visitors offer automatic + * unrolling for small fixed size matrix. 
+ * + * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux() + */ +template +template +EIGEN_DEVICE_FUNC +void DenseBase::visit(Visitor& visitor) const +{ + typedef typename internal::visitor_evaluator ThisEvaluator; + ThisEvaluator thisEval(derived()); + + enum { + unroll = SizeAtCompileTime != Dynamic + && SizeAtCompileTime * ThisEvaluator::CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits::Cost <= EIGEN_UNROLLING_LIMIT + }; + return internal::visitor_impl::run(thisEval, visitor); +} + +namespace internal { + +/** \internal + * \brief Base class to implement min and max visitors + */ +template +struct coeff_visitor +{ + typedef typename Derived::Scalar Scalar; + Index row, col; + Scalar res; + EIGEN_DEVICE_FUNC + inline void init(const Scalar& value, Index i, Index j) + { + res = value; + row = i; + col = j; + } +}; + +/** \internal + * \brief Visitor computing the min coefficient with its value and coordinates + * + * \sa DenseBase::minCoeff(Index*, Index*) + */ +template +struct min_coeff_visitor : coeff_visitor +{ + typedef typename Derived::Scalar Scalar; + EIGEN_DEVICE_FUNC + void operator() (const Scalar& value, Index i, Index j) + { + if(value < this->res) + { + this->res = value; + this->row = i; + this->col = j; + } + } +}; + +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost + }; +}; + +/** \internal + * \brief Visitor computing the max coefficient with its value and coordinates + * + * \sa DenseBase::maxCoeff(Index*, Index*) + */ +template +struct max_coeff_visitor : coeff_visitor +{ + typedef typename Derived::Scalar Scalar; + EIGEN_DEVICE_FUNC + void operator() (const Scalar& value, Index i, Index j) + { + if(value > this->res) + { + this->res = value; + this->row = i; + this->col = j; + } + } +}; + +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost + }; +}; + +} // end namespace internal + +/** \fn DenseBase::minCoeff(IndexType* rowId, IndexType* colId) const + * \returns the minimum of all coefficients of *this and puts in *row and *col its location. + * \warning the result is undefined if \c *this contains NaN. + * + * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff() + */ +template +template +EIGEN_DEVICE_FUNC +typename internal::traits::Scalar +DenseBase::minCoeff(IndexType* rowId, IndexType* colId) const +{ + internal::min_coeff_visitor minVisitor; + this->visit(minVisitor); + *rowId = minVisitor.row; + if (colId) *colId = minVisitor.col; + return minVisitor.res; +} + +/** \returns the minimum of all coefficients of *this and puts in *index its location. + * \warning the result is undefined if \c *this contains NaN. + * + * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff() + */ +template +template +EIGEN_DEVICE_FUNC +typename internal::traits::Scalar +DenseBase::minCoeff(IndexType* index) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + internal::min_coeff_visitor minVisitor; + this->visit(minVisitor); + *index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row); + return minVisitor.res; +} + +/** \fn DenseBase::maxCoeff(IndexType* rowId, IndexType* colId) const + * \returns the maximum of all coefficients of *this and puts in *row and *col its location. + * \warning the result is undefined if \c *this contains NaN. 
+ * + * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff() + */ +template +template +EIGEN_DEVICE_FUNC +typename internal::traits::Scalar +DenseBase::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const +{ + internal::max_coeff_visitor maxVisitor; + this->visit(maxVisitor); + *rowPtr = maxVisitor.row; + if (colPtr) *colPtr = maxVisitor.col; + return maxVisitor.res; +} + +/** \returns the maximum of all coefficients of *this and puts in *index its location. + * \warning the result is undefined if \c *this contains NaN. + * + * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff() + */ +template +template +EIGEN_DEVICE_FUNC +typename internal::traits::Scalar +DenseBase::maxCoeff(IndexType* index) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + internal::max_coeff_visitor maxVisitor; + this->visit(maxVisitor); + *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row; + return maxVisitor.res; +} + +} // end namespace Eigen + +#endif // EIGEN_VISITOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/Complex.h new file mode 100644 index 000000000..7fa61969d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/Complex.h @@ -0,0 +1,451 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
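// Editor's note: illustration only, not part of the vendored diff. A minimal
// sketch of the Visitor.h machinery added above: a custom visitor implementing
// the documented init()/operator() interface, plus the index-returning
// minCoeff() reduction built on min_coeff_visitor. SumVisitor is a
// hypothetical example type.
#include <Eigen/Dense>
#include <iostream>

struct SumVisitor {
  double sum = 0.0;
  // Called for the first coefficient.
  void init(const double& value, Eigen::Index, Eigen::Index) { sum = value; }
  // Called for every remaining coefficient.
  void operator()(const double& value, Eigen::Index, Eigen::Index) { sum += value; }
};

int main() {
  Eigen::Matrix2d m;
  m << 4, -1,
       2,  7;

  SumVisitor v;
  m.visit(v);                      // v.sum == 12

  Eigen::Index i, j;
  double mn = m.minCoeff(&i, &j);  // mn == -1, located at (0,1)
  std::cout << v.sum << " " << mn << " (" << i << "," << j << ")\n";
  return 0;
}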
+ +#ifndef EIGEN_COMPLEX_AVX_H +#define EIGEN_COMPLEX_AVX_H + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet4cf +{ + EIGEN_STRONG_INLINE Packet4cf() {} + EIGEN_STRONG_INLINE explicit Packet4cf(const __m256& a) : v(a) {} + __m256 v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet4cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { typedef std::complex type; enum {size=4, alignment=Aligned32}; typedef Packet2cf half; }; + +template<> EIGEN_STRONG_INLINE Packet4cf padd(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf psub(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pnegate(const Packet4cf& a) +{ + return Packet4cf(pnegate(a.v)); +} +template<> EIGEN_STRONG_INLINE Packet4cf pconj(const Packet4cf& a) +{ + const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet4cf(_mm256_xor_ps(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) +{ + __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v); + __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1))); + __m256 result = _mm256_addsub_ps(tmp1, tmp2); + return Packet4cf(result); +} + +template<> EIGEN_STRONG_INLINE Packet4cf pand (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf por (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pxor (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pandnot(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet4cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload(&numext::real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet4cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu(&numext::real_ref(*from))); } + + +template<> EIGEN_STRONG_INLINE Packet4cf pset1(const std::complex& from) +{ + return Packet4cf(_mm256_castpd_ps(_mm256_broadcast_sd((const double*)(const void*)&from))); +} + +template<> EIGEN_STRONG_INLINE Packet4cf ploaddup(const std::complex* from) +{ + // FIXME The following might be optimized using _mm256_movedup_pd + Packet2cf a = ploaddup(from); + Packet2cf b = ploaddup(from+1); + return Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1)); +} + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather, Packet4cf>(const std::complex* from, Index stride) +{ + return 
Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]), + std::imag(from[2*stride]), std::real(from[2*stride]), + std::imag(from[1*stride]), std::real(from[1*stride]), + std::imag(from[0*stride]), std::real(from[0*stride]))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet4cf>(std::complex* to, const Packet4cf& from, Index stride) +{ + __m128 low = _mm256_extractf128_ps(from.v, 0); + to[stride*0] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)), + _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1))); + to[stride*1] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)), + _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3))); + + __m128 high = _mm256_extractf128_ps(from.v, 1); + to[stride*2] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)), + _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1))); + to[stride*3] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)), + _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3))); + +} + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet4cf& a) +{ + return pfirst(Packet2cf(_mm256_castps256_ps128(a.v))); +} + +template<> EIGEN_STRONG_INLINE Packet4cf preverse(const Packet4cf& a) { + __m128 low = _mm256_extractf128_ps(a.v, 0); + __m128 high = _mm256_extractf128_ps(a.v, 1); + __m128d lowd = _mm_castps_pd(low); + __m128d highd = _mm_castps_pd(high); + low = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1)); + high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1)); + __m256 result = _mm256_setzero_ps(); + result = _mm256_insertf128_ps(result, low, 1); + result = _mm256_insertf128_ps(result, high, 0); + return Packet4cf(result); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet4cf& a) +{ + return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)), + Packet2cf(_mm256_extractf128_ps(a.v,1)))); +} + +template<> EIGEN_STRONG_INLINE Packet4cf preduxp(const Packet4cf* vecs) +{ + Packet8f t0 = _mm256_shuffle_ps(vecs[0].v, vecs[0].v, _MM_SHUFFLE(3, 1, 2 ,0)); + Packet8f t1 = _mm256_shuffle_ps(vecs[1].v, vecs[1].v, _MM_SHUFFLE(3, 1, 2 ,0)); + t0 = _mm256_hadd_ps(t0,t1); + Packet8f t2 = _mm256_shuffle_ps(vecs[2].v, vecs[2].v, _MM_SHUFFLE(3, 1, 2 ,0)); + Packet8f t3 = _mm256_shuffle_ps(vecs[3].v, vecs[3].v, _MM_SHUFFLE(3, 1, 2 ,0)); + t2 = _mm256_hadd_ps(t2,t3); + + t1 = _mm256_permute2f128_ps(t0,t2, 0 + (2<<4)); + t3 = _mm256_permute2f128_ps(t0,t2, 1 + (3<<4)); + + return Packet4cf(_mm256_add_ps(t1,t3)); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet4cf& a) +{ + return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)), + Packet2cf(_mm256_extractf128_ps(a.v, 1)))); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4cf& first, const Packet4cf& second) + { + if (Offset==0) return; + palign_impl::run(first.v, second.v); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const + { + return internal::pmul(a, pconj(b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const + { + return internal::pmul(pconj(a), b); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, 
const Packet4cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const + { + return pconj(internal::pmul(a, b)); + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cf,Packet8f) + +template<> EIGEN_STRONG_INLINE Packet4cf pdiv(const Packet4cf& a, const Packet4cf& b) +{ + Packet4cf num = pmul(a, pconj(b)); + __m256 tmp = _mm256_mul_ps(b.v, b.v); + __m256 tmp2 = _mm256_shuffle_ps(tmp,tmp,0xB1); + __m256 denom = _mm256_add_ps(tmp, tmp2); + return Packet4cf(_mm256_div_ps(num.v, denom)); +} + +template<> EIGEN_STRONG_INLINE Packet4cf pcplxflip(const Packet4cf& x) +{ + return Packet4cf(_mm256_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1))); +} + +//---------- double ---------- +struct Packet2cd +{ + EIGEN_STRONG_INLINE Packet2cd() {} + EIGEN_STRONG_INLINE explicit Packet2cd(const __m256d& a) : v(a) {} + __m256d v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { typedef std::complex type; enum {size=2, alignment=Aligned32}; typedef Packet1cd half; }; + +template<> EIGEN_STRONG_INLINE Packet2cd padd(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd psub(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pnegate(const Packet2cd& a) { return Packet2cd(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pconj(const Packet2cd& a) +{ + const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0)); + return Packet2cd(_mm256_xor_pd(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) +{ + __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0); + __m256d even = _mm256_mul_pd(tmp1, b.v); + __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF); + __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5); + __m256d odd = _mm256_mul_pd(tmp2, tmp3); + return Packet2cd(_mm256_addsub_pd(even, odd)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pand (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd por (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_or_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pxor (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pandnot(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu((const double*)from)); } + +template<> EIGEN_STRONG_INLINE Packet2cd pset1(const std::complex& from) +{ + // in case casting to a __m128d* is really not safe, then we can still fallback to this version: (much slower though) +// return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from)); + return Packet2cd(_mm256_broadcast_pd((const 
__m128d*)(const void*)&from)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather, Packet2cd>(const std::complex* from, Index stride) +{ + return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]), + std::imag(from[0*stride]), std::real(from[0*stride]))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cd>(std::complex* to, const Packet2cd& from, Index stride) +{ + __m128d low = _mm256_extractf128_pd(from.v, 0); + to[stride*0] = std::complex(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1))); + __m128d high = _mm256_extractf128_pd(from.v, 1); + to[stride*1] = std::complex(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1))); +} + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cd& a) +{ + __m128d low = _mm256_extractf128_pd(a.v, 0); + EIGEN_ALIGN16 double res[2]; + _mm_store_pd(res, low); + return std::complex(res[0],res[1]); +} + +template<> EIGEN_STRONG_INLINE Packet2cd preverse(const Packet2cd& a) { + __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1); + return Packet2cd(result); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cd& a) +{ + return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)), + Packet1cd(_mm256_extractf128_pd(a.v,1)))); +} + +template<> EIGEN_STRONG_INLINE Packet2cd preduxp(const Packet2cd* vecs) +{ + Packet4d t0 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 0 + (2<<4)); + Packet4d t1 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 1 + (3<<4)); + + return Packet2cd(_mm256_add_pd(t0,t1)); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cd& a) +{ + return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)), + Packet1cd(_mm256_extractf128_pd(a.v,1)))); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2cd& first, const Packet2cd& second) + { + if (Offset==0) return; + palign_impl::run(first.v, second.v); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const + { + return internal::pmul(a, pconj(b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const + { + return internal::pmul(pconj(a), b); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const + { + return pconj(internal::pmul(a, b)); + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cd,Packet4d) + +template<> EIGEN_STRONG_INLINE Packet2cd pdiv(const Packet2cd& a, const Packet2cd& b) +{ + Packet2cd num = pmul(a, pconj(b)); + __m256d tmp = _mm256_mul_pd(b.v, b.v); + __m256d denom = _mm256_hadd_pd(tmp, tmp); + return 
Packet2cd(_mm256_div_pd(num.v, denom)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pcplxflip(const Packet2cd& x) +{ + return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d P0 = _mm256_castps_pd(kernel.packet[0].v); + __m256d P1 = _mm256_castps_pd(kernel.packet[1].v); + __m256d P2 = _mm256_castps_pd(kernel.packet[2].v); + __m256d P3 = _mm256_castps_pd(kernel.packet[3].v); + + __m256d T0 = _mm256_shuffle_pd(P0, P1, 15); + __m256d T1 = _mm256_shuffle_pd(P0, P1, 0); + __m256d T2 = _mm256_shuffle_pd(P2, P3, 15); + __m256d T3 = _mm256_shuffle_pd(P2, P3, 0); + + kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32)); + kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49)); + kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32)); + kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4)); + kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4)); + kernel.packet[0].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet4cf pinsertfirst(const Packet4cf& a, std::complex b) +{ + return Packet4cf(_mm256_blend_ps(a.v,pset1(b).v,1|2)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pinsertfirst(const Packet2cd& a, std::complex b) +{ + return Packet2cd(_mm256_blend_pd(a.v,pset1(b).v,1|2)); +} + +template<> EIGEN_STRONG_INLINE Packet4cf pinsertlast(const Packet4cf& a, std::complex b) +{ + return Packet4cf(_mm256_blend_ps(a.v,pset1(b).v,(1<<7)|(1<<6))); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pinsertlast(const Packet2cd& a, std::complex b) +{ + return Packet2cd(_mm256_blend_pd(a.v,pset1(b).v,(1<<3)|(1<<2))); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_AVX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/MathFunctions.h new file mode 100644 index 000000000..6af67ce2d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/MathFunctions.h @@ -0,0 +1,439 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
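// Editor's note: illustration only, not part of the vendored diff. A scalar
// reference for the AVX complex pmul kernels above: moveldup/movehdup split a
// into (re,re) and (im,im) lanes, and addsub stitches the two products into
// the standard complex multiply. pmul_reference is a hypothetical helper name.
#include <complex>
#include <cstdio>

static std::complex<float> pmul_reference(std::complex<float> a,
                                          std::complex<float> b) {
  // "sub" half of _mm256_addsub_ps: real part.
  float re = a.real() * b.real() - a.imag() * b.imag();
  // "add" half of _mm256_addsub_ps: imaginary part.
  float im = a.real() * b.imag() + a.imag() * b.real();
  return {re, im};
}

int main() {
  std::complex<float> a(1.f, 2.f), b(3.f, -4.f);
  std::complex<float> p = pmul_reference(a, b), q = a * b;
  std::printf("(%g,%g) vs (%g,%g)\n", p.real(), p.imag(), q.real(), q.imag());
  return 0;
}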
+ +#ifndef EIGEN_MATH_FUNCTIONS_AVX_H +#define EIGEN_MATH_FUNCTIONS_AVX_H + +/* The sin, cos, exp, and log functions of this file are loosely derived from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +namespace Eigen { + +namespace internal { + +inline Packet8i pshiftleft(Packet8i v, int n) +{ +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_slli_epi32(v, n); +#else + __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n); + __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +inline Packet8f pshiftright(Packet8f v, int n) +{ +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n)); +#else + __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n); + __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n); + return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1)); +#endif +} + +// Sine function +// Computes sin(x) by wrapping x to the interval [-Pi/4,3*Pi/4] and +// evaluating interpolants in [-Pi/4,Pi/4] or [Pi/4,3*Pi/4]. The interpolants +// are (anti-)symmetric and thus have only odd/even coefficients +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +psin(const Packet8f& _x) { + Packet8f x = _x; + + // Some useful values. + _EIGEN_DECLARE_CONST_Packet8i(one, 1); + _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f); + _EIGEN_DECLARE_CONST_Packet8f(two, 2.0f); + _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f); + _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f); + _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f); + _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f); + _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f); + _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f); + + // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period. + Packet8f z = pmul(x, p8f_one_over_pi); + Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four)); + x = pmadd(shift, p8f_neg_pi_first, x); + x = pmadd(shift, p8f_neg_pi_second, x); + x = pmadd(shift, p8f_neg_pi_third, x); + z = pmul(x, p8f_four_over_pi); + + // Make a mask for the entries that need flipping, i.e. wherever the shift + // is odd. + Packet8i shift_ints = _mm256_cvtps_epi32(shift); + Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one))); + Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31); + + // Create a mask for which interpolant to use, i.e. if z > 1, then the mask + // is set to ones for that entry. + Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ); + + // Evaluate the polynomial for the interval [1,3] in z. + _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f); + Packet8f z_minus_two = psub(z, p8f_two); + Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two); + Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4); + right = pmadd(right, z_minus_two2, p8f_coeff_right_2); + right = pmadd(right, z_minus_two2, p8f_coeff_right_0); + + // Evaluate the polynomial for the interval [-1,1] in z. 
+ _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f); + _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f); + Packet8f z2 = pmul(z, z); + Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5); + left = pmadd(left, z2, p8f_coeff_left_3); + left = pmadd(left, z2, p8f_coeff_left_1); + left = pmul(left, z); + + // Assemble the results, i.e. select the left and right polynomials. + left = _mm256_andnot_ps(ival_mask, left); + right = _mm256_and_ps(ival_mask, right); + Packet8f res = _mm256_or_ps(left, right); + + // Flip the sign on the odd intervals and return the result. + res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask)); + return res; +} + +// Natural logarithm +// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2) +// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can +// be easily approximated by a polynomial centered on m=1 for stability. +// TODO(gonnet): Further reduce the interval allowing for lower-degree +// polynomial interpolants -> ... -> profit! +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +plog(const Packet8f& _x) { + Packet8f x = _x; + _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f); + _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f); + _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f); + + _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000); + + // The smallest non denormalized float number. + _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000); + _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000); + + // Polynomial coefficients. + _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f); + + Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN + Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ); + + // Truncate input values to the minimum positive normal. + x = pmax(x, p8f_min_norm_pos); + + Packet8f emm0 = pshiftright(x,23); + Packet8f e = _mm256_sub_ps(emm0, p8f_126f); + + // Set the exponents to -1, i.e. x are in the range [0.5,1). + x = _mm256_and_ps(x, p8f_inv_mant_mask); + x = _mm256_or_ps(x, p8f_half); + + // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2)) + // and shift by -1. The values are then centered around 0, which improves + // the stability of the polynomial evaluation. 
+ // if( x < SQRTHF ) { + // e -= 1; + // x = x + x - 1.0; + // } else { x = x - 1.0; } + Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ); + Packet8f tmp = _mm256_and_ps(x, mask); + x = psub(x, p8f_1); + e = psub(e, _mm256_and_ps(p8f_1, mask)); + x = padd(x, tmp); + + Packet8f x2 = pmul(x, x); + Packet8f x3 = pmul(x2, x); + + // Evaluate the polynomial approximant of degree 8 in three parts, probably + // to improve instruction-level parallelism. + Packet8f y, y1, y2; + y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1); + y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4); + y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7); + y = pmadd(y, x, p8f_cephes_log_p2); + y1 = pmadd(y1, x, p8f_cephes_log_p5); + y2 = pmadd(y2, x, p8f_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); + + // Add the logarithm of the exponent back to the result of the interpolation. + y1 = pmul(e, p8f_cephes_log_q1); + tmp = pmul(x2, p8f_half); + y = padd(y, y1); + x = psub(x, tmp); + y2 = pmul(e, p8f_cephes_log_q2); + x = padd(x, y); + x = padd(x, y2); + + // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF. + return _mm256_or_ps( + _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)), + _mm256_and_ps(iszero_mask, p8f_minus_inf)); +} + +// Exponential function. Works by writing "x = m*log(2) + r" where +// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then +// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +pexp(const Packet8f& _x) { + _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f); + _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f); + _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f); + + _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f); + _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f); + + _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f); + + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f); + + // Clamp x. + Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo); + + // Express exp(x) as exp(m*ln(2) + r), start by extracting + // m = floor(x/ln(2) + 0.5). + Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half)); + +// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is +// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating +// truncation errors. Note that we don't use the "pmadd" function here to +// ensure that a precision-preserving FMA instruction is used. +#ifdef EIGEN_VECTORIZE_FMA + _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f); + Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x); +#else + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f); + _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f); + Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1)); + r = psub(r, pmul(m, p8f_cephes_exp_C2)); +#endif + + Packet8f r2 = pmul(r, r); + + // TODO(gonnet): Split into odd/even polynomials and try to exploit + // instruction-level parallelism. 
+ Packet8f y = p8f_cephes_exp_p0; + y = pmadd(y, r, p8f_cephes_exp_p1); + y = pmadd(y, r, p8f_cephes_exp_p2); + y = pmadd(y, r, p8f_cephes_exp_p3); + y = pmadd(y, r, p8f_cephes_exp_p4); + y = pmadd(y, r, p8f_cephes_exp_p5); + y = pmadd(y, r2, r); + y = padd(y, p8f_1); + + // Build emm0 = 2^m. + Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127)); + emm0 = pshiftleft(emm0, 23); + + // Return 2^m * exp(r). + return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x); +} + +// Hyperbolic Tangent function. +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +ptanh(const Packet8f& x) { + return internal::generic_fast_tanh_float(x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d +pexp(const Packet4d& _x) { + Packet4d x = _x; + + _EIGEN_DECLARE_CONST_Packet4d(1, 1.0); + _EIGEN_DECLARE_CONST_Packet4d(2, 2.0); + _EIGEN_DECLARE_CONST_Packet4d(half, 0.5); + + _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437); + _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303); + + _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599); + + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1); + + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0); + + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125); + _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6); + _EIGEN_DECLARE_CONST_Packet4i(1023, 1023); + + Packet4d tmp, fx; + + // clamp x + x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo); + // Express exp(x) as exp(g + n*log(2)). + fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half); + + // Get the integer modulus of log(2), i.e. the "n" described above. + fx = _mm256_floor_pd(fx); + + // Get the remainder modulo log(2), i.e. the "g" described above. Subtract + // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last + // digits right. + tmp = pmul(fx, p4d_cephes_exp_C1); + Packet4d z = pmul(fx, p4d_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + Packet4d x2 = pmul(x, x); + + // Evaluate the numerator polynomial of the rational interpolant. + Packet4d px = p4d_cephes_exp_p0; + px = pmadd(px, x2, p4d_cephes_exp_p1); + px = pmadd(px, x2, p4d_cephes_exp_p2); + px = pmul(px, x); + + // Evaluate the denominator polynomial of the rational interpolant. + Packet4d qx = p4d_cephes_exp_q0; + qx = pmadd(qx, x2, p4d_cephes_exp_q1); + qx = pmadd(qx, x2, p4d_cephes_exp_q2); + qx = pmadd(qx, x2, p4d_cephes_exp_q3); + + // I don't really get this bit, copied from the SSE2 routines, so... + // TODO(gonnet): Figure out what is going on here, perhaps find a better + // rational interpolant? + x = _mm256_div_pd(px, psub(qx, px)); + x = pmadd(p4d_2, x, p4d_1); + + // Build e=2^n by constructing the exponents in a 128-bit vector and + // shifting them to where they belong in double-precision values. 
+  __m128i emm0 = _mm256_cvtpd_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_1023);
+  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
+  __m128i lo = _mm_slli_epi64(emm0, 52);
+  __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
+  __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
+  e = _mm256_insertf128_si256(e, hi, 1);
+
+  // Construct the result 2^n * exp(g) = e * x. The max is used to catch
+  // non-finite values in the input.
+  return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
+}
+
+// Functions for sqrt.
+// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
+// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
+// exact solution. It does not handle +inf, or denormalized numbers correctly.
+// The main advantage of this approach is not just speed, but also the fact that
+// it can be inlined and pipelined with other computations, further reducing its
+// effective latency. This is similar to Quake3's fast inverse square root.
+// For detail see here: http://www.beyond3d.com/content/articles/8/
+#if EIGEN_FAST_MATH
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
+psqrt<Packet8f>(const Packet8f& _x) {
+  Packet8f half = pmul(_x, pset1<Packet8f>(.5f));
+  Packet8f denormal_mask = _mm256_and_ps(
+      _mm256_cmp_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)()),
+                    _CMP_LT_OQ),
+      _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ));
+
+  // Compute approximate reciprocal sqrt.
+  Packet8f x = _mm256_rsqrt_ps(_x);
+  // Do a single step of Newton's iteration.
+  x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));
+  // Flush results for denormals to zero.
+  return _mm256_andnot_ps(denormal_mask, pmul(_x,x));
+}
+#else
+template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f psqrt<Packet8f>(const Packet8f& x) {
+  return _mm256_sqrt_ps(x);
+}
+#endif
+template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4d psqrt<Packet4d>(const Packet4d& x) {
+  return _mm256_sqrt_pd(x);
+}
+#if EIGEN_FAST_MATH
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
+  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);
+  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(nan, 0x7fc00000);
+  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
+  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
+  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
+
+  Packet8f neg_half = pmul(_x, p8f_minus_half);
+
+  // select only the inverse sqrt of positive normal inputs (denormals are
+  // flushed to zero and cause infs as well).
+  Packet8f le_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
+  Packet8f x = _mm256_andnot_ps(le_zero_mask, _mm256_rsqrt_ps(_x));
+
+  // Fill in NaNs and Infs for the negative/zero entries.
+  Packet8f neg_mask = _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_LT_OQ);
+  Packet8f zero_mask = _mm256_andnot_ps(neg_mask, le_zero_mask);
+  Packet8f infs_and_nans = _mm256_or_ps(_mm256_and_ps(neg_mask, p8f_nan),
+                                        _mm256_and_ps(zero_mask, p8f_inf));
+
+  // Do a single step of Newton's iteration.
+  x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));
+
+  // Insert NaNs and Infs in all the right places.
+ return _mm256_or_ps(x, infs_and_nans); +} + +#else +template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet8f prsqrt(const Packet8f& x) { + _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f); + return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x)); +} +#endif + +template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4d prsqrt(const Packet4d& x) { + _EIGEN_DECLARE_CONST_Packet4d(one, 1.0); + return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(x)); +} + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_AVX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/PacketMath.h new file mode 100644 index 000000000..923a124b2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/PacketMath.h @@ -0,0 +1,637 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_AVX_H +#define EIGEN_PACKET_MATH_AVX_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*)) +#endif + +#ifdef __FMA__ +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif +#endif + +typedef __m256 Packet8f; +typedef __m256i Packet8i; +typedef __m256d Packet4d; + +template<> struct is_arithmetic<__m256> { enum { value = true }; }; +template<> struct is_arithmetic<__m256i> { enum { value = true }; }; +template<> struct is_arithmetic<__m256d> { enum { value = true }; }; + +#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \ + const Packet8f p8f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \ + const Packet4d p4d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \ + const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1(X)) + +#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \ + const Packet8i p8i_##NAME = pset1(X) + +// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going +// to leverage AVX512 instructions. 
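A standalone demo of the broadcast pattern behind the two macro families above (a sketch, not Eigen's declarations): the plain form broadcasts a float value, while the FROM_INT form broadcasts an integer bit pattern and reinterprets it as floats, which is how the mask constants used throughout these kernels are built. Compile with -mavx.

```cpp
// Value constants vs. bit-pattern constants, outside the macro sugar.
#include <immintrin.h>
#include <cstdio>

int main() {
  const __m256 p8f_one = _mm256_set1_ps(1.0f);              // value constant
  const __m256 p8f_abs_mask =                               // bit-pattern constant
      _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff));   // clears the sign bit
  __m256 x  = _mm256_set1_ps(-2.5f);
  __m256 ax = _mm256_and_ps(x, p8f_abs_mask);               // |x| by masking
  float out[8];
  _mm256_storeu_ps(out, _mm256_add_ps(ax, p8f_one));
  std::printf("|-2.5f| + 1 = %g in every lane\n", out[0]);  // prints 3.5
}
```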
+#ifndef EIGEN_VECTORIZE_AVX512
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet8f type;
+  typedef Packet4f half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=8,
+    HasHalfPacket = 1,
+
+    HasDiv  = 1,
+    HasSin  = EIGEN_FAST_MATH,
+    HasCos  = 0,
+    HasLog  = 1,
+    HasExp  = 1,
+    HasSqrt = 1,
+    HasRsqrt = 1,
+    HasTanh  = EIGEN_FAST_MATH,
+    HasBlend = 1,
+    HasRound = 1,
+    HasFloor = 1,
+    HasCeil = 1
+  };
+};
+template<> struct packet_traits<double> : default_packet_traits
+{
+  typedef Packet4d type;
+  typedef Packet2d half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4,
+    HasHalfPacket = 1,
+
+    HasDiv  = 1,
+    HasExp  = 1,
+    HasSqrt = 1,
+    HasRsqrt = 1,
+    HasBlend = 1,
+    HasRound = 1,
+    HasFloor = 1,
+    HasCeil = 1
+  };
+};
+#endif
+
+template<> struct scalar_div_cost<float,true>  { enum { value = 14 }; };
+template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };
+
+/* Proper support for integers is only provided by AVX2. In the meantime, we'll
+   use SSE instructions and packets to deal with integers.
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet8i type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=8
+  };
+};
+*/
+
+template<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
+template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
+template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
+
+template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
+template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
+
+template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
+template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
+
+template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
+{
+  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
+}
+template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
+{
+  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
+
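For orientation, this is how code written against the p* primitives picks these specializations up: one generic loop body, eight floats per step. The tiny stand-ins below (pset1f, ploadf, pstoref, pmaddf) are assumptions for the demo, not Eigen's declarations; compile with -mavx.

```cpp
// y += alpha * x, eight lanes at a time: the shape of Eigen's vectorized loops.
#include <immintrin.h>
#include <cstdio>

using Packet8fDemo = __m256;
static inline Packet8fDemo pset1f(float v)            { return _mm256_set1_ps(v); }
static inline Packet8fDemo ploadf(const float* p)     { return _mm256_loadu_ps(p); }
static inline void pstoref(float* p, Packet8fDemo v)  { _mm256_storeu_ps(p, v); }
static inline Packet8fDemo pmaddf(Packet8fDemo a, Packet8fDemo b, Packet8fDemo c) {
  return _mm256_add_ps(_mm256_mul_ps(a, b), c);       // pmadd without FMA
}

void axpy(float alpha, const float* x, float* y, int n) {
  Packet8fDemo pa = pset1f(alpha);
  int i = 0;
  for (; i + 8 <= n; i += 8)
    pstoref(y + i, pmaddf(pa, ploadf(x + i), ploadf(y + i)));
  for (; i < n; ++i)                                  // scalar tail
    y[i] += alpha * x[i];
}

int main() {
  float x[10], y[10];
  for (int i = 0; i < 10; ++i) { x[i] = static_cast<float>(i); y[i] = 1.0f; }
  axpy(2.0f, x, y, 10);
  std::printf("y[9] = %g\n", y[9]);                   // 1 + 2*9 = 19
}
```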
+template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by AVX");
+  return pset1<Packet8i>(0);
+}
+
+#ifdef __FMA__
+template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
+  // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
+  // and even register spilling with clang>=6.0 (bug 1637).
+  // Gcc stupidly generates a vfmadd132ps instruction.
+  // So let's enforce it to generate a vfmadd231ps instruction since the most common use
+  // case is to accumulate the result of the product.
+  Packet8f res = c;
+  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  return _mm256_fmadd_ps(a,b,c);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
+  // see above
+  Packet4d res = c;
+  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  return _mm256_fmadd_pd(a,b,c);
+#endif
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
+template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
+
+// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
+template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
+{
+  // TODO try to find a way to avoid the need of a temporary register
+//   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));
+//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
+//   return _mm256_unpacklo_ps(tmp,tmp);
+
+  // _mm256_insertf128_ps is very slow on Haswell, thus:
+  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
+  // mimic an "inplace" permutation of the lower 128bits using a blend
+  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
+  // then we can perform a consistent permutation on the global register to get everything in shape:
+  return  _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
+}
+// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
+template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
+{
+  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
+  return  _mm256_permute_pd(tmp, 3<<2);
+}
+
+// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
+template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
+{
+  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
+  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
+
+// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
+// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
+template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
+{
+  return _mm256_set_ps(from[7*stride], from[6*stride],
from[5*stride], from[4*stride], + from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} +template<> EIGEN_DEVICE_FUNC inline Packet4d pgather(const double* from, Index stride) +{ + return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet8f& from, Index stride) +{ + __m128 low = _mm256_extractf128_ps(from, 0); + to[stride*0] = _mm_cvtss_f32(low); + to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)); + to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)); + to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)); + + __m128 high = _mm256_extractf128_ps(from, 1); + to[stride*4] = _mm_cvtss_f32(high); + to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)); + to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)); + to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet4d& from, Index stride) +{ + __m128d low = _mm256_extractf128_pd(from, 0); + to[stride*0] = _mm_cvtsd_f64(low); + to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)); + __m128d high = _mm256_extractf128_pd(from, 1); + to[stride*2] = _mm_cvtsd_f64(high); + to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)); +} + +template<> EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) +{ + Packet8f pa = pset1(a); + pstore(to, pa); +} +template<> EIGEN_STRONG_INLINE void pstore1(double* to, const double& a) +{ + Packet4d pa = pset1(a); + pstore(to, pa); +} +template<> EIGEN_STRONG_INLINE void pstore1(int* to, const int& a) +{ + Packet8i pa = pset1(a); + pstore(to, pa); +} + +#ifndef EIGEN_VECTORIZE_AVX512 +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +#endif + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet8f& a) { + return _mm_cvtss_f32(_mm256_castps256_ps128(a)); +} +template<> EIGEN_STRONG_INLINE double pfirst(const Packet4d& a) { + return _mm_cvtsd_f64(_mm256_castpd256_pd128(a)); +} +template<> EIGEN_STRONG_INLINE int pfirst(const Packet8i& a) { + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} + + +template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a) +{ + __m256 tmp = _mm256_shuffle_ps(a,a,0x1b); + return _mm256_permute2f128_ps(tmp, tmp, 1); +} +template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a) +{ + __m256d tmp = _mm256_shuffle_pd(a,a,5); + return _mm256_permute2f128_pd(tmp, tmp, 1); + #if 0 + // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd + // exhibit the same latency/throughput, but it is here for future reference/benchmarking... 
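In scalar terms, pgather packs every stride-th element into one packet and pscatter writes the lanes back out with the same stride; this is how strided matrix columns get vectorized, and with AVX2 the gather half can map onto _mm256_i32gather_ps as the NOTE above says. A minimal model (illustrative only, not Eigen API):

```cpp
// Scalar semantics of the strided gather/scatter pair above.
#include <cstdio>

void gather8(const float* from, int stride, float out[8]) {
  for (int lane = 0; lane < 8; ++lane) out[lane] = from[lane * stride];
}
void scatter8(float* to, const float in[8], int stride) {
  for (int lane = 0; lane < 8; ++lane) to[lane * stride] = in[lane];
}

int main() {
  float m[24];                        // an 8x3 row-major matrix
  for (int i = 0; i < 24; ++i) m[i] = static_cast<float>(i);
  float col1[8];
  gather8(m + 1, 3, col1);            // column 1: elements 1, 4, 7, ..., 22
  scatter8(m + 2, col1, 3);           // overwrite column 2 with it
  std::printf("col1 = %g ... %g\n", col1[0], col1[7]);
}
```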
+ __m256d swap_halves = _mm256_permute2f128_pd(a,a,1); + return _mm256_permute_pd(swap_halves,5); + #endif +} + +// pabs should be ok +template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a) +{ + const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF)); + return _mm256_and_ps(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a) +{ + const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF)); + return _mm256_and_pd(a,mask); +} + +// preduxp should be ok +// FIXME: why is this ok? why isn't the simply implementation working as expected? +template<> EIGEN_STRONG_INLINE Packet8f preduxp(const Packet8f* vecs) +{ + __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]); + __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]); + __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]); + __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]); + + __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1); + __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2); + __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3); + __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4); + + __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23); + __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23); + __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23); + __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23); + + __m256 sum1 = _mm256_add_ps(perm1, hsum5); + __m256 sum2 = _mm256_add_ps(perm2, hsum6); + __m256 sum3 = _mm256_add_ps(perm3, hsum7); + __m256 sum4 = _mm256_add_ps(perm4, hsum8); + + __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc); + __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc); + + __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0); + return final; +} +template<> EIGEN_STRONG_INLINE Packet4d preduxp(const Packet4d* vecs) +{ + Packet4d tmp0, tmp1; + + tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]); + tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1)); + + tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]); + tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1)); + + return _mm256_blend_pd(tmp0, tmp1, 0xC); +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet8f& a) +{ + return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1)))); +} +template<> EIGEN_STRONG_INLINE double predux(const Packet4d& a) +{ + return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1)))); +} + +template<> EIGEN_STRONG_INLINE Packet4f predux_downto4(const Packet8f& a) +{ + return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1)); +} + +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet8f& a) +{ + Packet8f tmp; + tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet4d& a) +{ + Packet4d tmp; + tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1))); +} + +template<> EIGEN_STRONG_INLINE float predux_min(const Packet8f& a) +{ + Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} +template<> EIGEN_STRONG_INLINE double predux_min(const 
Packet4d& a) +{ + Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1))); +} + +template<> EIGEN_STRONG_INLINE float predux_max(const Packet8f& a) +{ + Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} + +template<> EIGEN_STRONG_INLINE double predux_max(const Packet4d& a) +{ + Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1))); +} + + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second) + { + if (Offset==1) + { + first = _mm256_blend_ps(first, second, 1); + Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1)); + Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1); + first = _mm256_blend_ps(tmp1, tmp2, 0x88); + } + else if (Offset==2) + { + first = _mm256_blend_ps(first, second, 3); + Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2)); + Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1); + first = _mm256_blend_ps(tmp1, tmp2, 0xcc); + } + else if (Offset==3) + { + first = _mm256_blend_ps(first, second, 7); + Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3)); + Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1); + first = _mm256_blend_ps(tmp1, tmp2, 0xee); + } + else if (Offset==4) + { + first = _mm256_blend_ps(first, second, 15); + Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0)); + Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1); + first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0)); + } + else if (Offset==5) + { + first = _mm256_blend_ps(first, second, 31); + first = _mm256_permute2f128_ps(first, first, 1); + Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1)); + first = _mm256_permute2f128_ps(tmp, tmp, 1); + first = _mm256_blend_ps(tmp, first, 0x88); + } + else if (Offset==6) + { + first = _mm256_blend_ps(first, second, 63); + first = _mm256_permute2f128_ps(first, first, 1); + Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2)); + first = _mm256_permute2f128_ps(tmp, tmp, 1); + first = _mm256_blend_ps(tmp, first, 0xcc); + } + else if (Offset==7) + { + first = _mm256_blend_ps(first, second, 127); + first = _mm256_permute2f128_ps(first, first, 1); + Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3)); + first = _mm256_permute2f128_ps(tmp, tmp, 1); + first = _mm256_blend_ps(tmp, first, 0xee); + } + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second) + { + if (Offset==1) + { + first = _mm256_blend_pd(first, second, 1); + __m256d tmp = _mm256_permute_pd(first, 5); + first = _mm256_permute2f128_pd(tmp, tmp, 1); + first = _mm256_blend_pd(tmp, first, 0xA); + } + else if (Offset==2) + { + first = _mm256_blend_pd(first, second, 3); + first = _mm256_permute2f128_pd(first, first, 1); + } + else if (Offset==3) + { + first = _mm256_blend_pd(first, second, 7); + __m256d tmp = _mm256_permute_pd(first, 5); + first = _mm256_permute2f128_pd(tmp, tmp, 1); + first = _mm256_blend_pd(tmp, first, 5); + } + } +}; + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], 
kernel.packet[3]); + __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]); + __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]); + __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]); + __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]); + __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0)); + __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2)); + __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0)); + __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2)); + __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0)); + __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2)); + __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0)); + __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2)); + kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20); + kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20); + kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20); + kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20); + kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31); + kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31); + kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31); + kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]); + __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + + __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0)); + __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2)); + __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0)); + __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2)); + + kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20); + kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20); + kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31); + kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15); + __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0); + __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15); + __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0); + + kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32); + kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49); + kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32); + kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49); +} + +template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) { + const __m256 zero = _mm256_setzero_ps(); + const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ); + return _mm256_blendv_ps(thenPacket, elsePacket, false_mask); +} +template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) { + const __m256d zero = _mm256_setzero_pd(); + const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], 
ifPacket.select[0]); + __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ); + return _mm256_blendv_pd(thenPacket, elsePacket, false_mask); +} + +template<> EIGEN_STRONG_INLINE Packet8f pinsertfirst(const Packet8f& a, float b) +{ + return _mm256_blend_ps(a,pset1(b),1); +} + +template<> EIGEN_STRONG_INLINE Packet4d pinsertfirst(const Packet4d& a, double b) +{ + return _mm256_blend_pd(a,pset1(b),1); +} + +template<> EIGEN_STRONG_INLINE Packet8f pinsertlast(const Packet8f& a, float b) +{ + return _mm256_blend_ps(a,pset1(b),(1<<7)); +} + +template<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b) +{ + return _mm256_blend_pd(a,pset1(b),(1<<3)); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_AVX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/TypeCasting.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/TypeCasting.h new file mode 100644 index 000000000..83bfdc604 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX/TypeCasting.h @@ -0,0 +1,51 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TYPE_CASTING_AVX_H +#define EIGEN_TYPE_CASTING_AVX_H + +namespace Eigen { + +namespace internal { + +// For now we use SSE to handle integers, so we can't use AVX instructions to cast +// from int to float +template <> +struct type_casting_traits { + enum { + VectorizedCast = 0, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 0, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + + + +template<> EIGEN_STRONG_INLINE Packet8i pcast(const Packet8f& a) { + return _mm256_cvtps_epi32(a); +} + +template<> EIGEN_STRONG_INLINE Packet8f pcast(const Packet8i& a) { + return _mm256_cvtepi32_ps(a); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_AVX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/MathFunctions.h new file mode 100644 index 000000000..9c1717f76 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/MathFunctions.h @@ -0,0 +1,391 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
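Before the AVX512 kernels, a note on the two pcast specializations just defined: _mm256_cvtps_epi32 rounds to nearest even rather than truncating, and that is the semantics the float-to-int cast inherits. A small round-trip demo (a sketch, not Eigen API; needs -mavx):

```cpp
// Round-tripping through the two casts; note the rounding, not truncation.
#include <immintrin.h>
#include <cstdio>

int main() {
  __m256  f    = _mm256_set_ps(2.5f, 1.5f, -1.5f, 0.4f, -0.4f, 3.0f, -3.0f, 0.0f);
  __m256i i    = _mm256_cvtps_epi32(f);    // float -> int, round to nearest even
  __m256  back = _mm256_cvtepi32_ps(i);    // int -> float, exact for small ints
  alignas(32) int   vi[8];
  alignas(32) float vf[8];
  _mm256_store_si256(reinterpret_cast<__m256i*>(vi), i);
  _mm256_store_ps(vf, back);
  for (int k = 0; k < 8; ++k)
    std::printf("%d -> %g  ", vi[k], vf[k]);  // 2.5 and 1.5 both land on 2
  std::printf("\n");
}
```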
+
+#ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
+#define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
+
+namespace Eigen {
+
+namespace internal {
+
+// Disable the code for older versions of gcc that don't support many of the required avx512 intrinsics.
+#if EIGEN_GNUC_AT_LEAST(5, 3)
+
+#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \
+  const Packet16f p16f_##NAME = pset1<Packet16f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \
+  const Packet16f p16f_##NAME = (__m512)pset1<Packet16i>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \
+  const Packet8d p8d_##NAME = pset1<Packet8d>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \
+  const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))
+
+// Natural logarithm
+// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)
+// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
+// be easily approximated by a polynomial centered on m=1 for stability.
+#if defined(EIGEN_VECTORIZE_AVX512DQ)
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+plog<Packet16f>(const Packet16f& _x) {
+  Packet16f x = _x;
+  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
+  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f);
+
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+  // The smallest non denormalized float number.
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
+
+  // Polynomial coefficients.
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f);
+  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f);
+
+  // invalid_mask is set to true when x is NaN
+  __mmask16 invalid_mask =
+      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
+  __mmask16 iszero_mask =
+      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);
+
+  // Truncate input values to the minimum positive normal.
+  x = pmax(x, p16f_min_norm_pos);
+
+  // Extract the shifted exponents.
+  Packet16f emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32((__m512i)x, 23));
+  Packet16f e = _mm512_sub_ps(emm0, p16f_126f);
+
+  // Set the exponents to -1, i.e. x are in the range [0.5,1).
+  x = _mm512_and_ps(x, p16f_inv_mant_mask);
+  x = _mm512_or_ps(x, p16f_half);
+
+  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+  // and shift by -1. The values are then centered around 0, which improves
+  // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) { + // e -= 1; + // x = x + x - 1.0; + // } else { x = x - 1.0; } + __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ); + Packet16f tmp = _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), x); + x = psub(x, p16f_1); + e = psub(e, _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), p16f_1)); + x = padd(x, tmp); + + Packet16f x2 = pmul(x, x); + Packet16f x3 = pmul(x2, x); + + // Evaluate the polynomial approximant of degree 8 in three parts, probably + // to improve instruction-level parallelism. + Packet16f y, y1, y2; + y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1); + y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4); + y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7); + y = pmadd(y, x, p16f_cephes_log_p2); + y1 = pmadd(y1, x, p16f_cephes_log_p5); + y2 = pmadd(y2, x, p16f_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); + + // Add the logarithm of the exponent back to the result of the interpolation. + y1 = pmul(e, p16f_cephes_log_q1); + tmp = pmul(x2, p16f_half); + y = padd(y, y1); + x = psub(x, tmp); + y2 = pmul(e, p16f_cephes_log_q2); + x = padd(x, y); + x = padd(x, y2); + + // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF. + return _mm512_mask_blend_ps(iszero_mask, + _mm512_mask_blend_ps(invalid_mask, x, p16f_nan), + p16f_minus_inf); +} +#endif + +// Exponential function. Works by writing "x = m*log(2) + r" where +// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then +// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +pexp(const Packet16f& _x) { + _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f); + _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f); + _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f); + + _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f); + _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f); + + _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f); + + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f); + + // Clamp x. + Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo); + + // Express exp(x) as exp(m*ln(2) + r), start by extracting + // m = floor(x/ln(2) + 0.5). + Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half)); + + // Get r = x - m*ln(2). Note that we can do this without losing more than one + // ulp precision due to the FMA instruction. + _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f); + Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x); + Packet16f r2 = pmul(r, r); + + // TODO(gonnet): Split into odd/even polynomials and try to exploit + // instruction-level parallelism. + Packet16f y = p16f_cephes_exp_p0; + y = pmadd(y, r, p16f_cephes_exp_p1); + y = pmadd(y, r, p16f_cephes_exp_p2); + y = pmadd(y, r, p16f_cephes_exp_p3); + y = pmadd(y, r, p16f_cephes_exp_p4); + y = pmadd(y, r, p16f_cephes_exp_p5); + y = pmadd(y, r2, r); + y = padd(y, p16f_1); + + // Build emm0 = 2^m. + Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127)); + emm0 = _mm512_slli_epi32(emm0, 23); + + // Return 2^m * exp(r). 
+ return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x); +} + +/*template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +pexp(const Packet8d& _x) { + Packet8d x = _x; + + _EIGEN_DECLARE_CONST_Packet8d(1, 1.0); + _EIGEN_DECLARE_CONST_Packet8d(2, 2.0); + + _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437); + _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303); + + _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599); + + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1); + + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0); + + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125); + _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6); + + // clamp x + x = pmax(pmin(x, p8d_exp_hi), p8d_exp_lo); + + // Express exp(x) as exp(g + n*log(2)). + const Packet8d n = + _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT); + + // Get the remainder modulo log(2), i.e. the "g" described above. Subtract + // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last + // digits right. + const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1); + const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2); + x = psub(x, nC1); + x = psub(x, nC2); + + const Packet8d x2 = pmul(x, x); + + // Evaluate the numerator polynomial of the rational interpolant. + Packet8d px = p8d_cephes_exp_p0; + px = pmadd(px, x2, p8d_cephes_exp_p1); + px = pmadd(px, x2, p8d_cephes_exp_p2); + px = pmul(px, x); + + // Evaluate the denominator polynomial of the rational interpolant. + Packet8d qx = p8d_cephes_exp_q0; + qx = pmadd(qx, x2, p8d_cephes_exp_q1); + qx = pmadd(qx, x2, p8d_cephes_exp_q2); + qx = pmadd(qx, x2, p8d_cephes_exp_q3); + + // I don't really get this bit, copied from the SSE2 routines, so... + // TODO(gonnet): Figure out what is going on here, perhaps find a better + // rational interpolant? + x = _mm512_div_pd(px, psub(qx, px)); + x = pmadd(p8d_2, x, p8d_1); + + // Build e=2^n. + const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64( + _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52)); + + // Construct the result 2^n * exp(g) = e * x. The max is used to catch + // non-finite values in the input. + return pmax(pmul(x, e), _x); + }*/ + +// Functions for sqrt. +// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step +// of Newton's method, at a cost of 1-2 bits of precision as opposed to the +// exact solution. The main advantage of this approach is not just speed, but +// also the fact that it can be inlined and pipelined with other computations, +// further reducing its effective latency. +#if EIGEN_FAST_MATH +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +psqrt(const Packet16f& _x) { + _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f); + _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f); + _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000); + + Packet16f neg_half = pmul(_x, p16f_minus_half); + + // select only the inverse sqrt of positive normal inputs (denormals are + // flushed to zero and cause infs as well). 
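The selection idiom changes here: where the AVX kernels carry full-width 0/-1 bit masks and combine them with and/andnot/or, the AVX512 code keeps a one-bit-per-lane predicate in a k register (__mmask16) and blends with it directly. A minimal side-by-side sketch (illustrative; needs an AVX-512-capable toolchain and -mavx512f):

```cpp
// AVX bitwise lane masks vs. AVX512 k-mask predicates.
#include <immintrin.h>
#include <cstdio>

int main() {
  // AVX: compare yields a 256-bit mask, blendv selects by each lane's sign bit.
  __m256 a8 = _mm256_set1_ps(1.0f), b8 = _mm256_set1_ps(-1.0f);
  __m256 x8 = _mm256_set_ps(7, -6, 5, -4, 3, -2, 1, 0);
  __m256 m8 = _mm256_cmp_ps(x8, _mm256_setzero_ps(), _CMP_LT_OQ);
  __m256 r8 = _mm256_blendv_ps(a8, b8, m8);        // b where x<0, else a

  // AVX512: compare yields a 16-bit predicate, one bit per lane.
  __m512 a16 = _mm512_set1_ps(1.0f), b16 = _mm512_set1_ps(-1.0f);
  __m512 x16 = _mm512_set1_ps(-3.0f);
  __mmask16 k = _mm512_cmp_ps_mask(x16, _mm512_setzero_ps(), _CMP_LT_OQ);
  __m512 r16 = _mm512_mask_blend_ps(k, a16, b16);  // b where bit set, else a

  float o8[8], o16[16];
  _mm256_storeu_ps(o8, r8);
  _mm512_storeu_ps(o16, r16);
  std::printf("avx lane0=%g  avx512 lane0=%g  mask=0x%04x\n",
              o8[0], o16[0], (unsigned)k);
}
```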
+  __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
+  Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_setzero_ps(), _mm512_rsqrt14_ps(_x));
+
+  // Do a single step of Newton's iteration.
+  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
+
+  // Multiply the original _x by its reciprocal square root to extract the
+  // square root.
+  return pmul(_x, x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
+psqrt<Packet8d>(const Packet8d& _x) {
+  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
+  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
+  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);
+
+  Packet8d neg_half = pmul(_x, p8d_minus_half);
+
+  // select only the inverse sqrt of positive normal inputs (denormals are
+  // flushed to zero and cause infs as well).
+  __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ);
+  Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_setzero_pd(), _mm512_rsqrt14_pd(_x));
+
+  // Do a first step of Newton's iteration.
+  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+
+  // Do a second step of Newton's iteration.
+  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+
+  // Multiply the original _x by its reciprocal square root to extract the
+  // square root.
+  return pmul(_x, x);
+}
+#else
+template <>
+EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {
+  return _mm512_sqrt_ps(x);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {
+  return _mm512_sqrt_pd(x);
+}
+#endif
+
+// Functions for rsqrt.
+// Almost identical to the sqrt routine, just leave out the last multiplication
+// and fill in NaN/Inf where needed. Note that this function only exists as an
+// iterative version for doubles since there is no instruction for directly
+// computing the reciprocal square root in AVX-512.
+#ifdef EIGEN_FAST_MATH
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+prsqrt<Packet16f>(const Packet16f& _x) {
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
+  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
+  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
+  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);
+
+  Packet16f neg_half = pmul(_x, p16f_minus_half);
+
+  // select only the inverse sqrt of positive normal inputs (denormals are
+  // flushed to zero and cause infs as well).
+  __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
+  Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_rsqrt14_ps(_x), _mm512_setzero_ps());
+
+  // Fill in NaNs and Infs for the negative/zero entries.
+  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
+  Packet16f infs_and_nans = _mm512_mask_blend_ps(
+      neg_mask, _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(), p16f_inf), p16f_nan);
+
+  // Do a single step of Newton's iteration.
+  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
+
+  // Insert NaNs and Infs in all the right places.
+ return _mm512_mask_blend_ps(le_zero_mask, x, infs_and_nans); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +prsqrt(const Packet8d& _x) { + _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL); + _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL); + _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5); + _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5); + _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL); + + Packet8d neg_half = pmul(_x, p8d_minus_half); + + // select only the inverse sqrt of positive normal inputs (denormals are + // flushed to zero and cause infs as well). + __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ); + Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_rsqrt14_pd(_x), _mm512_setzero_pd()); + + // Fill in NaNs and Infs for the negative/zero entries. + __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ); + Packet8d infs_and_nans = _mm512_mask_blend_pd( + neg_mask, _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(), p8d_inf), p8d_nan); + + // Do a first step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); + + // Do a second step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); + + // Insert NaNs and Infs in all the right places. + return _mm512_mask_blend_pd(le_zero_mask, x, infs_and_nans); +} +#elif defined(EIGEN_VECTORIZE_AVX512ER) +template <> +EIGEN_STRONG_INLINE Packet16f prsqrt(const Packet16f& x) { + return _mm512_rsqrt28_ps(x); +} +#endif +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/PacketMath.h new file mode 100644 index 000000000..5adddc7ae --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AVX512/PacketMath.h @@ -0,0 +1,1316 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
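Why the double-precision prsqrt above runs Newton twice: each iteration roughly squares the relative error, so a ~14-bit rsqrt14 seed reaches float accuracy in one step but needs two to cover double's 52-bit mantissa. A scalar sketch with a deliberately coarse seed (illustrative only):

```cpp
// Error after each Newton step for y ~ 1/sqrt(x), starting from ~14 bits.
#include <cmath>
#include <cstdio>

int main() {
  double x = 2.0;
  double exact = 1.0 / std::sqrt(x);
  double y = exact * (1.0 + 5e-5);         // fake seed with ~14 correct bits
  for (int step = 1; step <= 2; ++step) {
    y = y * (1.5 - 0.5 * x * y * y);       // Newton on f(y) = 1/y^2 - x
    std::printf("step %d: rel err = %.3e\n", step,
                std::fabs(y - exact) / exact);
  }
  // One step suffices for float's 24-bit mantissa; double needs the second.
}
```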
+ +#ifndef EIGEN_PACKET_MATH_AVX512_H +#define EIGEN_PACKET_MATH_AVX512_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*)) +#endif + +#ifdef __FMA__ +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif +#endif + +typedef __m512 Packet16f; +typedef __m512i Packet16i; +typedef __m512d Packet8d; + +template <> +struct is_arithmetic<__m512> { + enum { value = true }; +}; +template <> +struct is_arithmetic<__m512i> { + enum { value = true }; +}; +template <> +struct is_arithmetic<__m512d> { + enum { value = true }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet16f type; + typedef Packet8f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, +#if EIGEN_GNUC_AT_LEAST(5, 3) +#ifdef EIGEN_VECTORIZE_AVX512DQ + HasLog = 1, +#endif + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, +#endif + HasDiv = 1 + }; + }; +template<> struct packet_traits : default_packet_traits +{ + typedef Packet8d type; + typedef Packet4d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, +#if EIGEN_GNUC_AT_LEAST(5, 3) + HasSqrt = 1, + HasRsqrt = EIGEN_FAST_MATH, +#endif + HasDiv = 1 + }; +}; + +/* TODO Implement AVX512 for integers +template<> struct packet_traits : default_packet_traits +{ + typedef Packet16i type; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=8 + }; +}; +*/ + +template <> +struct unpacket_traits { + typedef float type; + typedef Packet8f half; + enum { size = 16, alignment=Aligned64 }; +}; +template <> +struct unpacket_traits { + typedef double type; + typedef Packet4d half; + enum { size = 8, alignment=Aligned64 }; +}; +template <> +struct unpacket_traits { + typedef int type; + typedef Packet8i half; + enum { size = 16, alignment=Aligned64 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet16f pset1(const float& from) { + return _mm512_set1_ps(from); +} +template <> +EIGEN_STRONG_INLINE Packet8d pset1(const double& from) { + return _mm512_set1_pd(from); +} +template <> +EIGEN_STRONG_INLINE Packet16i pset1(const int& from) { + return _mm512_set1_epi32(from); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pload1(const float* from) { + return _mm512_broadcastss_ps(_mm_load_ps1(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pload1(const double* from) { + return _mm512_broadcastsd_pd(_mm_load_pd1(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f plset(const float& a) { + return _mm512_add_ps( + _mm512_set1_ps(a), + _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, + 4.0f, 3.0f, 2.0f, 1.0f, 0.0f)); +} +template <> +EIGEN_STRONG_INLINE Packet8d plset(const double& a) { + return _mm512_add_pd(_mm512_set1_pd(a), + _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f padd(const Packet16f& a, + const Packet16f& b) { + return _mm512_add_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d padd(const Packet8d& a, + const Packet8d& b) { + return _mm512_add_pd(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f psub(const Packet16f& a, + const Packet16f& b) { + return _mm512_sub_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d psub(const Packet8d& a, + const Packet8d& b) { + return _mm512_sub_pd(a, b); +} + +template <> 
+template <>
+EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
+  return _mm512_sub_ps(_mm512_set1_ps(0.0), a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
+  return _mm512_sub_pd(_mm512_set1_pd(0.0), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
+  return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
+  return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+  return _mm512_mul_ps(a, b);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+  return _mm512_mul_pd(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+  return _mm512_div_ps(a, b);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+  return _mm512_div_pd(a, b);
+}
+
+#ifdef __FMA__
+template <>
+EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
+                                    const Packet16f& c) {
+  return _mm512_fmadd_ps(a, b, c);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,
+                                   const Packet8d& c) {
+  return _mm512_fmadd_pd(a, b, c);
+}
+#endif
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+  return _mm512_min_ps(a, b);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+  return _mm512_min_pd(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+  return _mm512_max_ps(a, b);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+  return _mm512_max_pd(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_and_ps(a, b);
+#else
+  Packet16f res = _mm512_undefined_ps();
+  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
+  res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);
+
+  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
+  res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);
+
+  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
+  res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);
+
+  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
+  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
+  res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);
+
+  return res;
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_and_pd(a, b);
+#else
+  Packet8d res = _mm512_undefined_pd();
+  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
+  res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);
+
+  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
+  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
+  res = _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
+
+  return res;
+#endif
+}
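+// Note: without EIGEN_VECTORIZE_AVX512DQ there are no 512-bit float/double
+// bitwise instructions, so pand above and por/pxor/pandnot below fall back to
+// per-128-bit (resp. 256-bit) lane operations stitched together with
+// extract/insert.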
+template <>
+EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a,
+                                             const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_or_ps(a, b);
+#else
+  Packet16f res = _mm512_undefined_ps();
+  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
+  res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);
+
+  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
+  res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);
+
+  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
+  res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);
+
+  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
+  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
+  res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);
+
+  return res;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
+                                           const Packet8d& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_or_pd(a, b);
+#else
+  Packet8d res = _mm512_undefined_pd();
+  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
+  res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);
+
+  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
+  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
+  res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);
+
+  return res;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a,
+                                              const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_xor_ps(a, b);
+#else
+  Packet16f res = _mm512_undefined_ps();
+  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
+  res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);
+
+  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
+  res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);
+
+  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
+  res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);
+
+  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
+  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
+  res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);
+
+  return res;
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_xor_pd(a, b);
+#else
+  Packet8d res = _mm512_undefined_pd();
+  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
+  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);
+
+  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
+  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
+  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);
+
+  return res;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a,
+                                                 const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_andnot_ps(a, b);
+#else
+  Packet16f res = _mm512_undefined_ps();
+  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
+  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);
+
+  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
+  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);
+
+  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
+  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);
+
+  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
+  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
+  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);
+
+  return res;
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,
+                                               const Packet8d& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  return _mm512_andnot_pd(a, b);
+#else
+  Packet8d res = _mm512_undefined_pd();
+  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
+  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);
+
+  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
+  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
+  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);
+
+  return res;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
+}
+template <>
+EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
+      reinterpret_cast<const __m512i*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
+}
+template <>
+EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
+      reinterpret_cast<const __m512i*>(from));
+}
+
+// Loads 8 floats from memory and returns the packet
+// {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
+template <>
+EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
+  Packet8f lane0 = _mm256_broadcast_ps((const __m128*)(const void*)from);
+  // mimic an "inplace" permutation of the lower 128bits using a blend
+  lane0 = _mm256_blend_ps(
+      lane0, _mm256_castps128_ps256(_mm_permute_ps(
+                 _mm256_castps256_ps128(lane0), _MM_SHUFFLE(1, 0, 1, 0))),
+      15);
+  // then we can perform a consistent permutation on the global register to get
+  // everything in shape:
+  lane0 = _mm256_permute_ps(lane0, _MM_SHUFFLE(3, 3, 2, 2));
+
+  Packet8f lane1 = _mm256_broadcast_ps((const __m128*)(const void*)(from + 4));
+  // mimic an "inplace" permutation of the lower 128bits using a blend
+  lane1 = _mm256_blend_ps(
+      lane1, _mm256_castps128_ps256(_mm_permute_ps(
+                 _mm256_castps256_ps128(lane1), _MM_SHUFFLE(1, 0, 1, 0))),
+      15);
+  // then we can perform a consistent permutation on the global register to get
+  // everything in shape:
+  lane1 = _mm256_permute_ps(lane1, _MM_SHUFFLE(3, 3, 2, 2));
+
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  Packet16f res = _mm512_undefined_ps();
+  res = _mm512_insertf32x8(res, lane0, 0);
+  res = _mm512_insertf32x8(res, lane1, 1);
+  return res;
+#else
+  Packet16f res = _mm512_undefined_ps();
+  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 0), 0);
+  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 1), 1);
+  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 0), 2);
+  res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 1), 3);
+  return res;
+#endif
+}
+// Loads 4 doubles from memory and returns the packet {a0, a0, a1, a1, a2, a2,
+// a3, a3}
+template <>
+EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
+  Packet4d lane0 = _mm256_broadcast_pd((const __m128d*)(const void*)from);
+  lane0 = _mm256_permute_pd(lane0, 3 << 2);
+
+  Packet4d lane1 = _mm256_broadcast_pd((const __m128d*)(const void*)(from + 2));
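+  // Note: imm 3<<2 = 0b1100 in _mm256_permute_pd keeps element 0 twice in the
+  // low 128-bit lane and element 1 twice in the high lane, turning the
+  // broadcast {a2,a3,a2,a3} into {a2,a2,a3,a3} (same trick as for lane0 above).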
+  lane1 = _mm256_permute_pd(lane1, 3 << 2);
+
+  Packet8d res = _mm512_undefined_pd();
+  res = _mm512_insertf64x4(res, lane0, 0);
+  return _mm512_insertf64x4(res, lane1, 1);
+}
+
+// Loads 4 floats from memory and returns the packet
+// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
+template <>
+EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
+  Packet16f tmp = _mm512_undefined_ps();
+  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);
+  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);
+  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);
+  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 3), 3);
+  return tmp;
+}
+// Loads 2 doubles from memory and returns the packet
+// {a0, a0, a0, a0, a1, a1, a1, a1}
+template <>
+EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
+  Packet8d tmp = _mm512_undefined_pd();
+  Packet2d tmp0 = _mm_load_pd1(from);
+  Packet2d tmp1 = _mm_load_pd1(from + 1);
+  Packet4d lane0 = _mm256_broadcastsd_pd(tmp0);
+  Packet4d lane1 = _mm256_broadcastsd_pd(tmp1);
+  tmp = _mm512_insertf64x4(tmp, lane0, 0);
+  return _mm512_insertf64x4(tmp, lane1, 1);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
+  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
+}
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
+  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
+}
+template <>
+EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
+  EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to),
+                                                from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
+}
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
+}
+template <>
+EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
+      reinterpret_cast<__m512i*>(to), from);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
+                                                             Index stride) {
+  Packet16i stride_vector = _mm512_set1_epi32(stride);
+  Packet16i stride_multiplier =
+      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
+
+  return _mm512_i32gather_ps(indices, from, 4);
+}
+template <>
+EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
+                                                            Index stride) {
+  Packet8i stride_vector = _mm256_set1_epi32(stride);
+  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
+
+  return _mm512_i32gather_pd(indices, from, 8);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
+                                                         const Packet16f& from,
+                                                         Index stride) {
+  Packet16i stride_vector = _mm512_set1_epi32(stride);
+  Packet16i stride_multiplier =
+      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
+  _mm512_i32scatter_ps(to, indices, from, 4);
+}
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
+                                                         const Packet8d& from,
+                                                         Index stride) {
+  Packet8i stride_vector = _mm256_set1_epi32(stride);
+  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
+  _mm512_i32scatter_pd(to, indices, from, 8);
+}
+
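+// pgather/pscatter above build an index vector stride * {0, 1, ..., N-1} and
+// use the hardware gather/scatter instructions with a 4-byte (float) or
+// 8-byte (double) scale, so strided columns can be packed and unpacked
+// without scalar loops.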
+template <>
+EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
+  Packet16f pa = pset1<Packet16f>(a);
+  pstore(to, pa);
+}
+template <>
+EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
+  Packet8d pa = pset1<Packet8d>(a);
+  pstore(to, pa);
+}
+template <>
+EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
+  Packet16i pa = pset1<Packet16i>(a);
+  pstore(to, pa);
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
+  return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
+}
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
+  return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
+}
+template <>
+EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
+  return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)
+{
+  return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
+{
+  return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
+{
+  // _mm512_abs_ps intrinsic not found, so hack around it
+  return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
+  // _mm512_abs_pd intrinsic not found, so hack around it
+  return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
+                                              _mm512_set1_epi64(0x7fffffffffffffff)));
+}
+
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+// AVX512F does not define _mm512_extractf32x8_ps to extract _m256 from _m512
+#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)        \
+  __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0); \
+  __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1);
+#else
+#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                \
+  __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \
+      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
+      _mm512_extractf32x4_ps(INPUT, 1), 1);                     \
+  __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \
+      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
+      _mm512_extractf32x4_ps(INPUT, 3), 1);
+#endif
+
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
+  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTA, 0);        \
+  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTB, 1);
+#else
+#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \
+  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
+  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
+  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
+  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
+#endif
+template<> EIGEN_STRONG_INLINE Packet16f preduxp<Packet16f>(const Packet16f* vecs)
+{
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[0], vecs0);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[1], vecs1);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[2], vecs2);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[3], vecs3);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[4], vecs4);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[5], vecs5);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[6], vecs6);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[7], vecs7);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[8], vecs8);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[9], vecs9);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[10], vecs10);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[11], vecs11);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[12], vecs12);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[13], vecs13);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[14], vecs14);
+  EIGEN_EXTRACT_8f_FROM_16f(vecs[15], vecs15);
+
+  __m256 hsum1 = _mm256_hadd_ps(vecs0_0, vecs1_0);
+  __m256 hsum2 = _mm256_hadd_ps(vecs2_0, vecs3_0);
+  __m256 hsum3 = _mm256_hadd_ps(vecs4_0, vecs5_0);
+  __m256 hsum4 = _mm256_hadd_ps(vecs6_0, vecs7_0);
+
+  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
+  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
+  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
+  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
+
+  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
+  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
+  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
+  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
+
+  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
+  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
+  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
+  __m256 sum4 = _mm256_add_ps(perm4, hsum8);
+
+  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
+  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
+
+  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
+
+  hsum1 = _mm256_hadd_ps(vecs0_1, vecs1_1);
+  hsum2 = _mm256_hadd_ps(vecs2_1, vecs3_1);
+  hsum3 = _mm256_hadd_ps(vecs4_1, vecs5_1);
+  hsum4 = _mm256_hadd_ps(vecs6_1, vecs7_1);
+
+  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
+  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
+  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
+  hsum8 = _mm256_hadd_ps(hsum4, hsum4);
+
+  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
+  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
+  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
+  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
+
+  sum1 = _mm256_add_ps(perm1, hsum5);
+  sum2 = _mm256_add_ps(perm2, hsum6);
+  sum3 = _mm256_add_ps(perm3, hsum7);
+  sum4 = _mm256_add_ps(perm4, hsum8);
+
+  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
+  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
+
+  final = padd(final, _mm256_blend_ps(blend1, blend2, 0xf0));
+
+  hsum1 = _mm256_hadd_ps(vecs8_0, vecs9_0);
+  hsum2 = _mm256_hadd_ps(vecs10_0, vecs11_0);
+  hsum3 = _mm256_hadd_ps(vecs12_0, vecs13_0);
+  hsum4 = _mm256_hadd_ps(vecs14_0, vecs15_0);
+
+  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
+  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
+  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
+  hsum8 = _mm256_hadd_ps(hsum4, hsum4);
+
+  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
+  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
+  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
+  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
+
+  sum1 = _mm256_add_ps(perm1, hsum5);
+  sum2 = _mm256_add_ps(perm2, hsum6);
+  sum3 = _mm256_add_ps(perm3, hsum7);
+  sum4 = _mm256_add_ps(perm4, hsum8);
+
+  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
+  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
+
+  __m256 final_1 = _mm256_blend_ps(blend1, blend2, 0xf0);
+
+  hsum1 = _mm256_hadd_ps(vecs8_1, vecs9_1);
+  hsum2 = _mm256_hadd_ps(vecs10_1, vecs11_1);
+  hsum3 = _mm256_hadd_ps(vecs12_1, vecs13_1);
+  hsum4 = _mm256_hadd_ps(vecs14_1, vecs15_1);
+
+  hsum5 = _mm256_hadd_ps(hsum1, hsum1);
+  hsum6 = _mm256_hadd_ps(hsum2, hsum2);
+  hsum7 = _mm256_hadd_ps(hsum3, hsum3);
+  hsum8 = _mm256_hadd_ps(hsum4, hsum4);
+
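+  // Same ladder as above for the upper halves of vecs[8..15]: fold each
+  // 256-bit hadd result with its swapped 128-bit half, then blend the four
+  // per-vector sums into one __m256.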
+  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
+  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
+  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
+  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
+
+  sum1 = _mm256_add_ps(perm1, hsum5);
+  sum2 = _mm256_add_ps(perm2, hsum6);
+  sum3 = _mm256_add_ps(perm3, hsum7);
+  sum4 = _mm256_add_ps(perm4, hsum8);
+
+  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
+  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
+
+  final_1 = padd(final_1, _mm256_blend_ps(blend1, blend2, 0xf0));
+
+  __m512 final_output;
+
+  EIGEN_INSERT_8f_INTO_16f(final_output, final, final_1);
+  return final_output;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8d preduxp<Packet8d>(const Packet8d* vecs)
+{
+  Packet4d vecs0_0 = _mm512_extractf64x4_pd(vecs[0], 0);
+  Packet4d vecs0_1 = _mm512_extractf64x4_pd(vecs[0], 1);
+
+  Packet4d vecs1_0 = _mm512_extractf64x4_pd(vecs[1], 0);
+  Packet4d vecs1_1 = _mm512_extractf64x4_pd(vecs[1], 1);
+
+  Packet4d vecs2_0 = _mm512_extractf64x4_pd(vecs[2], 0);
+  Packet4d vecs2_1 = _mm512_extractf64x4_pd(vecs[2], 1);
+
+  Packet4d vecs3_0 = _mm512_extractf64x4_pd(vecs[3], 0);
+  Packet4d vecs3_1 = _mm512_extractf64x4_pd(vecs[3], 1);
+
+  Packet4d vecs4_0 = _mm512_extractf64x4_pd(vecs[4], 0);
+  Packet4d vecs4_1 = _mm512_extractf64x4_pd(vecs[4], 1);
+
+  Packet4d vecs5_0 = _mm512_extractf64x4_pd(vecs[5], 0);
+  Packet4d vecs5_1 = _mm512_extractf64x4_pd(vecs[5], 1);
+
+  Packet4d vecs6_0 = _mm512_extractf64x4_pd(vecs[6], 0);
+  Packet4d vecs6_1 = _mm512_extractf64x4_pd(vecs[6], 1);
+
+  Packet4d vecs7_0 = _mm512_extractf64x4_pd(vecs[7], 0);
+  Packet4d vecs7_1 = _mm512_extractf64x4_pd(vecs[7], 1);
+
+  Packet4d tmp0, tmp1;
+
+  tmp0 = _mm256_hadd_pd(vecs0_0, vecs1_0);
+  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
+
+  tmp1 = _mm256_hadd_pd(vecs2_0, vecs3_0);
+  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
+
+  __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);
+
+  tmp0 = _mm256_hadd_pd(vecs0_1, vecs1_1);
+  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
+
+  tmp1 = _mm256_hadd_pd(vecs2_1, vecs3_1);
+  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
+
+  final_0 = padd(final_0, _mm256_blend_pd(tmp0, tmp1, 0xC));
+
+  tmp0 = _mm256_hadd_pd(vecs4_0, vecs5_0);
+  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
+
+  tmp1 = _mm256_hadd_pd(vecs6_0, vecs7_0);
+  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
+
+  __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);
+
+  tmp0 = _mm256_hadd_pd(vecs4_1, vecs5_1);
+  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
+
+  tmp1 = _mm256_hadd_pd(vecs6_1, vecs7_1);
+  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
+
+  final_1 = padd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));
+
+  // Initialize before inserting the two halves (the original read
+  // final_output in its own initializer, which is undefined).
+  __m512d final_output = _mm512_undefined_pd();
+  final_output = _mm512_insertf64x4(final_output, final_0, 0);
+
+  return _mm512_insertf64x4(final_output, final_1, 1);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
+  //#ifdef EIGEN_VECTORIZE_AVX512DQ
+#if 0
+  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+  Packet8f sum = padd(lane0, lane1);
+  Packet8f tmp0 = _mm256_hadd_ps(sum, _mm256_permute2f128_ps(a, a, 1));
+  tmp0 = _mm256_hadd_ps(tmp0, tmp0);
+  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
+#else
+  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
+  Packet4f sum = padd(padd(lane0, lane1), padd(lane2, lane3));
+  sum = _mm_hadd_ps(sum, sum);
+  sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
+  return pfirst(sum);
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
+  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+  Packet4d sum = padd(lane0, lane1);
+  Packet4d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
+  return pfirst(_mm256_hadd_pd(tmp0, tmp0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+  return padd(lane0, lane1);
+#else
+  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
+  Packet4f sum0 = padd(lane0, lane2);
+  Packet4f sum1 = padd(lane1, lane3);
+  return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE Packet4d predux_downto4<Packet8d>(const Packet8d& a) {
+  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+  Packet4d res = padd(lane0, lane1);
+  return res;
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
+//#ifdef EIGEN_VECTORIZE_AVX512DQ
+#if 0
+  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+  Packet8f res = pmul(lane0, lane1);
+  res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
+  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+#else
+  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
+  Packet4f res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
+  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+#endif
+}
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
+  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+  Packet4d res = pmul(lane0, lane1);
+  res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
+  return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
+  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
+  Packet4f res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
+  res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+  return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+}
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
+  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+  Packet4d res = _mm256_min_pd(lane0, lane1);
+  res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
+  return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
+  Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
+  Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
+  Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
+  Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
+  Packet4f res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
+  res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+  return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+}
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
+  Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+  Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+  Packet4d res = _mm256_max_pd(lane0, lane1);
+  res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
+  return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
+}
+
+template <int Offset>
+struct palign_impl<Offset, Packet16f> {
+  static EIGEN_STRONG_INLINE void run(Packet16f& first,
+                                      const Packet16f& second) {
+    if (Offset != 0) {
+      __m512i first_idx = _mm512_set_epi32(
+          Offset + 15, Offset + 14, Offset + 13, Offset + 12, Offset + 11,
+          Offset + 10, Offset + 9, Offset + 8, Offset + 7, Offset + 6,
+          Offset + 5, Offset + 4, Offset + 3, Offset + 2, Offset + 1, Offset);
+
+      __m512i second_idx =
+          _mm512_set_epi32(Offset - 1, Offset - 2, Offset - 3, Offset - 4,
+                           Offset - 5, Offset - 6, Offset - 7, Offset - 8,
+                           Offset - 9, Offset - 10, Offset - 11, Offset - 12,
+                           Offset - 13, Offset - 14, Offset - 15, Offset - 16);
+
+      unsigned short mask = 0xFFFF;
+      mask <<= (16 - Offset);
+
+      first = _mm512_permutexvar_ps(first_idx, first);
+      Packet16f tmp = _mm512_permutexvar_ps(second_idx, second);
+      first = _mm512_mask_blend_ps(mask, first, tmp);
+    }
+  }
+};
+template <int Offset>
+struct palign_impl<Offset, Packet8d> {
+  static EIGEN_STRONG_INLINE void run(Packet8d& first, const Packet8d& second) {
+    if (Offset != 0) {
+      __m512i first_idx = _mm512_set_epi32(
+          0, Offset + 7, 0, Offset + 6, 0, Offset + 5, 0, Offset + 4, 0,
+          Offset + 3, 0, Offset + 2, 0, Offset + 1, 0, Offset);
+
+      __m512i second_idx = _mm512_set_epi32(
+          0, Offset - 1, 0, Offset - 2, 0, Offset - 3, 0, Offset - 4, 0,
+          Offset - 5, 0, Offset - 6, 0, Offset - 7, 0, Offset - 8);
+
+      unsigned char mask = 0xFF;
+      mask <<= (8 - Offset);
+
+      first = _mm512_permutexvar_pd(first_idx, first);
+      Packet8d tmp = _mm512_permutexvar_pd(second_idx, second);
+      first = _mm512_mask_blend_pd(mask, first, tmp);
+    }
+  }
+};
+
+
+#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
+  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
+  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
+  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
+  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
+  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
+  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
+  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
+  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
+  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
+  __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
+  __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
+  __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
+  __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
+  __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
+  __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
+  __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
+  __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
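+  // The unpack stage above interleaved element pairs of adjacent rows within
+  // each 128-bit lane; the shuffle stage below regroups them into 4-element
+  // column slices per lane.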
+  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
+
+  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
+  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
+  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
+  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
+  EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
+  EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
+  EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
+  EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
+  EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
+  EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
+  EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
+  EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
+  EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
+  EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
+  EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
+  EIGEN_EXTRACT_8f_FROM_16f(S15, S15);
+
+  PacketBlock<Packet8f, 32> tmp;
+
+  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
+  tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
+  tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
+  tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
+  tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
+  tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
+  tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
+  tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);
+
+  tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
+  tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
+  tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
+  tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
+  tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
+  tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
+  tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
+  tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);
+
+  // Second set of _m256 outputs
+  tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
+  tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
+  tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
+  tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
+  tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
+  tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
+  tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
+  tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);
+
+  tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
+  tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
+  tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
+  tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
+  tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
+  tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
+  tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
+  tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);
+
+  // Pack them into the output
+  PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);
+
+  PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);
+
+  PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);
+
+  PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
+  PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
+}
+#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)         \
+  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \
+                           INPUT[2 * INDEX + STRIDE]);
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
+  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
+  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
+  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
+  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
+
+  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
+  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
+  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
+
+  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
+  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
+  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
+  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
+
+  PacketBlock<Packet8f, 8> tmp;
+
+  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
+  tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
+  tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
+  tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);
+
+  tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
+  tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
+  tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
+  tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);
+
+  PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
+  PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
+  PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
+  PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
+}
+
+#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \
+  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
+  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);
+
+#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \
+  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
+  OUTPUT[INDEX] =                                                           \
+      _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
+  __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
+  __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
+  __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
+  __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);
+
+  PacketBlock<Packet4d, 8> tmp;
+
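+  // _mm512_shuffle_pd with control 0/0xff picks the even/odd doubles of each
+  // 128-bit lane pair (per-lane unpacklo/unpackhi); the 256-bit permutes
+  // below then regroup the lanes across the two extracted halves.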
+  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
+  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
+  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
+  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                         _mm512_extractf64x4_pd(T3, 0), 0x31);
+
+  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
+  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
+  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
+  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                         _mm512_extractf64x4_pd(T3, 1), 0x31);
+
+  PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
+  PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
+  PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
+  PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
+  __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
+  __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
+  __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
+  __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
+  __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
+  __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
+  __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
+  __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);
+
+  PacketBlock<Packet4d, 16> tmp;
+
+  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
+  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
+  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
+  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                         _mm512_extractf64x4_pd(T3, 0), 0x31);
+
+  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
+  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
+  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
+  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                         _mm512_extractf64x4_pd(T3, 1), 0x31);
+
+  tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
+                                         _mm512_extractf64x4_pd(T6, 0), 0x20);
+  tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
+                                         _mm512_extractf64x4_pd(T7, 0), 0x20);
+  tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
+                                          _mm512_extractf64x4_pd(T6, 0), 0x31);
+  tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
+                                          _mm512_extractf64x4_pd(T7, 0), 0x31);
+
+  tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
+                                          _mm512_extractf64x4_pd(T6, 1), 0x20);
+  tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
+                                          _mm512_extractf64x4_pd(T7, 1), 0x20);
+  tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
+                                          _mm512_extractf64x4_pd(T6, 1), 0x31);
+  tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
+                                          _mm512_extractf64x4_pd(T7, 1), 0x31);
+
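+  // Each 512-bit output row j is rebuilt from 256-bit halves tmp[j] (low) and
+  // tmp[j+8] (high); see PACK_OUTPUT_SQ_D above.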
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);
+
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
+  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
+}
+template <>
+EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
+                                     const Packet16f& /*thenPacket*/,
+                                     const Packet16f& /*elsePacket*/) {
+  assert(false && "To be implemented");
+  return Packet16f();
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& /*ifPacket*/,
+                                    const Packet8d& /*thenPacket*/,
+                                    const Packet8d& /*elsePacket*/) {
+  assert(false && "To be implemented");
+  return Packet8d();
+}
+
+}  // end namespace internal
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_PACKET_MATH_AVX512_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/Complex.h
new file mode 100644
index 000000000..3e665730c
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -0,0 +1,430 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud
+// Copyright (C) 2010-2016 Konstantinos Margaritis
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
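+// AltiVec/VSX packets for complex scalars: Packet2cf holds two
+// std::complex<float> in one Packet4f; under __VSX__, Packet1cd holds one
+// std::complex<double> in a Packet2d.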
+
+#ifndef EIGEN_COMPLEX32_ALTIVEC_H
+#define EIGEN_COMPLEX32_ALTIVEC_H
+
+namespace Eigen {
+
+namespace internal {
+
+static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+#ifdef __VSX__
+#if defined(_BIG_ENDIAN)
+static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+#else
+static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+#endif
+#endif
+
+//---------- float ----------
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE explicit Packet2cf() : v(p4f_ZERO) {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+  Packet4f v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+  typedef Packet2cf type;
+  typedef Packet2cf half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+#ifdef __VSX__
+    HasBlend = 1,
+#endif
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+  Packet2cf res;
+  if((std::ptrdiff_t(&from) % 16) == 0)
+    res.v = pload<Packet4f>((const float *)&from);
+  else
+    res.v = ploadu<Packet4f>((const float *)&from);
+  res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { return Packet2cf(pload<Packet4f>((const float *) from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { return Packet2cf(ploadu<Packet4f>((const float*) from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstoreu((float*)to, from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+{
+  std::complex<float> EIGEN_ALIGN16 af[2];
+  af[0] = from[0*stride];
+  af[1] = from[1*stride];
+  return pload<Packet2cf>(af);
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+{
+  std::complex<float> EIGEN_ALIGN16 af[2];
+  pstore<std::complex<float> >((std::complex<float> *) af, from);
+  to[0*stride] = af[0];
+  to[1*stride] = af[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v + b.v); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v - b.v); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  Packet4f v1, v2;
+
+  // Permute and multiply the real parts of a and b
+  v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
+  // Get the imaginary parts of a
+  v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
+  // multiply a_re * b
+  v1 = vec_madd(v1, b.v, p4f_ZERO);
+  // multiply a_im * b and get the conjugate result
+  v2 = vec_madd(v2, b.v, p4f_ZERO);
+  v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR)));
+  // permute back to a proper order
+  v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
+
+  return Packet2cf(padd<Packet4f>(v1, v2));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand   (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por    (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor   (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_PPC_PREFETCH(addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+  std::complex<float> EIGEN_ALIGN16 res[2];
+  pstore<float>((float *)&res, a.v);
+
+  return res[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+  Packet4f rev_a;
+  rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
+  return Packet2cf(rev_a);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+  Packet4f b;
+  b = vec_sld(a.v, a.v, 8);
+  b = padd<Packet4f>(a.v, b);
+  return pfirst<Packet2cf>(Packet2cf(b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+  Packet4f b1, b2;
+#ifdef _BIG_ENDIAN
+  b1 = vec_sld(vecs[0].v, vecs[1].v, 8);
+  b2 = vec_sld(vecs[1].v, vecs[0].v, 8);
+#else
+  b1 = vec_sld(vecs[1].v, vecs[0].v, 8);
+  b2 = vec_sld(vecs[0].v, vecs[1].v, 8);
+#endif
+  b2 = vec_sld(b2, b2, 8);
+  b2 = padd<Packet4f>(b1, b2);
+
+  return Packet2cf(b2);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+  Packet4f b;
+  Packet2cf prod;
+  b = vec_sld(a.v, a.v, 8);
+  prod = pmul<Packet2cf>(a, Packet2cf(b));
+
+  return pfirst<Packet2cf>(prod);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
+  {
+    if (Offset==1)
+    {
+#ifdef _BIG_ENDIAN
+      first.v = vec_sld(first.v, second.v, 8);
+#else
+      first.v = vec_sld(second.v, first.v, 8);
+#endif
+    }
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(a, pconj(b));
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(pconj(a), b);
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return pconj(internal::pmul(a, b));
+  }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
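+// pdiv below computes a/b as (a * conj(b)) / |b|^2: the conj_helper supplies
+// a * conj(b), and |b|^2 per complex is b.v*b.v plus its real/imag-swapped
+// permutation (p16uc_COMPLEX32_REV).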
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for AltiVec
+  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a, b);
+  Packet4f s = pmul<Packet4f>(b.v, b.v);
+  return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
+{
+  return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
+{
+  Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+  kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+  kernel.packet[0].v = tmp;
+}
+
+#ifdef __VSX__
+template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
+  Packet2cf result;
+  result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));
+  return result;
+}
+#endif
+
+//---------- double ----------
+#ifdef __VSX__
+struct Packet1cd
+{
+  EIGEN_STRONG_INLINE Packet1cd() {}
+  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+  Packet2d v;
+};
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+  typedef Packet1cd type;
+  typedef Packet1cd half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 0,
+    size = 1,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+
+template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { return Packet1cd(pload<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { pstoreu((double*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
+{
+  std::complex<double> EIGEN_ALIGN16 af[2];
+  af[0] = from[0*stride];
+  af[1] = from[1*stride];
+  return pload<Packet1cd>(af);
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
+{
+  std::complex<double> EIGEN_ALIGN16 af[2];
+  pstore<std::complex<double> >(af, from);
+  to[0*stride] = af[0];
+  to[1*stride] = af[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR2))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  Packet2d a_re, a_im, v1, v2;
+
+  // Permute and multiply the real parts of a and b
+  a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
+  // Get the imaginary parts of a
+  a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
+  // multiply a_re * b
+  v1 = vec_madd(a_re, b.v, p2d_ZERO);
+  // multiply a_im * b and get the conjugate result
+  v2 = vec_madd(a_im, b.v, p2d_ZERO);
+  v2 = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v2), reinterpret_cast<Packet4ui>(v2), 8));
+  v2 = pxor(v2, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR1));
+
+  return Packet1cd(padd<Packet2d>(v1, v2));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pand   (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pand<Packet2d>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd por    (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(por<Packet2d>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pxor   (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pxor<Packet2d>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pandnot<Packet2d>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_PPC_PREFETCH(addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+{
+  std::complex<double> EIGEN_ALIGN16 res[2];
+  pstore<std::complex<double> >(res, a);
+
+  return res[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) { return vecs[0]; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+template<int Offset>
+struct palign_impl<Offset,Packet1cd>
+{
+  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
+  {
+    // FIXME is it sure we never have to align a Packet1cd?
+    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
+ } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return internal::pmul(a, pconj(b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return internal::pmul(pconj(a), b); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return pconj(internal::pmul(a, b)); + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + // TODO optimize it for AltiVec + Packet1cd res = conj_helper().pmul(a,b); + Packet2d s = pmul(b.v, b.v); + return Packet1cd(pdiv(res.v, padd(s, vec_perm(s, s, p16uc_REVERSE64)))); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} +#endif // __VSX__ +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX32_ALTIVEC_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/MathFunctions.h new file mode 100644 index 000000000..c5e4bede7 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/MathFunctions.h @@ -0,0 +1,322 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
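// Editorial note on the pdiv specializations above (Packet2cf and Packet1cd),
// not part of the Eigen sources: both rely on a/b = (a * conj(b)) / |b|^2.
// The conj_helper with the conjugate-rhs flag supplies a*conj(b);
// pmul(b.v, b.v) yields (br^2, bi^2) per complex value, and adding a
// byte-rotated copy of that packet broadcasts br^2 + bi^2 to both lanes, so
// one packet divide finishes the job. A scalar sketch under those assumptions:
//   std::complex<float> div_ref(std::complex<float> a, std::complex<float> b) {
//     std::complex<float> num = a * std::conj(b);       // conj_helper step
//     float s = b.real()*b.real() + b.imag()*b.imag();  // |b|^2 step
//     return std::complex<float>(num.real()/s, num.imag()/s); // packet divide
//   }
// Check: (1+2i)/(3+4i) -> num = 11+2i, s = 25, result 0.44 + 0.08i.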
+ +/* The sin, cos, exp, and log functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H +#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); +static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); +static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); +static _EIGEN_DECLARE_CONST_Packet4i(23, 23); + +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000); + +/* the smallest non denormalized float number */ +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff); + +/* natural logarithm computed for 4 simultaneous float + return NaN for x <= 0 +*/ +static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); + +static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); +static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); + +static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); + +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); + +#ifdef __VSX__ +static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); +static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); +static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); + +static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); +static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); + +static 
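// Editorial note on the coefficient tables above (added, not Eigen code):
// these are Cephes' minimax constants. plog reduces its argument to
// [sqrt(1/2), sqrt(2)) and evaluates a degree-9 polynomial in (x - 1); the
// e*ln(2) term is added back in two pieces (q2 = 0.693359375 and
// q1 = -2.12194440e-4, which sum to ln(2) = 0.6931471805...) so low-order
// bits are not lost. The exp_p/exp_q tables play the same role for exp(g)
// after g is reduced to |g| <= ln(2)/2.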
_EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); + +#ifdef __POWER8_VECTOR__ +static Packet2l p2l_1023 = { 1023, 1023 }; +static Packet2ul p2ul_52 = { 52, 52 }; +#endif + +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog(const Packet4f& _x) +{ + Packet4f x = _x; + + Packet4i emm0; + + /* isvalid_mask is 0 if x < 0 or x is NaN. */ + Packet4ui isvalid_mask = reinterpret_cast(vec_cmpge(x, p4f_ZERO)); + Packet4ui iszero_mask = reinterpret_cast(vec_cmpeq(x, p4f_ZERO)); + + x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ + emm0 = vec_sr(reinterpret_cast(x), + reinterpret_cast(p4i_23)); + + /* keep only the fractional part */ + x = pand(x, p4f_inv_mant_mask); + x = por(x, p4f_half); + + emm0 = psub(emm0, p4i_0x7f); + Packet4f e = padd(vec_ctf(emm0, 0), p4f_1); + + /* part2: + if( x < SQRTHF ) { + e -= 1; + x = x + x - 1.0; + } else { x = x - 1.0; } + */ + Packet4f mask = reinterpret_cast(vec_cmplt(x, p4f_cephes_SQRTHF)); + Packet4f tmp = pand(x, mask); + x = psub(x, p4f_1); + e = psub(e, pand(p4f_1, mask)); + x = padd(x, tmp); + + Packet4f x2 = pmul(x,x); + Packet4f x3 = pmul(x2,x); + + Packet4f y, y1, y2; + y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); + y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); + y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7); + y = pmadd(y , x, p4f_cephes_log_p2); + y1 = pmadd(y1, x, p4f_cephes_log_p5); + y2 = pmadd(y2, x, p4f_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); + + y1 = pmul(e, p4f_cephes_log_q1); + tmp = pmul(x2, p4f_half); + y = padd(y, y1); + x = psub(x, tmp); + y2 = pmul(e, p4f_cephes_log_q2); + x = padd(x, y); + x = padd(x, y2); + // negative arg will be NAN, 0 will be -INF + x = vec_sel(x, p4f_minus_inf, iszero_mask); + x = vec_sel(p4f_minus_nan, x, isvalid_mask); + return x; +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pexp(const Packet4f& _x) +{ + Packet4f x = _x; + + Packet4f tmp, fx; + Packet4i emm0; + + // clamp x + x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo); + + // express exp(x) as exp(g + n*log(2)) + fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half); + + fx = pfloor(fx); + + tmp = pmul(fx, p4f_cephes_exp_C1); + Packet4f z = pmul(fx, p4f_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + z = pmul(x,x); + + Packet4f y = p4f_cephes_exp_p0; + y = pmadd(y, x, p4f_cephes_exp_p1); + y = pmadd(y, x, p4f_cephes_exp_p2); + y = pmadd(y, x, p4f_cephes_exp_p3); + y = pmadd(y, x, p4f_cephes_exp_p4); + y = pmadd(y, x, p4f_cephes_exp_p5); + y = pmadd(y, z, x); + y = padd(y, p4f_1); + + // build 2^n + emm0 = vec_cts(fx, 0); + emm0 = vec_add(emm0, p4i_0x7f); + emm0 = vec_sl(emm0, reinterpret_cast(p4i_23)); + + // Altivec's max & min operators just drop silent NaNs. Check NaNs in + // inputs and return them unmodified. 
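// Editorial worked example (added note) for the 2^n construction just above:
// for n = 3, emm0 = 3 + 0x7f = 130 and 130 << 23 = 0x41000000, whose bit
// pattern reinterpreted as a float is 8.0f = 2^3. Multiplying the polynomial
// result y by that packet scales it by 2^n, completing exp(x) = 2^n * exp(g).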
+ Packet4ui isnumber_mask = reinterpret_cast(vec_cmpeq(_x, _x)); + return vec_sel(_x, pmax(pmul(y, reinterpret_cast(emm0)), _x), + isnumber_mask); +} + +#ifndef EIGEN_COMP_CLANG +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& x) +{ + return vec_rsqrt(x); +} +#endif + +#ifdef __VSX__ +#ifndef EIGEN_COMP_CLANG +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d prsqrt(const Packet2d& x) +{ + return vec_rsqrt(x); +} +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psqrt(const Packet4f& x) +{ + return vec_sqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d psqrt(const Packet2d& x) +{ + return vec_sqrt(x); +} + +// VSX support varies between different compilers and even different +// versions of the same compiler. For gcc version >= 4.9.3, we can use +// vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use +// a slow version that works with older compilers. +// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles +// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963 +static inline Packet2l ConvertToPacket2l(const Packet2d& x) { +#if EIGEN_GNUC_AT_LEAST(5, 4) || \ + (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1) + return vec_cts(x, 0); // TODO: check clang version. +#else + double tmp[2]; + memcpy(tmp, &x, sizeof(tmp)); + Packet2l l = { static_cast(tmp[0]), + static_cast(tmp[1]) }; + return l; +#endif +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d pexp(const Packet2d& _x) +{ + Packet2d x = _x; + + Packet2d tmp, fx; + Packet2l emm0; + + // clamp x + x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); + + /* express exp(x) as exp(g + n*log(2)) */ + fx = pmadd(x, p2d_cephes_LOG2EF, p2d_half); + + fx = pfloor(fx); + + tmp = pmul(fx, p2d_cephes_exp_C1); + Packet2d z = pmul(fx, p2d_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + Packet2d x2 = pmul(x,x); + + Packet2d px = p2d_cephes_exp_p0; + px = pmadd(px, x2, p2d_cephes_exp_p1); + px = pmadd(px, x2, p2d_cephes_exp_p2); + px = pmul (px, x); + + Packet2d qx = p2d_cephes_exp_q0; + qx = pmadd(qx, x2, p2d_cephes_exp_q1); + qx = pmadd(qx, x2, p2d_cephes_exp_q2); + qx = pmadd(qx, x2, p2d_cephes_exp_q3); + + x = pdiv(px,psub(qx,px)); + x = pmadd(p2d_2,x,p2d_1); + + // build 2^n + emm0 = ConvertToPacket2l(fx); + +#ifdef __POWER8_VECTOR__ + emm0 = vec_add(emm0, p2l_1023); + emm0 = vec_sl(emm0, p2ul_52); +#else + // Code is a bit complex for POWER7. There is actually a + // vec_xxsldi intrinsic but it is not supported by some gcc versions. + // So we shift (52-32) bits and do a word swap with zeros. + _EIGEN_DECLARE_CONST_Packet4i(1023, 1023); + _EIGEN_DECLARE_CONST_Packet4i(20, 20); // 52 - 32 + + Packet4i emm04i = reinterpret_cast(emm0); + emm04i = vec_add(emm04i, p4i_1023); + emm04i = vec_sl(emm04i, reinterpret_cast(p4i_20)); + static const Packet16uc perm = { + 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03, + 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b }; +#ifdef _BIG_ENDIAN + emm0 = reinterpret_cast(vec_perm(p4i_ZERO, emm04i, perm)); +#else + emm0 = reinterpret_cast(vec_perm(emm04i, p4i_ZERO, perm)); +#endif + +#endif + + // Altivec's max & min operators just drop silent NaNs. Check NaNs in + // inputs and return them unmodified. 
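// Editorial note (added) on the 2^n construction just above: the same trick
// as the float path with binary64 layout, emm0 = (n + 1023) << 52; for n = 3
// that is 0x4020000000000000, which reads back as 8.0. The pre-POWER8
// fallback emulates the 52-bit doubleword shift as a 20-bit shift on 32-bit
// words plus a permute against zero words, since 64-bit vec_sl needs POWER8.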
+ Packet2ul isnumber_mask = reinterpret_cast(vec_cmpeq(_x, _x)); + return vec_sel(_x, pmax(pmul(x, reinterpret_cast(emm0)), _x), + isnumber_mask); +} +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h new file mode 100644 index 000000000..08a27d153 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -0,0 +1,1061 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_ALTIVEC_H +#define EIGEN_PACKET_MATH_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD +#endif + +// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16 +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#endif + +typedef __vector float Packet4f; +typedef __vector int Packet4i; +typedef __vector unsigned int Packet4ui; +typedef __vector __bool int Packet4bi; +typedef __vector short int Packet8i; +typedef __vector unsigned char Packet16uc; + +// We don't want to write the same code all the time, but we need to reuse the constants +// and it doesn't really work to declare them global, so we define macros instead + +#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = reinterpret_cast(vec_splat_s32(X)) + +#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = vec_splat_s32(X) + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = reinterpret_cast(pset1(X)) + +#define DST_CHAN 1 +#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride)) + + +// These constants are endian-agnostic +static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); //{ 1, 1, 1, 1} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); //{ -16, -16, -16, -16} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1} +static Packet4f p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); //{ 0x80000000, 0x80000000, 0x80000000, 0x80000000} +#ifndef __VSX__ +static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); //{ 1.0, 1.0, 1.0, 
1.0} +#endif + +static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }; +static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }; + +static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }; +static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }; + +// Mask alignment +#ifdef __PPC64__ +#define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0 +#else +#define _EIGEN_MASK_ALIGNMENT 0xfffffff0 +#endif + +#define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT) + +// Handle endianness properly while loading constants +// Define global static constants: +#ifdef _BIG_ENDIAN +static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0); +#ifdef __VSX__ +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +#endif +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; +#else +static Packet16uc p16uc_FORWARD = p16uc_REVERSE32; +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +static Packet16uc p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; +#endif // _BIG_ENDIAN + +static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; +static Packet16uc p16uc_TRANSPOSE64_HI = p16uc_PSET64_HI + p16uc_HALF64_0_16; //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = p16uc_PSET64_LO + p16uc_HALF64_0_16; //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}; + +static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; + +#ifdef _BIG_ENDIAN +static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +#else +static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +#endif // _BIG_ENDIAN + +#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC + #define EIGEN_PPC_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#else + #define EIGEN_PPC_PREFETCH(ADDR) asm( " dcbt [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" ); +#endif + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, 
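// Editorial note (added) on the permute-mask globals above: vec_perm selects
// bytes by their in-register index, so any mask that is not byte-order
// symmetric needs both a _BIG_ENDIAN and a little-endian definition. Deriving
// the PSET32/PSET64/TRANSPOSE64 masks from p16uc_FORWARD and p16uc_REVERSE32
// keeps the kernel bodies that consume them identical on both byte orders.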
+ HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, +#ifdef __VSX__ + HasSqrt = 1, +#if !EIGEN_COMP_CLANG + HasRsqrt = 1, +#else + HasRsqrt = 0, +#endif +#else + HasSqrt = 0, + HasRsqrt = 0, +#endif + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + + +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; }; + +inline std::ostream & operator <<(std::ostream & s, const Packet16uc & v) +{ + union { + Packet16uc v; + unsigned char n[16]; + } vt; + vt.v = v; + for (int i=0; i< 16; i++) + s << (int)vt.n[i] << ", "; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4f & v) +{ + union { + Packet4f v; + float n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4i & v) +{ + union { + Packet4i v; + int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v) +{ + union { + Packet4ui v; + unsigned int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +// Need to define them first or we get specialization after instantiation errors +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD +#ifdef __VSX__ + return vec_vsx_ld(0, from); +#else + return vec_ld(0, from); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD +#ifdef __VSX__ + return vec_vsx_ld(0, from); +#else + return vec_ld(0, from); +#endif +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ + EIGEN_DEBUG_ALIGNED_STORE +#ifdef __VSX__ + vec_vsx_st(from, 0, to); +#else + vec_st(from, 0, to); +#endif +} + +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) +{ + EIGEN_DEBUG_ALIGNED_STORE +#ifdef __VSX__ + vec_vsx_st(from, 0, to); +#else + vec_st(from, 0, to); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { + Packet4f v = {from, from, from, from}; + return v; +} + +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { + Packet4i v = {from, from, from, from}; + return v; +} +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const int *a, + Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) +{ + float EIGEN_ALIGN16 af[4]; + af[0] = 
from[0*stride]; + af[1] = from[1*stride]; + af[2] = from[2*stride]; + af[3] = from[3*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) +{ + int EIGEN_ALIGN16 ai[4]; + ai[0] = from[0*stride]; + ai[1] = from[1*stride]; + ai[2] = from[2*stride]; + ai[3] = from[3*stride]; + return pload(ai); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) +{ + float EIGEN_ALIGN16 af[4]; + pstore(af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; + to[2*stride] = af[2]; + to[3*stride] = af[3]; +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) +{ + int EIGEN_ALIGN16 ai[4]; + pstore((int *)ai, from); + to[0*stride] = ai[0]; + to[1*stride] = ai[1]; + to[2*stride] = ai[2]; + to[3*stride] = ai[3]; +} + +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return pset1(a) + p4f_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return pset1(a) + p4i_COUNTDOWN; } + +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return a + b; } + +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return a - b; } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return a - b; } + +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return p4f_ZERO - a; } +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return p4i_ZERO - a; } + +template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b, p4f_MZERO); } +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { return a * b; } + +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) +{ +#ifndef __VSX__ // VSX actually provides a div instruction + Packet4f t, y_0, y_1; + + // Altivec does not offer a divide instruction, we have to do a reciprocal approximation + y_0 = vec_re(b); + + // Do one Newton-Raphson iteration to get the needed accuracy + t = vec_nmsub(y_0, b, p4f_ONE); + y_1 = vec_madd(y_0, t, y_0); + + return vec_madd(a, y_1, p4f_MZERO); +#else + return vec_div(a, b); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) +{ eigen_assert(false && "packet integer division are not supported by AltiVec"); + return pset1(0); +} + +// for some weird raisons, it has to be overloaded for packet of integers +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a,b,c); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return a*b + c; } + +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) +{ + #ifdef __VSX__ + Packet4f ret; + __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; + #else + return vec_min(a, b); + #endif +} +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) +{ + 
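// Editorial check (added note) of the Newton-Raphson division above: vec_re
// returns y0 ~ 1/b with roughly half of the mantissa bits correct, and one
// refinement y1 = y0 + y0*(1 - b*y0) about doubles the accurate bits. For
// b = 3.0 and y0 = 0.333252, 1 - b*y0 = 2.44e-4 and y1 = 0.33333325, after
// which a * y1 is within a final rounding of a / b.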
#ifdef __VSX__ + Packet4f ret; + __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; + #else + return vec_max(a, b); + #endif +} +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); } + +template<> EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a) { return vec_round(a); } +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) { return vec_floor(a); } + +#ifdef _BIG_ENDIAN +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD + Packet16uc MSQ, LSQ; + Packet16uc mask; + MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword + mask = vec_lvsl(0, from); // create the permute mask + return (Packet4f) vec_perm(MSQ, LSQ, mask); // align the data + +} +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + Packet16uc MSQ, LSQ; + Packet16uc mask; + MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword + mask = vec_lvsl(0, from); // create the permute mask + return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data +} +#else +// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from)); +} +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from)); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + Packet4f p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE32_HI); +} +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) +{ + Packet4i p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE32_HI); +} + +#ifdef _BIG_ENDIAN +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + // Warning: not thread safe! 
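// Editorial note (added) on the big-endian unaligned paths: ploadu above
// loads the two aligned quadwords that straddle `from` and fuses them with a
// vec_lvsl-generated permute; pstoreu below is the dual, a read-modify-write
// that loads both enclosing quadwords, splices the new data in via vec_lvsr,
// and stores both back -- hence the "not thread safe" warning: another writer
// touching the edge bytes between the loads and the stores would be clobbered.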
+ Packet16uc MSQ, LSQ, edges; + Packet16uc edgeAlign, align; + + MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword + edgeAlign = vec_lvsl(0, to); // permute map to extract edges + edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges + align = vec_lvsr( 0, to ); // permute map to misalign data + MSQ = vec_perm(edges,(Packet16uc)from,align); // misalign the data (MSQ) + LSQ = vec_perm((Packet16uc)from,edges,align); // misalign the data (LSQ) + vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first + vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part +} +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + // Warning: not thread safe! + Packet16uc MSQ, LSQ, edges; + Packet16uc edgeAlign, align; + + MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword + edgeAlign = vec_lvsl(0, to); // permute map to extract edges + edges=vec_perm(LSQ, MSQ, edgeAlign); // extract the edges + align = vec_lvsr( 0, to ); // permute map to misalign data + MSQ = vec_perm(edges, (Packet16uc) from, align); // misalign the data (MSQ) + LSQ = vec_perm((Packet16uc) from, edges, align); // misalign the data (LSQ) + vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first + vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part +} +#else +// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) +{ + EIGEN_DEBUG_ALIGNED_STORE + vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to)); +} +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) +{ + EIGEN_DEBUG_ALIGNED_STORE + vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to)); +} +#endif + +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { EIGEN_PPC_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; } + +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); +} +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); } + +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); } + +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + Packet4f b, sum; + b = vec_sld(a, a, 8); + sum = a + b; + b = vec_sld(sum, sum, 4); + sum += b; + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE Packet4f preduxp(const Packet4f* vecs) +{ + Packet4f v[4], sum[4]; + + // It's easier and faster to transpose then add as columns + // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation + // Do the transpose, first set of moves + v[0] = vec_mergeh(vecs[0], vecs[2]); + v[1] = vec_mergel(vecs[0], vecs[2]); + v[2] = 
vec_mergeh(vecs[1], vecs[3]); + v[3] = vec_mergel(vecs[1], vecs[3]); + // Get the resulting vectors + sum[0] = vec_mergeh(v[0], v[2]); + sum[1] = vec_mergel(v[0], v[2]); + sum[2] = vec_mergeh(v[1], v[3]); + sum[3] = vec_mergel(v[1], v[3]); + + // Now do the summation: + // Lines 0+1 + sum[0] = sum[0] + sum[1]; + // Lines 2+3 + sum[1] = sum[2] + sum[3]; + // Add the results + sum[0] = sum[0] + sum[1]; + + return sum[0]; +} + +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) +{ + Packet4i sum; + sum = vec_sums(a, p4i_ZERO); +#ifdef _BIG_ENDIAN + sum = vec_sld(sum, p4i_ZERO, 12); +#else + sum = vec_sld(p4i_ZERO, sum, 4); +#endif + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs) +{ + Packet4i v[4], sum[4]; + + // It's easier and faster to transpose then add as columns + // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation + // Do the transpose, first set of moves + v[0] = vec_mergeh(vecs[0], vecs[2]); + v[1] = vec_mergel(vecs[0], vecs[2]); + v[2] = vec_mergeh(vecs[1], vecs[3]); + v[3] = vec_mergel(vecs[1], vecs[3]); + // Get the resulting vectors + sum[0] = vec_mergeh(v[0], v[2]); + sum[1] = vec_mergel(v[0], v[2]); + sum[2] = vec_mergeh(v[1], v[3]); + sum[3] = vec_mergel(v[1], v[3]); + + // Now do the summation: + // Lines 0+1 + sum[0] = sum[0] + sum[1]; + // Lines 2+3 + sum[1] = sum[2] + sum[3]; + // Add the results + sum[0] = sum[0] + sum[1]; + + return sum[0]; +} + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) +{ + Packet4f prod; + prod = pmul(a, vec_sld(a, a, 8)); + return pfirst(pmul(prod, vec_sld(prod, prod, 4))); +} + +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) +{ + EIGEN_ALIGN16 int aux[4]; + pstore(aux, a); + return aux[0] * aux[1] * aux[2] * aux[3]; +} + +// min +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) +{ + Packet4f b, res; + b = vec_min(a, vec_sld(a, a, 8)); + res = vec_min(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) +{ + Packet4i b, res; + b = vec_min(a, vec_sld(a, a, 8)); + res = vec_min(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +// max +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) +{ + Packet4f b, res; + b = vec_max(a, vec_sld(a, a, 8)); + res = vec_max(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) +{ + Packet4i b, res; + b = vec_max(a, vec_sld(a, a, 8)); + res = vec_max(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) + { +#ifdef _BIG_ENDIAN + switch (Offset % 4) { + case 1: + first = vec_sld(first, second, 4); break; + case 2: + first = vec_sld(first, second, 8); break; + case 3: + first = vec_sld(first, second, 12); break; + } +#else + switch (Offset % 4) { + case 1: + first = vec_sld(second, first, 12); break; + case 2: + first = vec_sld(second, first, 8); break; + case 3: + first = vec_sld(second, first, 4); break; + } +#endif + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) + { +#ifdef _BIG_ENDIAN + switch (Offset % 4) { + case 1: + first = vec_sld(first, second, 4); break; + case 2: + first = vec_sld(first, second, 8); break; + case 3: + first = vec_sld(first, second, 12); break; + } +#else + switch (Offset % 4) { + case 1: + 
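// Editorial note (added) on the reductions above: predux, predux_min and
// predux_max use a log2(N) ladder -- an 8-byte vec_sld folds lanes {2,3} onto
// {0,1}, then a 4-byte vec_sld folds the two partial results -- so a 4-lane
// horizontal reduction costs two shuffles and two vector ops rather than
// three dependent scalar ops.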
first = vec_sld(second, first, 12); break; + case 2: + first = vec_sld(second, first, 8); break; + case 3: + first = vec_sld(second, first, 4); break; + } +#endif + } +}; + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet4f t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet4i t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] }; + Packet4ui mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), reinterpret_cast(p4i_ONE))); + return vec_sel(elsePacket, thenPacket, mask); +} + +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] }; + Packet4ui mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), reinterpret_cast(p4i_ONE))); + return vec_sel(elsePacket, thenPacket, mask); +} + + +//---------- double ---------- +#ifdef __VSX__ +typedef __vector double Packet2d; +typedef __vector unsigned long long Packet2ul; +typedef __vector long long Packet2l; +#if EIGEN_COMP_CLANG +typedef Packet2ul Packet2bl; +#else +typedef __vector __bool long Packet2bl; +#endif + +static Packet2l p2l_ONE = { 1, 1 }; +static Packet2l p2l_ZERO = reinterpret_cast(p4i_ZERO); +static Packet2d p2d_ONE = { 1.0, 1.0 }; +static Packet2d p2d_ZERO = reinterpret_cast(p4f_ZERO); +static Packet2d p2d_MZERO = { -0.0, -0.0 }; + +#ifdef _BIG_ENDIAN +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ZERO), reinterpret_cast(p2d_ONE), 8)); +#else +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ONE), reinterpret_cast(p2d_ZERO), 8)); +#endif + +template Packet2d vec_splat_dbl(Packet2d& a); + +template<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<0>(Packet2d& a) +{ + return reinterpret_cast(vec_perm(a, a, p16uc_PSET64_HI)); +} + +template<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<1>(Packet2d& a) +{ + return reinterpret_cast(vec_perm(a, a, p16uc_PSET64_LO)); +} + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits { 
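// Editorial note (added) on the 4x4 ptranspose kernels above: they are the
// classic two-stage merge network. vec_mergeh/vec_mergel on row pairs (0,2)
// and (1,3) interleave 32-bit words, and a second merge pass over those
// intermediates emits the four transposed rows -- eight permutes in total,
// with no loads or stores.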
typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; }; + +inline std::ostream & operator <<(std::ostream & s, const Packet2l & v) +{ + union { + Packet2l v; + int64_t n[2]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2d & v) +{ + union { + Packet2d v; + double n[2]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1]; + return s; +} + +// Need to define them first or we get specialization after instantiation errors +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD +#ifdef __VSX__ + return vec_vsx_ld(0, from); +#else + return vec_ld(0, from); +#endif +} + +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) +{ + EIGEN_DEBUG_ALIGNED_STORE +#ifdef __VSX__ + vec_vsx_st(from, 0, to); +#else + vec_st(from, 0, to); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { + Packet2d v = {from, from}; + return v; +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const double *a, + Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) +{ + a1 = pload(a); + a0 = vec_splat_dbl<0>(a1); + a1 = vec_splat_dbl<1>(a1); + a3 = pload(a+2); + a2 = vec_splat_dbl<0>(a3); + a3 = vec_splat_dbl<1>(a3); +} + +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) +{ + double EIGEN_ALIGN16 af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) +{ + double EIGEN_ALIGN16 af[2]; + pstore(af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} + +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return pset1(a) + p2d_COUNTDOWN; } + +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return a + b; } + +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return a - b; } + +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return p2d_ZERO - a; } + +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return vec_madd(a,b,p2d_MZERO); } +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return vec_div(a,b); } + +// for some weird raisons, it has to be overloaded for packet of integers +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); } + +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) +{ + Packet2d ret; + __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; + } + +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) +{ + Packet2d ret; + __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} + +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); 
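// Editorial note (added) on pmin/pmax above: they are spelled as inline asm
// (xvcmpgedp/xvcmpgtdp followed by xxsel) rather than vec_min/vec_max,
// presumably to pin down which operand wins when an input is NaN: the compare
// is false on unordered values, so xxsel falls through to the first operand,
// keeping NaN behavior consistent with Eigen's scalar and SSE paths.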
} + +template<> EIGEN_STRONG_INLINE Packet2d pround(const Packet2d& a) { return vec_round(a); } +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) { return vec_floor(a); } + +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD + return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from)); +} + +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ + Packet2d p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_splat_dbl<0>(p); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) +{ + EIGEN_DEBUG_ALIGNED_STORE + vec_vsx_st((Packet4f)from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to)); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore(x, a); return x[0]; } + +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE64)); +} +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); } + +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ + Packet2d b, sum; + b = reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)); + sum = a + b; + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE Packet2d preduxp(const Packet2d* vecs) +{ + Packet2d v[2], sum; + v[0] = vecs[0] + reinterpret_cast(vec_sld(reinterpret_cast(vecs[0]), reinterpret_cast(vecs[0]), 8)); + v[1] = vecs[1] + reinterpret_cast(vec_sld(reinterpret_cast(vecs[1]), reinterpret_cast(vecs[1]), 8)); + +#ifdef _BIG_ENDIAN + sum = reinterpret_cast(vec_sld(reinterpret_cast(v[0]), reinterpret_cast(v[1]), 8)); +#else + sum = reinterpret_cast(vec_sld(reinterpret_cast(v[1]), reinterpret_cast(v[0]), 8)); +#endif + + return sum; +} +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) +{ + return pfirst(pmul(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// min +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) +{ + return pfirst(pmin(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// max +template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) +{ + return pfirst(pmax(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) + { + if (Offset == 1) +#ifdef _BIG_ENDIAN + first = reinterpret_cast(vec_sld(reinterpret_cast(first), reinterpret_cast(second), 8)); +#else + first = reinterpret_cast(vec_sld(reinterpret_cast(second), reinterpret_cast(first), 8)); +#endif + } +}; + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet2d t0, t1; + t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI); + t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO); + kernel.packet[0] = t0; + kernel.packet[1] = t1; +} + +template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) { + Packet2l select = { ifPacket.select[0], ifPacket.select[1] }; + Packet2bl mask = 
reinterpret_cast( vec_cmpeq(reinterpret_cast(select), reinterpret_cast(p2l_ONE)) ); + return vec_sel(elsePacket, thenPacket, mask); +} +#endif // __VSX__ +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_ALTIVEC_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Complex.h new file mode 100644 index 000000000..9c2536509 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Complex.h @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_CUDA_H +#define EIGEN_COMPLEX_CUDA_H + +// clang-format off + +namespace Eigen { + +namespace internal { + +#if defined(__CUDACC__) && defined(EIGEN_USE_GPU) + +// Many std::complex methods such as operator+, operator-, operator* and +// operator/ are not constexpr. Due to this, clang does not treat them as device +// functions and thus Eigen functors making use of these operators fail to +// compile. Here, we manually specialize these functors for complex types when +// building for CUDA to avoid non-constexpr methods. + +// Sum +template struct scalar_sum_op, const std::complex > : binary_op_base, const std::complex > { + typedef typename std::complex result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex operator() (const std::complex& a, const std::complex& b) const { + return std::complex(numext::real(a) + numext::real(b), + numext::imag(a) + numext::imag(b)); + } +}; + +template struct scalar_sum_op, std::complex > : scalar_sum_op, const std::complex > {}; + + +// Difference +template struct scalar_difference_op, const std::complex > : binary_op_base, const std::complex > { + typedef typename std::complex result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex operator() (const std::complex& a, const std::complex& b) const { + return std::complex(numext::real(a) - numext::real(b), + numext::imag(a) - numext::imag(b)); + } +}; + +template struct scalar_difference_op, std::complex > : scalar_difference_op, const std::complex > {}; + + +// Product +template struct scalar_product_op, const std::complex > : binary_op_base, const std::complex > { + enum { + Vectorizable = packet_traits>::HasMul + }; + typedef typename std::complex result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex operator() (const std::complex& a, const std::complex& b) const { + const T a_real = numext::real(a); + const T a_imag = numext::imag(a); + const T b_real = numext::real(b); + const T b_imag = numext::imag(b); + return std::complex(a_real * b_real - a_imag * b_imag, + a_real * b_imag + a_imag * b_real); + } +}; + +template struct scalar_product_op, std::complex > : scalar_product_op, const std::complex > {}; + + +// Quotient +template struct scalar_quotient_op, const std::complex > : binary_op_base, const std::complex > { + enum { + Vectorizable = 
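// Editorial note (added) on these CUDA functor specializations: they re-derive
// the complex quotient by hand as (a * conj(b)) / (br^2 + bi^2) because
// std::complex's operators are not constexpr, so clang refuses to treat them
// as device functions. There is no scaling of the denominator, so br^2 + bi^2
// can overflow or underflow for extreme inputs -- a trade-off accepted here
// for device-side compilability.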
packet_traits>::HasDiv + }; + typedef typename std::complex result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex operator() (const std::complex& a, const std::complex& b) const { + const T a_real = numext::real(a); + const T a_imag = numext::imag(a); + const T b_real = numext::real(b); + const T b_imag = numext::imag(b); + const T norm = T(1) / (b_real * b_real + b_imag * b_imag); + return std::complex((a_real * b_real + a_imag * b_imag) * norm, + (a_imag * b_real - a_real * b_imag) * norm); + } +}; + +template struct scalar_quotient_op, std::complex > : scalar_quotient_op, const std::complex > {}; + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_CUDA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Half.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Half.h new file mode 100644 index 000000000..755e6209d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/Half.h @@ -0,0 +1,674 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +// +// The conversion routines are Copyright (c) Fabian Giesen, 2016. +// The original license follows: +// +// Copyright (c) Fabian Giesen, 2016 +// All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted. +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Standard 16-bit float type, mostly useful for GPUs. Defines a new +// type Eigen::half (inheriting from CUDA's __half struct) with +// operator overloads such that it behaves basically as an arithmetic +// type. It will be quite slow on CPUs (so it is recommended to stay +// in float32_bits for CPUs, except for simple parameter conversions, I/O +// to disk and the likes), but fast on GPUs. + + +#ifndef EIGEN_HALF_CUDA_H +#define EIGEN_HALF_CUDA_H + +#if __cplusplus > 199711L +#define EIGEN_EXPLICIT_CAST(tgt_type) explicit operator tgt_type() +#else +#define EIGEN_EXPLICIT_CAST(tgt_type) operator tgt_type() +#endif + + +namespace Eigen { + +struct half; + +namespace half_impl { + +#if !defined(EIGEN_HAS_CUDA_FP16) +// Make our own __half_raw definition that is similar to CUDA's. 
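// Editorial note (added): three configurations are reconciled below -- no
// CUDA fp16 support (declare a plain 16-bit POD stand-in), CUDA < 9.0 (where
// __half itself is the raw POD, so it is typedef'ed), and CUDA >= 9.0 (where
// a real __half_raw exists) -- so the conversion helpers can always reach the
// raw bit pattern through a single name.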
+struct __half_raw {
+  EIGEN_DEVICE_FUNC __half_raw() : x(0) {}
+  explicit EIGEN_DEVICE_FUNC __half_raw(unsigned short raw) : x(raw) {}
+  unsigned short x;
+};
+#elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+// In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
+typedef __half __half_raw;
+#endif
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
+
+struct half_base : public __half_raw {
+  EIGEN_DEVICE_FUNC half_base() {}
+  EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half_raw(h) {}
+  EIGEN_DEVICE_FUNC half_base(const __half_raw& h) : __half_raw(h) {}
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
+  EIGEN_DEVICE_FUNC half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
+#endif
+};
+
+} // namespace half_impl
+
+// Class definition.
+struct half : public half_impl::half_base {
+  #if !defined(EIGEN_HAS_CUDA_FP16) || (defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000)
+  typedef half_impl::__half_raw __half_raw;
+  #endif
+
+  EIGEN_DEVICE_FUNC half() {}
+
+  EIGEN_DEVICE_FUNC half(const __half_raw& h) : half_impl::half_base(h) {}
+  EIGEN_DEVICE_FUNC half(const half& h) : half_impl::half_base(h) {}
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
+  EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}
+#endif
+
+  explicit EIGEN_DEVICE_FUNC half(bool b)
+      : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
+  template<typename T>
+  explicit EIGEN_DEVICE_FUNC half(const T& val)
+      : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}
+  explicit EIGEN_DEVICE_FUNC half(float f)
+      : half_impl::half_base(half_impl::float_to_half_rtne(f)) {}
+
+  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(bool) const {
+    // +0.0 and -0.0 become false, everything else becomes true.
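// Editorial note (added): masking with 0x7fff clears the half's sign bit, so
// the test below is false only for the bit patterns 0x0000 (+0.0) and 0x8000
// (-0.0); NaNs and infinities therefore convert to true, matching
// float-to-bool semantics.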
+ return (x & 0x7fff) != 0; + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(signed char) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned char) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(short) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned short) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(int) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned int) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long long) const { + return static_cast(half_impl::half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long long) const { + return static_cast(half_to_float(*this)); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(float) const { + return half_impl::half_to_float(*this); + } + EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(double) const { + return static_cast(half_impl::half_to_float(*this)); + } + + EIGEN_DEVICE_FUNC half& operator=(const half& other) { + x = other.x; + return *this; + } +}; + +} // end namespace Eigen + +namespace std { +template<> +struct numeric_limits { + static const bool is_specialized = true; + static const bool is_signed = true; + static const bool is_integer = false; + static const bool is_exact = false; + static const bool has_infinity = true; + static const bool has_quiet_NaN = true; + static const bool has_signaling_NaN = true; + static const float_denorm_style has_denorm = denorm_present; + static const bool has_denorm_loss = false; + static const std::float_round_style round_style = std::round_to_nearest; + static const bool is_iec559 = false; + static const bool is_bounded = false; + static const bool is_modulo = false; + static const int digits = 11; + static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html + static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html + static const int radix = 2; + static const int min_exponent = -13; + static const int min_exponent10 = -4; + static const int max_exponent = 16; + static const int max_exponent10 = 4; + static const bool traps = true; + static const bool tinyness_before = false; + + static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); } + static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); } + static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); } + static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); } + static Eigen::half round_error() { return Eigen::half(0.5); } + static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); } + static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); } + static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); } + static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); } 
+};
+
+// If std::numeric_limits<T> is specialized, should also specialize
+// std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
+// std::numeric_limits<const volatile T>
+// https://stackoverflow.com/a/16519653/
+template<>
+struct numeric_limits<const Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<volatile Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<const volatile Eigen::half> : numeric_limits<Eigen::half> {};
+} // end namespace std
+
+namespace Eigen {
+
+namespace half_impl {
+
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
+
+// Intrinsics for native fp16 support. Note that on current hardware,
+// these are no faster than float32_bits arithmetic (you need to use the half2
+// versions to get the ALU speed increased), but you do save the
+// conversion steps back and forth.
+
+EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) {
+  return __hadd(a, b);
+}
+EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) {
+  return __hmul(a, b);
+}
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) {
+  return __hsub(a, b);
+}
+EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) {
+  float num = __half2float(a);
+  float denom = __half2float(b);
+  return __float2half(num / denom);
+}
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a) {
+  return __hneg(a);
+}
+EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) {
+  a = a + b;
+  return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) {
+  a = a * b;
+  return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) {
+  a = a - b;
+  return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) {
+  a = a / b;
+  return a;
+}
+EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) {
+  return __heq(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) {
+  return __hne(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) {
+  return __hlt(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) {
+  return __hle(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) {
+  return __hgt(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) {
+  return __hge(a, b);
+}
+
+#else // Emulate support for half floats
+
+// Definitions for CPUs and older CUDA, mostly working through conversion
+// to/from float32_bits.
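Every operator in the emulated branch that follows uses the same three-step pattern: widen both operands to float, compute in float32, narrow the result back to half. float32's 24 significand bits are more than twice binary16's 11, so the single narrowing step rounds +, -, *, and / correctly. A minimal standalone sketch of that pattern, with simplified converters that handle normal values only (hypothetical names; the real half_to_float and float_to_half_rtne further down this file also cover Inf, NaN and denormals):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch: a binary16 value held as raw bits, like __half_raw above.
struct f16 { uint16_t x; };

// binary16 -> float, normal values only: rebias the exponent from 15 to 127.
static float to_float(f16 h) {
  uint32_t expmant = (uint32_t)(h.x & 0x7fff) << 13;           // exponent + mantissa
  uint32_t bits = ((uint32_t)(h.x & 0x8000) << 16)             // sign bit
                | (expmant + ((uint32_t)(127 - 15) << 23));    // rebias exponent
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

// float -> binary16, normal values only; same rounding-bias trick as the
// normal-number branch of float_to_half_rtne below.
static f16 from_float(float v) {
  uint32_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  uint32_t sign = (bits >> 16) & 0x8000;
  bits &= 0x7fffffff;
  bits += 0xfff + ((bits >> 13) & 1);                          // round to nearest even
  return f16{ (uint16_t)(sign | ((bits - ((uint32_t)(127 - 15) << 23)) >> 13)) };
}

// The emulation pattern itself: widen, operate in float32, narrow back.
static f16 h_add(f16 a, f16 b) { return from_float(to_float(a) + to_float(b)); }

int main() {
  f16 one{0x3c00}, half{0x3800};                // 1.0 and 0.5 in binary16
  std::printf("0x%04x\n", h_add(one, half).x);  // prints 0x3e00, i.e. 1.5
}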
+ +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) { + return half(float(a) + float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) { + return half(float(a) * float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) { + return half(float(a) - float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) { + return half(float(a) / float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) { + half result; + result.x = a.x ^ 0x8000; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) { + a = half(float(a) + float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) { + a = half(float(a) * float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) { + a = half(float(a) - float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) { + a = half(float(a) / float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) { + return numext::equal_strict(float(a),float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) { + return numext::not_equal_strict(float(a), float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) { + return float(a) < float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) { + return float(a) <= float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) { + return float(a) > float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) { + return float(a) >= float(b); +} + +#endif // Emulate support for half floats + +// Division by an index. Do it in full float precision to avoid accuracy +// issues in converting the denominator to half. +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) { + return half(static_cast(a) / static_cast(b)); +} + +// Conversion routines, including fallbacks for the host or older CUDA. +// Note that newer Intel CPUs (Haswell or newer) have vectorized versions of +// these in hardware. If we need more performance on older/other CPUs, they are +// also possible to vectorize directly. + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x) { + __half_raw h; + h.x = x; + return h; +} + +union float32_bits { + unsigned int u; + float f; +}; + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) { +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300 + __half tmp_ff = __float2half(ff); + return *(__half_raw*)&tmp_ff; + +#elif defined(EIGEN_HAS_FP16_C) + __half_raw h; + h.x = _cvtss_sh(ff, 0); + return h; + +#else + float32_bits f; f.f = ff; + + const float32_bits f32infty = { 255 << 23 }; + const float32_bits f16max = { (127 + 16) << 23 }; + const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 }; + unsigned int sign_mask = 0x80000000u; + __half_raw o; + o.x = static_cast(0x0u); + + unsigned int sign = f.u & sign_mask; + f.u ^= sign; + + // NOTE all the integer compares in this function can be safely + // compiled into signed compares since all operands are below + // 0x80000000. 
Important if you want fast straight SSE2 code + // (since there's no unsigned PCMPGTD). + + if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set) + o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf + } else { // (De)normalized number or zero + if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero + // use a magic value to align our 10 mantissa bits at the bottom of + // the float. as long as FP addition is round-to-nearest-even this + // just works. + f.f += denorm_magic.f; + + // and one integer subtract of the bias later, we have our final float! + o.x = static_cast(f.u - denorm_magic.u); + } else { + unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd + + // update exponent, rounding bias part 1 + f.u += ((unsigned int)(15 - 127) << 23) + 0xfff; + // rounding bias part 2 + f.u += mant_odd; + // take the bits! + o.x = static_cast(f.u >> 13); + } + } + + o.x |= static_cast(sign >> 16); + return o; +#endif +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) { +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300 + return __half2float(h); + +#elif defined(EIGEN_HAS_FP16_C) + return _cvtsh_ss(h.x); + +#else + const float32_bits magic = { 113 << 23 }; + const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift + float32_bits o; + + o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits + unsigned int exp = shifted_exp & o.u; // just the exponent + o.u += (127 - 15) << 23; // exponent adjust + + // handle exponent special cases + if (exp == shifted_exp) { // Inf/NaN? + o.u += (128 - 16) << 23; // extra exp adjust + } else if (exp == 0) { // Zero/Denormal? + o.u += 1 << 23; // extra exp adjust + o.f -= magic.f; // renormalize + } + + o.u |= (h.x & 0x8000) << 16; // sign bit + return o.f; +#endif +} + +// --- standard functions --- + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) { + return (a.x & 0x7fff) == 0x7c00; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) { +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530 + return __hisnan(a); +#else + return (a.x & 0x7fff) > 0x7c00; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) { + return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a)); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) { + half result; + result.x = a.x & 0x7FFF; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) { +#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530 + return half(hexp(a)); +#else + return half(::expf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) { +#if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530 + return half(::hlog(a)); +#else + return half(::logf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) { + return half(numext::log1p(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) { + return half(::log10f(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) { +#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530 + return half(hsqrt(a)); +#else + return half(::sqrtf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) { + return half(::powf(float(a), 
float(b))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) { + return half(::sinf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) { + return half(::cosf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) { + return half(::tanf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) { + return half(::tanhf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) { +#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300 + return half(hfloor(a)); +#else + return half(::floorf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) { +#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300 + return half(hceil(a)); +#else + return half(::ceilf(float(a))); +#endif +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) { +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530 + return __hlt(b, a) ? b : a; +#else + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f2 < f1 ? b : a; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) { +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530 + return __hlt(a, b) ? b : a; +#else + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f1 < f2 ? b : a; +#endif +} + +EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) { + os << static_cast(v); + return os; +} + +} // end namespace half_impl + +// import Eigen::half_impl::half into Eigen namespace +// using half_impl::half; + +namespace internal { + +template<> +struct random_default_impl +{ + static inline half run(const half& x, const half& y) + { + return x + (y-x) * half(float(std::rand()) / float(RAND_MAX)); + } + static inline half run() + { + return run(half(-1.f), half(1.f)); + } +}; + +template<> struct is_arithmetic { enum { value = true }; }; + +} // end namespace internal + +template<> struct NumTraits + : GenericNumTraits +{ + enum { + IsSigned = true, + IsInteger = false, + IsComplex = false, + RequireInitialization = false + }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half epsilon() { + return half_impl::raw_uint16_to_half(0x0800); + } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half dummy_precision() { return Eigen::half(1e-2f); } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half highest() { + return half_impl::raw_uint16_to_half(0x7bff); + } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half lowest() { + return half_impl::raw_uint16_to_half(0xfbff); + } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half infinity() { + return half_impl::raw_uint16_to_half(0x7c00); + } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() { + return half_impl::raw_uint16_to_half(0x7c01); + } +}; + +} // end namespace Eigen + +// C-like standard mathematical functions and trancendentals. 
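fabsh below, like pabs and the emulated unary minus earlier in this file, never converts to float at all: IEEE binary16 keeps its sign in bit 15, so absolute value clears that bit and negation flips it. A standalone sketch of the bit trick:

#include <cstdint>
#include <cstdio>

// Bit 15 of raw binary16 is the sign: abs clears it, negate flips it.
static uint16_t h_abs(uint16_t h) { return h & 0x7fff; }
static uint16_t h_neg(uint16_t h) { return h ^ 0x8000; }

int main() {
  uint16_t minus_two = 0xc000;  // -2.0 in binary16
  // Both print 0x4000, i.e. +2.0.
  std::printf("0x%04x 0x%04x\n", h_abs(minus_two), h_neg(minus_two));
}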
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half fabsh(const Eigen::half& a) { + Eigen::half result; + result.x = a.x & 0x7FFF; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half exph(const Eigen::half& a) { + return Eigen::half(::expf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half logh(const Eigen::half& a) { +#if EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530 + return Eigen::half(::hlog(a)); +#else + return Eigen::half(::logf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half sqrth(const Eigen::half& a) { + return Eigen::half(::sqrtf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half powh(const Eigen::half& a, const Eigen::half& b) { + return Eigen::half(::powf(float(a), float(b))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half floorh(const Eigen::half& a) { + return Eigen::half(::floorf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half ceilh(const Eigen::half& a) { + return Eigen::half(::ceilf(float(a))); +} + +namespace std { + +#if __cplusplus > 199711L +template <> +struct hash { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const { + return static_cast(a.x); + } +}; +#endif + +} // end namespace std + + +// Add the missing shfl_xor intrinsic +#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300 +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) { + #if EIGEN_CUDACC_VER < 90000 + return static_cast(__shfl_xor(static_cast(var), laneMask, width)); + #else + return static_cast(__shfl_xor_sync(0xFFFFFFFF, static_cast(var), laneMask, width)); + #endif +} +#endif + +// ldg() has an overload for __half_raw, but we also need one for Eigen::half. +#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350 +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half __ldg(const Eigen::half* ptr) { + return Eigen::half_impl::raw_uint16_to_half( + __ldg(reinterpret_cast(ptr))); +} +#endif + + +#if defined(EIGEN_CUDA_ARCH) +namespace Eigen { +namespace numext { + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isnan)(const Eigen::half& h) { + return (half_impl::isnan)(h); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isinf)(const Eigen::half& h) { + return (half_impl::isinf)(h); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isfinite)(const Eigen::half& h) { + return (half_impl::isfinite)(h); +} + +} // namespace Eigen +} // namespace numext +#endif + +#endif // EIGEN_HALF_CUDA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/MathFunctions.h new file mode 100644 index 000000000..0348b41db --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/MathFunctions.h @@ -0,0 +1,91 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H
+#define EIGEN_MATH_FUNCTIONS_CUDA_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 plog<float4>(const float4& a)
+{
+  return make_float4(logf(a.x), logf(a.y), logf(a.z), logf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 plog<double2>(const double2& a)
+{
+  using ::log;
+  return make_double2(log(a.x), log(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 plog1p<float4>(const float4& a)
+{
+  return make_float4(log1pf(a.x), log1pf(a.y), log1pf(a.z), log1pf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 plog1p<double2>(const double2& a)
+{
+  return make_double2(log1p(a.x), log1p(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pexp<float4>(const float4& a)
+{
+  return make_float4(expf(a.x), expf(a.y), expf(a.z), expf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pexp<double2>(const double2& a)
+{
+  using ::exp;
+  return make_double2(exp(a.x), exp(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 psqrt<float4>(const float4& a)
+{
+  return make_float4(sqrtf(a.x), sqrtf(a.y), sqrtf(a.z), sqrtf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 psqrt<double2>(const double2& a)
+{
+  using ::sqrt;
+  return make_double2(sqrt(a.x), sqrt(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 prsqrt<float4>(const float4& a)
+{
+  return make_float4(rsqrtf(a.x), rsqrtf(a.y), rsqrtf(a.z), rsqrtf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 prsqrt<double2>(const double2& a)
+{
+  return make_double2(rsqrt(a.x), rsqrt(a.y));
+}
+
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_CUDA_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMath.h
new file mode 100644
index 000000000..4dda63188
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMath.h
@@ -0,0 +1,333 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_CUDA_H
+#define EIGEN_PACKET_MATH_CUDA_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
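The guarded block that follows treats CUDA's built-in float4 and double2 vector types as four- and two-lane packets: each primitive (padd, pmul, pdiv, ...) applies the scalar operation once per lane and rebuilds the vector with make_float4 / make_double2. A host-compilable sketch of that lane-wise scheme, with float4 and make_float4 mimicked locally so the example runs without nvcc:

#include <cstdio>

// Stand-ins for CUDA's float4 / make_float4 so this compiles on the host.
struct float4 { float x, y, z, w; };
static float4 make_float4(float x, float y, float z, float w) { return {x, y, z, w}; }

// Same shape as the padd<float4> specialization below: one scalar op per lane.
static float4 padd(const float4& a, const float4& b) {
  return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}

int main() {
  float4 r = padd(make_float4(1, 2, 3, 4), make_float4(10, 20, 30, 40));
  std::printf("%g %g %g %g\n", r.x, r.y, r.z, r.w);  // 11 22 33 44
}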
+#if defined(__CUDACC__) && defined(EIGEN_USE_GPU) +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct packet_traits : default_packet_traits +{ + typedef float4 type; + typedef float4 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + HasHalfPacket = 0, + + HasDiv = 1, + HasSin = 0, + HasCos = 0, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasLGamma = 1, + HasDiGamma = 1, + HasZeta = 1, + HasPolygamma = 1, + HasErf = 1, + HasErfc = 1, + HasIGamma = 1, + HasIGammac = 1, + HasBetaInc = 1, + + HasBlend = 0, + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef double2 type; + typedef double2 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 0, + + HasDiv = 1, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasLGamma = 1, + HasDiGamma = 1, + HasZeta = 1, + HasPolygamma = 1, + HasErf = 1, + HasErfc = 1, + HasIGamma = 1, + HasIGammac = 1, + HasBetaInc = 1, + + HasBlend = 0, + }; +}; + + +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16}; typedef float4 half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16}; typedef double2 half; }; + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1(const float& from) { + return make_float4(from, from, from, from); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1(const double& from) { + return make_double2(from, from); +} + + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset(const float& a) { + return make_float4(a, a+1, a+2, a+3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset(const double& a) { + return make_double2(a, a+1); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd(const float4& a, const float4& b) { + return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd(const double2& a, const double2& b) { + return make_double2(a.x+b.x, a.y+b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub(const float4& a, const float4& b) { + return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub(const double2& a, const double2& b) { + return make_double2(a.x-b.x, a.y-b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) { + return make_float4(-a.x, -a.y, -a.z, -a.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) { + return make_double2(-a.x, -a.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; } + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul(const float4& a, const float4& b) { + return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul(const double2& a, const double2& b) { + return make_double2(a.x*b.x, a.y*b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv(const float4& a, const float4& b) { + return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv(const double2& a, const double2& b) { + return make_double2(a.x/b.x, a.y/b.y); +} + +template<> EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE float4 pmin(const float4& a, const float4& b) { + return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w)); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin(const double2& a, const double2& b) { + return make_double2(fmin(a.x, b.x), fmin(a.y, b.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax(const float4& a, const float4& b) { + return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w)); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax(const double2& a, const double2& b) { + return make_double2(fmax(a.x, b.x), fmax(a.y, b.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload(const float* from) { + return *reinterpret_cast(from); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload(const double* from) { + return *reinterpret_cast(from); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu(const float* from) { + return make_float4(from[0], from[1], from[2], from[3]); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu(const double* from) { + return make_double2(from[0], from[1]); +} + +template<> EIGEN_STRONG_INLINE float4 ploaddup(const float* from) { + return make_float4(from[0], from[0], from[1], from[1]); +} +template<> EIGEN_STRONG_INLINE double2 ploaddup(const double* from) { + return make_double2(from[0], from[0]); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(float* to, const float4& from) { + *reinterpret_cast(to) = from; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(double* to, const double2& from) { + *reinterpret_cast(to) = from; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(float* to, const float4& from) { + to[0] = from.x; + to[1] = from.y; + to[2] = from.z; + to[3] = from.w; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(double* to, const double2& from) { + to[0] = from.x; + to[1] = from.y; +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro(const float* from) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 + return __ldg((const float4*)from); +#else + return make_float4(from[0], from[1], from[2], from[3]); +#endif +} +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro(const double* from) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 + return __ldg((const double2*)from); +#else + return make_double2(from[0], from[1]); +#endif +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro(const float* from) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 + return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3)); +#else + return make_float4(from[0], from[1], from[2], from[3]); +#endif +} +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro(const double* from) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 + return make_double2(__ldg(from+0), __ldg(from+1)); +#else + return make_double2(from[0], from[1]); +#endif +} + +template<> EIGEN_DEVICE_FUNC inline float4 pgather(const float* from, Index stride) { + return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline double2 pgather(const double* from, Index stride) { + return make_double2(from[0*stride], from[1*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const float4& from, Index stride) { + to[stride*0] = from.x; + to[stride*1] = from.y; + 
to[stride*2] = from.z; + to[stride*3] = from.w; +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const double2& from, Index stride) { + to[stride*0] = from.x; + to[stride*1] = from.y; +} + +template<> EIGEN_DEVICE_FUNC inline float pfirst(const float4& a) { + return a.x; +} +template<> EIGEN_DEVICE_FUNC inline double pfirst(const double2& a) { + return a.x; +} + +template<> EIGEN_DEVICE_FUNC inline float predux(const float4& a) { + return a.x + a.y + a.z + a.w; +} +template<> EIGEN_DEVICE_FUNC inline double predux(const double2& a) { + return a.x + a.y; +} + +template<> EIGEN_DEVICE_FUNC inline float predux_max(const float4& a) { + return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double predux_max(const double2& a) { + return fmax(a.x, a.y); +} + +template<> EIGEN_DEVICE_FUNC inline float predux_min(const float4& a) { + return fminf(fminf(a.x, a.y), fminf(a.z, a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double predux_min(const double2& a) { + return fmin(a.x, a.y); +} + +template<> EIGEN_DEVICE_FUNC inline float predux_mul(const float4& a) { + return a.x * a.y * a.z * a.w; +} +template<> EIGEN_DEVICE_FUNC inline double predux_mul(const double2& a) { + return a.x * a.y; +} + +template<> EIGEN_DEVICE_FUNC inline float4 pabs(const float4& a) { + return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double2 pabs(const double2& a) { + return make_double2(fabs(a.x), fabs(a.y)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + float tmp = kernel.packet[0].y; + kernel.packet[0].y = kernel.packet[1].x; + kernel.packet[1].x = tmp; + + tmp = kernel.packet[0].z; + kernel.packet[0].z = kernel.packet[2].x; + kernel.packet[2].x = tmp; + + tmp = kernel.packet[0].w; + kernel.packet[0].w = kernel.packet[3].x; + kernel.packet[3].x = tmp; + + tmp = kernel.packet[1].z; + kernel.packet[1].z = kernel.packet[2].y; + kernel.packet[2].y = tmp; + + tmp = kernel.packet[1].w; + kernel.packet[1].w = kernel.packet[3].y; + kernel.packet[3].y = tmp; + + tmp = kernel.packet[2].w; + kernel.packet[2].w = kernel.packet[3].z; + kernel.packet[3].z = tmp; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + double tmp = kernel.packet[0].y; + kernel.packet[0].y = kernel.packet[1].x; + kernel.packet[1].x = tmp; +} + +#endif + +} // end namespace internal + +} // end namespace Eigen + + +#endif // EIGEN_PACKET_MATH_CUDA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMathHalf.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMathHalf.h new file mode 100644 index 000000000..c66d38469 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/PacketMathHalf.h @@ -0,0 +1,1124 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PACKET_MATH_HALF_CUDA_H +#define EIGEN_PACKET_MATH_HALF_CUDA_H + + +namespace Eigen { +namespace internal { + +// Most of the following operations require arch >= 3.0 +#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDACC__) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 + +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct packet_traits : default_packet_traits +{ + typedef half2 type; + typedef half2 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 0, + HasAdd = 1, + HasMul = 1, + HasDiv = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasExp = 1, + HasLog = 1, + HasLog1p = 1 + }; +}; + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=2, alignment=Aligned16}; typedef half2 half; }; + +template<> __device__ EIGEN_STRONG_INLINE half2 pset1(const Eigen::half& from) { + return __half2half2(from); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pload(const Eigen::half* from) { + return *reinterpret_cast(from); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 ploadu(const Eigen::half* from) { + return __halves2half2(from[0], from[1]); +} + +template<> EIGEN_STRONG_INLINE half2 ploaddup(const Eigen::half* from) { + return __halves2half2(from[0], from[0]); +} + +template<> __device__ EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const half2& from) { + *reinterpret_cast(to) = from; +} + +template<> __device__ EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const half2& from) { + to[0] = __low2half(from); + to[1] = __high2half(from); +} + +template<> + __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro(const Eigen::half* from) { +#if __CUDA_ARCH__ >= 350 + return __ldg((const half2*)from); +#else + return __halves2half2(*(from+0), *(from+1)); +#endif +} + +template<> +__device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro(const Eigen::half* from) { +#if __CUDA_ARCH__ >= 350 + return __halves2half2(__ldg(from+0), __ldg(from+1)); +#else + return __halves2half2(*(from+0), *(from+1)); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pgather(const Eigen::half* from, Index stride) { + return __halves2half2(from[0*stride], from[1*stride]); +} + +template<> __device__ EIGEN_STRONG_INLINE void pscatter(Eigen::half* to, const half2& from, Index stride) { + to[stride*0] = __low2half(from); + to[stride*1] = __high2half(from); +} + +template<> __device__ EIGEN_STRONG_INLINE Eigen::half pfirst(const half2& a) { + return __low2half(a); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pabs(const half2& a) { + half2 result; + unsigned temp = *(reinterpret_cast(&(a))); + *(reinterpret_cast(&(result))) = temp & 0x7FFF7FFF; + return result; +} + + +__device__ EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __half a1 = __low2half(kernel.packet[0]); + __half a2 = __high2half(kernel.packet[0]); + __half b1 = __low2half(kernel.packet[1]); + __half b2 = __high2half(kernel.packet[1]); + kernel.packet[0] = __halves2half2(a1, b1); + kernel.packet[1] = __halves2half2(a2, b2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 plset(const Eigen::half& a) { +#if __CUDA_ARCH__ >= 530 + return __halves2half2(a, __hadd(a, __float2half(1.0f))); +#else + float f = __half2float(a) + 1.0f; + return __halves2half2(a, __float2half(f)); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 padd(const half2& a, const half2& b) { +#if __CUDA_ARCH__ >= 530 + return __hadd2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = 
__high2float(b); + float r1 = a1 + b1; + float r2 = a2 + b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 psub(const half2& a, const half2& b) { +#if __CUDA_ARCH__ >= 530 + return __hsub2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 - b1; + float r2 = a2 - b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pnegate(const half2& a) { +#if __CUDA_ARCH__ >= 530 + return __hneg2(a); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return __floats2half2_rn(-a1, -a2); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; } + +template<> __device__ EIGEN_STRONG_INLINE half2 pmul(const half2& a, const half2& b) { +#if __CUDA_ARCH__ >= 530 + return __hmul2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 * b1; + float r2 = a2 * b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pmadd(const half2& a, const half2& b, const half2& c) { +#if __CUDA_ARCH__ >= 530 + return __hfma2(a, b, c); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float c1 = __low2float(c); + float c2 = __high2float(c); + float r1 = a1 * b1 + c1; + float r2 = a2 * b2 + c2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pdiv(const half2& a, const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 / b1; + float r2 = a2 / b2; + return __floats2half2_rn(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pmin(const half2& a, const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 < b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 < b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pmax(const half2& a, const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 > b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 > b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux(const half2& a) { +#if __CUDA_ARCH__ >= 530 + return __hadd(__low2half(a), __high2half(a)); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 + a2))); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max(const half2& a) { +#if __CUDA_ARCH__ >= 530 + __half first = __low2half(a); + __half second = __high2half(a); + return __hgt(first, second) ? first : second; +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return a1 > a2 ? __low2half(a) : __high2half(a); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min(const half2& a) { +#if __CUDA_ARCH__ >= 530 + __half first = __low2half(a); + __half second = __high2half(a); + return __hlt(first, second) ? 
first : second; +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return a1 < a2 ? __low2half(a) : __high2half(a); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_mul(const half2& a) { +#if __CUDA_ARCH__ >= 530 + return __hmul(__low2half(a), __high2half(a)); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 * a2))); +#endif +} + +template<> __device__ EIGEN_STRONG_INLINE half2 plog1p(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = log1pf(a1); + float r2 = log1pf(a2); + return __floats2half2_rn(r1, r2); +} + +#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530 + +template<> __device__ EIGEN_STRONG_INLINE +half2 plog(const half2& a) { + return h2log(a); +} + +template<> __device__ EIGEN_STRONG_INLINE +half2 pexp(const half2& a) { + return h2exp(a); +} + +template<> __device__ EIGEN_STRONG_INLINE +half2 psqrt(const half2& a) { + return h2sqrt(a); +} + +template<> __device__ EIGEN_STRONG_INLINE +half2 prsqrt(const half2& a) { + return h2rsqrt(a); +} + +#else + +template<> __device__ EIGEN_STRONG_INLINE half2 plog(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = logf(a1); + float r2 = logf(a2); + return __floats2half2_rn(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 pexp(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = expf(a1); + float r2 = expf(a2); + return __floats2half2_rn(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 psqrt(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = sqrtf(a1); + float r2 = sqrtf(a2); + return __floats2half2_rn(r1, r2); +} + +template<> __device__ EIGEN_STRONG_INLINE half2 prsqrt(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = rsqrtf(a1); + float r2 = rsqrtf(a2); + return __floats2half2_rn(r1, r2); +} + +#endif + +#elif defined EIGEN_VECTORIZE_AVX512 + +typedef struct { + __m256i x; +} Packet16h; + + +template<> struct is_arithmetic { enum { value = true }; }; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet16h type; + // There is no half-size packet for Packet16h. 
+ typedef Packet16h half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 0, + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasDiv = 0, + HasSqrt = 0, + HasRsqrt = 0, + HasExp = 0, + HasLog = 0, + HasBlend = 0 + }; +}; + + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=16, alignment=Aligned32}; typedef Packet16h half; }; + +template<> EIGEN_STRONG_INLINE Packet16h pset1(const Eigen::half& from) { + Packet16h result; + result.x = _mm256_set1_epi16(from.x); + return result; +} + +template<> EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet16h& from) { + return half_impl::raw_uint16_to_half(static_cast(_mm256_extract_epi16(from.x, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pload(const Eigen::half* from) { + Packet16h result; + result.x = _mm256_load_si256(reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet16h ploadu(const Eigen::half* from) { + Packet16h result; + result.x = _mm256_loadu_si256(reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet16h& from) { + _mm256_store_si256((__m256i*)to, from.x); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet16h& from) { + _mm256_storeu_si256((__m256i*)to, from.x); +} + +template<> EIGEN_STRONG_INLINE Packet16h +ploadquad(const Eigen::half* from) { + Packet16h result; + unsigned short a = from[0].x; + unsigned short b = from[1].x; + unsigned short c = from[2].x; + unsigned short d = from[3].x; + result.x = _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a); + return result; +} + +EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) { +#ifdef EIGEN_HAS_FP16_C + return _mm512_cvtph_ps(a.x); +#else + EIGEN_ALIGN64 half aux[16]; + pstore(aux, a); + float f0(aux[0]); + float f1(aux[1]); + float f2(aux[2]); + float f3(aux[3]); + float f4(aux[4]); + float f5(aux[5]); + float f6(aux[6]); + float f7(aux[7]); + float f8(aux[8]); + float f9(aux[9]); + float fa(aux[10]); + float fb(aux[11]); + float fc(aux[12]); + float fd(aux[13]); + float fe(aux[14]); + float ff(aux[15]); + + return _mm512_set_ps( + ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0); +#endif +} + +EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) { +#ifdef EIGEN_HAS_FP16_C + Packet16h result; + result.x = _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC); + return result; +#else + EIGEN_ALIGN64 float aux[16]; + pstore(aux, a); + half h0(aux[0]); + half h1(aux[1]); + half h2(aux[2]); + half h3(aux[3]); + half h4(aux[4]); + half h5(aux[5]); + half h6(aux[6]); + half h7(aux[7]); + half h8(aux[8]); + half h9(aux[9]); + half ha(aux[10]); + half hb(aux[11]); + half hc(aux[12]); + half hd(aux[13]); + half he(aux[14]); + half hf(aux[15]); + + Packet16h result; + result.x = _mm256_set_epi16( + hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x, + h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x); + return result; +#endif +} + +template<> EIGEN_STRONG_INLINE Packet16h padd(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = padd(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet16h pmul(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = pmul(af, bf); + return float2half(rf); +} + 
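padd and pmul above show the strategy used throughout this AVX512 branch: half packets are never operated on directly, they are widened to float packets, combined there, and narrowed back, which on F16C-capable CPUs costs one conversion instruction each way. A standalone sketch of the same widen/compute/narrow round trip on four halves (assumes an x86 compiler with -mf16c; _mm_cvtph_ps and _mm_cvtps_ph are the narrower cousins of the _mm512/_mm256 conversions used by half2float / float2half here):

#include <immintrin.h>
#include <cstdio>

// Widen 4 halves to float, compute in float32, narrow back (build with -mf16c).
int main() {
  unsigned short in[4] = {0x3c00, 0x4000, 0x4200, 0x4400};  // 1, 2, 3, 4 in binary16
  __m128  f = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)in));
  f = _mm_add_ps(f, f);                                     // the actual arithmetic
  __m128i h = _mm_cvtps_ph(f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  unsigned short out[4];
  _mm_storel_epi64((__m128i*)out, h);
  std::printf("%04x %04x %04x %04x\n", out[0], out[1], out[2], out[3]);
  // 4000 4400 4600 4800  ==  2, 4, 6, 8 in binary16
}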
+template<> EIGEN_STRONG_INLINE half predux(const Packet16h& from) { + Packet16f from_float = half2float(from); + return half(predux(from_float)); +} + +template<> EIGEN_STRONG_INLINE Packet16h pgather(const Eigen::half* from, Index stride) +{ + Packet16h result; + result.x = _mm256_set_epi16( + from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x, + from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x, + from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, + from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x); + return result; +} + +template<> EIGEN_STRONG_INLINE void pscatter(half* to, const Packet16h& from, Index stride) +{ + EIGEN_ALIGN64 half aux[16]; + pstore(aux, from); + to[stride*0].x = aux[0].x; + to[stride*1].x = aux[1].x; + to[stride*2].x = aux[2].x; + to[stride*3].x = aux[3].x; + to[stride*4].x = aux[4].x; + to[stride*5].x = aux[5].x; + to[stride*6].x = aux[6].x; + to[stride*7].x = aux[7].x; + to[stride*8].x = aux[8].x; + to[stride*9].x = aux[9].x; + to[stride*10].x = aux[10].x; + to[stride*11].x = aux[11].x; + to[stride*12].x = aux[12].x; + to[stride*13].x = aux[13].x; + to[stride*14].x = aux[14].x; + to[stride*15].x = aux[15].x; +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m256i a = kernel.packet[0].x; + __m256i b = kernel.packet[1].x; + __m256i c = kernel.packet[2].x; + __m256i d = kernel.packet[3].x; + __m256i e = kernel.packet[4].x; + __m256i f = kernel.packet[5].x; + __m256i g = kernel.packet[6].x; + __m256i h = kernel.packet[7].x; + __m256i i = kernel.packet[8].x; + __m256i j = kernel.packet[9].x; + __m256i k = kernel.packet[10].x; + __m256i l = kernel.packet[11].x; + __m256i m = kernel.packet[12].x; + __m256i n = kernel.packet[13].x; + __m256i o = kernel.packet[14].x; + __m256i p = kernel.packet[15].x; + + __m256i ab_07 = _mm256_unpacklo_epi16(a, b); + __m256i cd_07 = _mm256_unpacklo_epi16(c, d); + __m256i ef_07 = _mm256_unpacklo_epi16(e, f); + __m256i gh_07 = _mm256_unpacklo_epi16(g, h); + __m256i ij_07 = _mm256_unpacklo_epi16(i, j); + __m256i kl_07 = _mm256_unpacklo_epi16(k, l); + __m256i mn_07 = _mm256_unpacklo_epi16(m, n); + __m256i op_07 = _mm256_unpacklo_epi16(o, p); + + __m256i ab_8f = _mm256_unpackhi_epi16(a, b); + __m256i cd_8f = _mm256_unpackhi_epi16(c, d); + __m256i ef_8f = _mm256_unpackhi_epi16(e, f); + __m256i gh_8f = _mm256_unpackhi_epi16(g, h); + __m256i ij_8f = _mm256_unpackhi_epi16(i, j); + __m256i kl_8f = _mm256_unpackhi_epi16(k, l); + __m256i mn_8f = _mm256_unpackhi_epi16(m, n); + __m256i op_8f = _mm256_unpackhi_epi16(o, p); + + __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07); + __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07); + __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07); + __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07); + __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07); + __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07); + __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07); + __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07); + + __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f); + __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f); + __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f); + __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f); + __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f); + __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f); + __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f); + __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f); + + __m256i abcdefgh_01 
= _mm256_unpacklo_epi64(abcd_03, efgh_03); + __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03); + __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03); + __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03); + __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47); + __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47); + __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47); + __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47); + __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b); + __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b); + __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b); + __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b); + __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf); + __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf); + __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf); + __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf); + + // NOTE: no unpacklo/hi instr in this case, so using permute instr. + __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20); + __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31); + __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20); + __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31); + __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20); + __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31); + __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20); + __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31); + __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20); + __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31); + __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20); + __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31); + __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20); + __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31); + __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20); + __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31); + + kernel.packet[0].x = a_p_0; + kernel.packet[1].x = a_p_1; + kernel.packet[2].x = a_p_2; + kernel.packet[3].x = a_p_3; + kernel.packet[4].x = a_p_4; + kernel.packet[5].x = a_p_5; + kernel.packet[6].x = a_p_6; + kernel.packet[7].x = a_p_7; + kernel.packet[8].x = a_p_8; + kernel.packet[9].x = a_p_9; + kernel.packet[10].x = a_p_a; + kernel.packet[11].x = a_p_b; + kernel.packet[12].x = a_p_c; + kernel.packet[13].x = a_p_d; + kernel.packet[14].x = a_p_e; + kernel.packet[15].x = a_p_f; +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN64 half in[8][16]; + pstore(in[0], kernel.packet[0]); + pstore(in[1], kernel.packet[1]); + pstore(in[2], kernel.packet[2]); + pstore(in[3], kernel.packet[3]); + pstore(in[4], kernel.packet[4]); + pstore(in[5], kernel.packet[5]); + pstore(in[6], kernel.packet[6]); + pstore(in[7], kernel.packet[7]); + + EIGEN_ALIGN64 half out[8][16]; + + for (int i = 0; i < 8; ++i) { + for (int j = 0; j < 8; ++j) { + out[i][j] = in[j][2*i]; + } + for (int j = 0; j < 8; ++j) { + out[i][j+8] = in[j][2*i+1]; + } + } + + kernel.packet[0] = pload(out[0]); + kernel.packet[1] = pload(out[1]); + kernel.packet[2] = pload(out[2]); + kernel.packet[3] = pload(out[3]); + kernel.packet[4] 
= pload(out[4]); + kernel.packet[5] = pload(out[5]); + kernel.packet[6] = pload(out[6]); + kernel.packet[7] = pload(out[7]); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN64 half in[4][16]; + pstore(in[0], kernel.packet[0]); + pstore(in[1], kernel.packet[1]); + pstore(in[2], kernel.packet[2]); + pstore(in[3], kernel.packet[3]); + + EIGEN_ALIGN64 half out[4][16]; + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + out[i][j] = in[j][4*i]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+4] = in[j][4*i+1]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+8] = in[j][4*i+2]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+12] = in[j][4*i+3]; + } + } + + kernel.packet[0] = pload(out[0]); + kernel.packet[1] = pload(out[1]); + kernel.packet[2] = pload(out[2]); + kernel.packet[3] = pload(out[3]); +} + + +#elif defined EIGEN_VECTORIZE_AVX + +typedef struct { + __m128i x; +} Packet8h; + + +template<> struct is_arithmetic { enum { value = true }; }; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8h type; + // There is no half-size packet for Packet8h. + typedef Packet8h half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasDiv = 0, + HasSqrt = 0, + HasRsqrt = 0, + HasExp = 0, + HasLog = 0, + HasBlend = 0 + }; +}; + + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=8, alignment=Aligned16}; typedef Packet8h half; }; + +template<> EIGEN_STRONG_INLINE Packet8h pset1(const Eigen::half& from) { + Packet8h result; + result.x = _mm_set1_epi16(from.x); + return result; +} + +template<> EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet8h& from) { + return half_impl::raw_uint16_to_half(static_cast(_mm_extract_epi16(from.x, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pload(const Eigen::half* from) { + Packet8h result; + result.x = _mm_load_si128(reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet8h ploadu(const Eigen::half* from) { + Packet8h result; + result.x = _mm_loadu_si128(reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet8h& from) { + _mm_store_si128(reinterpret_cast<__m128i*>(to), from.x); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet8h& from) { + _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from.x); +} + +template<> EIGEN_STRONG_INLINE Packet8h +ploadquad(const Eigen::half* from) { + Packet8h result; + unsigned short a = from[0].x; + unsigned short b = from[1].x; + result.x = _mm_set_epi16(b, b, b, b, a, a, a, a); + return result; +} + +EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) { +#ifdef EIGEN_HAS_FP16_C + return _mm256_cvtph_ps(a.x); +#else + EIGEN_ALIGN32 Eigen::half aux[8]; + pstore(aux, a); + float f0(aux[0]); + float f1(aux[1]); + float f2(aux[2]); + float f3(aux[3]); + float f4(aux[4]); + float f5(aux[5]); + float f6(aux[6]); + float f7(aux[7]); + + return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0); +#endif +} + +EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) { +#ifdef EIGEN_HAS_FP16_C + Packet8h result; + result.x = _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC); + return result; +#else + EIGEN_ALIGN32 float aux[8]; + pstore(aux, a); + Eigen::half h0(aux[0]); + Eigen::half h1(aux[1]); + Eigen::half h2(aux[2]); 
+
+template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
+  Packet8f af = half2float(a);
+  Packet8f bf = half2float(b);
+  Packet8f rf = padd(af, bf);
+  return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
+  Packet8f af = half2float(a);
+  Packet8f bf = half2float(b);
+  Packet8f rf = pmul(af, bf);
+  return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
+{
+  Packet8h result;
+  result.x = _mm_set_epi16(from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
+{
+  EIGEN_ALIGN32 Eigen::half aux[8];
+  pstore(aux, from);
+  to[stride*0].x = aux[0].x;
+  to[stride*1].x = aux[1].x;
+  to[stride*2].x = aux[2].x;
+  to[stride*3].x = aux[3].x;
+  to[stride*4].x = aux[4].x;
+  to[stride*5].x = aux[5].x;
+  to[stride*6].x = aux[6].x;
+  to[stride*7].x = aux[7].x;
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
+  Packet8f af = half2float(a);
+  float reduced = predux(af);
+  return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
+  Packet8f af = half2float(a);
+  float reduced = predux_max(af);
+  return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
+  Packet8f af = half2float(a);
+  float reduced = predux_min(af);
+  return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
+  Packet8f af = half2float(a);
+  float reduced = predux_mul(af);
+  return Eigen::half(reduced);
+}
+
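+// The 8x8 fp16 transpose below is a three-stage unpack network:
+// _mm_unpacklo/hi_epi16 interleaves 16-bit lanes of row pairs,
+// _mm_unpacklo/hi_epi32 merges pairs of pairs, and _mm_unpacklo/hi_epi64
+// emits whole transposed rows. The temporaries are named after the source
+// lanes they hold, e.g. a03b03 = | a0 b0 a1 b1 a2 b2 a3 b3 |.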
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8h,8>& kernel) {
+  __m128i a = kernel.packet[0].x;
+  __m128i b = kernel.packet[1].x;
+  __m128i c = kernel.packet[2].x;
+  __m128i d = kernel.packet[3].x;
+  __m128i e = kernel.packet[4].x;
+  __m128i f = kernel.packet[5].x;
+  __m128i g = kernel.packet[6].x;
+  __m128i h = kernel.packet[7].x;
+
+  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
+  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
+  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
+  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
+  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
+  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
+  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
+  __m128i g47h47 = _mm_unpackhi_epi16(g, h);
+
+  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
+  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
+  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
+  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
+  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
+  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
+  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
+  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
+
+  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
+  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
+  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
+  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
+  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
+  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
+  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
+  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
+
+  kernel.packet[0].x = a0b0c0d0e0f0g0h0;
+  kernel.packet[1].x = a1b1c1d1e1f1g1h1;
+  kernel.packet[2].x = a2b2c2d2e2f2g2h2;
+  kernel.packet[3].x = a3b3c3d3e3f3g3h3;
+  kernel.packet[4].x = a4b4c4d4e4f4g4h4;
+  kernel.packet[5].x = a5b5c5d5e5f5g5h5;
+  kernel.packet[6].x = a6b6c6d6e6f6g6h6;
+  kernel.packet[7].x = a7b7c7d7e7f7g7h7;
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8h,4>& kernel) {
+  EIGEN_ALIGN32 Eigen::half in[4][8];
+  pstore(in[0], kernel.packet[0]);
+  pstore(in[1], kernel.packet[1]);
+  pstore(in[2], kernel.packet[2]);
+  pstore(in[3], kernel.packet[3]);
+
+  EIGEN_ALIGN32 Eigen::half out[4][8];
+
+  for (int i = 0; i < 4; ++i) {
+    for (int j = 0; j < 4; ++j) {
+      out[i][j] = in[j][2*i];
+    }
+    for (int j = 0; j < 4; ++j) {
+      out[i][j+4] = in[j][2*i+1];
+    }
+  }
+
+  kernel.packet[0] = pload<Packet8h>(out[0]);
+  kernel.packet[1] = pload<Packet8h>(out[1]);
+  kernel.packet[2] = pload<Packet8h>(out[2]);
+  kernel.packet[3] = pload<Packet8h>(out[3]);
+}
+
+
+// Disable the following code since it's broken on too many platforms / compilers.
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
+#elif 0
+
+typedef struct {
+  __m64 x;
+} Packet4h;
+
+
+template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+  typedef Packet4h type;
+  // There is no half-size packet for Packet4h.
+  typedef Packet4h half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+    HasHalfPacket = 0,
+    HasAdd = 0,
+    HasSub = 0,
+    HasMul = 0,
+    HasNegate = 0,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasConj = 0,
+    HasSetLinear = 0,
+    HasDiv = 0,
+    HasSqrt = 0,
+    HasRsqrt = 0,
+    HasExp = 0,
+    HasLog = 0,
+    HasBlend = 0
+  };
+};
+
+
+template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16}; typedef Packet4h half; };
+
+template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
+  Packet4h result;
+  result.x = _mm_set1_pi16(from.x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
+  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha + hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha * hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
+  Packet4h result;
+  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
+  Packet4h result;
+  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+  __int64_t r = _mm_cvtm64_si64(from.x);
+  *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+  __int64_t r = _mm_cvtm64_si64(from.x);
+  *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h
+ploadquad<Packet4h>(const Eigen::half* from) {
+  return pset1<Packet4h>(*from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
+{
+  Packet4h result;
+  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
+{
+  __int64_t a = _mm_cvtm64_si64(from.x);
+  to[stride*0].x = static_cast<unsigned short>(a);
+  to[stride*1].x = static_cast<unsigned short>(a >> 16);
+  to[stride*2].x = static_cast<unsigned short>(a >> 32);
+  to[stride*3].x = static_cast<unsigned short>(a >> 48);
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet4h,4>& kernel) {
+  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
+  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
+  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
+  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
+
+  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
+  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
+  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
+  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
+}
+
+#endif
+
+}
+}
+
+#endif // EIGEN_PACKET_MATH_HALF_CUDA_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/TypeCasting.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/TypeCasting.h
new file mode 100644
index 000000000..aa5fbce8e
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/CUDA/TypeCasting.h
@@ -0,0 +1,212 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_CUDA_H
+#define EIGEN_TYPE_CASTING_CUDA_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<>
+struct scalar_cast_op<float, Eigen::half> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef Eigen::half result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
+    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+      return __float2half(a);
+    #else
+      return Eigen::half(a);
+    #endif
+  }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<float, Eigen::half> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<int, Eigen::half> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef Eigen::half result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
+    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+      return __float2half(static_cast<float>(a));
+    #else
+      return Eigen::half(static_cast<float>(a));
+    #endif
+  }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<int, Eigen::half> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<Eigen::half, float> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef float result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
+    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+      return __half2float(a);
+    #else
+      return static_cast<float>(a);
+    #endif
+  }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<Eigen::half, float> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 2,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) {
+  float2 r1 = __half22float2(a);
+  float2 r2 = __half22float2(b);
+  return make_float4(r1.x, r1.y, r2.x, r2.y);
+}
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 2
+  };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) {
+  // Simply discard the second half of the input
+  return __floats2half2_rn(a.x, a.y);
+}
+
+#elif defined EIGEN_VECTORIZE_AVX512
+template <>
+struct type_casting_traits<half, float> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
+  return half2float(a);
+}
+
+template <>
+struct type_casting_traits<float, half> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
+  return float2half(a);
+}
+
+#elif defined EIGEN_VECTORIZE_AVX
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
+  return half2float(a);
+}
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
+  return float2half(a);
+}
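These pcast specializations are what a coefficient-wise cast can lower to. A minimal usage sketch (not part of the patch; assumes Eigen on the include path):

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Array<Eigen::half, 8, 1> h;
  for (int i = 0; i < 8; ++i) h[i] = Eigen::half(0.5f * i);
  // May lower to pcast<Packet8h, Packet8f> when EIGEN_VECTORIZE_AVX is on;
  // the result is the same either way.
  Eigen::Array<float, 8, 1> f = h.cast<float>();
  std::cout << f.transpose() << "\n";
  return 0;
}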
+
+// Disable the following code since it's broken on too many platforms / compilers.
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
+#elif 0
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
+  float f1 = static_cast<float>(h);
+  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  float f2 = static_cast<float>(h);
+  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  float f3 = static_cast<float>(h);
+  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  float f4 = static_cast<float>(h);
+  return _mm_set_ps(f4, f3, f2, f1);
+}
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+  enum {
+    VectorizedCast = 1,
+    SrcCoeffRatio = 1,
+    TgtCoeffRatio = 1
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
+  EIGEN_ALIGN16 float aux[4];
+  pstore(aux, a);
+  Eigen::half h0(aux[0]);
+  Eigen::half h1(aux[1]);
+  Eigen::half h2(aux[2]);
+  Eigen::half h3(aux[3]);
+
+  Packet4h result;
+  result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
+  return result;
+}
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_CUDA_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/ConjHelper.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/ConjHelper.h
new file mode 100644
index 000000000..4cfe34e05
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/ConjHelper.h
@@ -0,0 +1,29 @@
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARCH_CONJ_HELPER_H
+#define EIGEN_ARCH_CONJ_HELPER_H
+
+#define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL)                                              \
+  template<> struct conj_helper<PACKET_REAL, PACKET_CPLX, false, false> {                                       \
+    EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, const PACKET_CPLX& y, const PACKET_CPLX& c) const \
+    { return padd(c, pmul(x,y)); }                                                                              \
+    EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, const PACKET_CPLX& y) const                      \
+    { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x, y.v)); }                                         \
+  };                                                                                                            \
+                                                                                                                \
+  template<> struct conj_helper<PACKET_CPLX, PACKET_REAL, false, false> {                                       \
+    EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, const PACKET_REAL& y, const PACKET_CPLX& c) const \
+    { return padd(c, pmul(x,y)); }                                                                              \
+    EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, const PACKET_REAL& y) const                      \
+    { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x.v, y)); }                                         \
+  };
+
+#endif // EIGEN_ARCH_CONJ_HELPER_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/Settings.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/Settings.h
new file mode 100644
index 000000000..097373c84
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/Default/Settings.h
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/* All the parameters defined in this file can be specialized in the
+ * architecture specific files, and/or by the user.
+ * More to come... */
+
+#ifndef EIGEN_DEFAULT_SETTINGS_H
+#define EIGEN_DEFAULT_SETTINGS_H
+
+/** Defines the maximal loop size to enable meta unrolling of loops.
+  * Note that the value here is expressed in Eigen's own notion of "number of FLOPS",
+  * it does not correspond to the number of iterations or the number of instructions
+  */
+#ifndef EIGEN_UNROLLING_LIMIT
+#define EIGEN_UNROLLING_LIMIT 100
+#endif
+
+/** Defines the threshold between a "small" and a "large" matrix.
+  * This threshold is mainly used to select the proper product implementation.
+  */
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+/** Defines the maximal width of the blocks used in the triangular product and solver
+  * for vectors (level 2 blas xTRMV and xTRSV). The default is 8.
+  */
+#ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH
+#define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8
+#endif
+
+
+/** Defines the default number of registers available for that architecture.
+  * Currently it must be 8 or 16. Other values will fail.
+  */
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
+
+#endif // EIGEN_DEFAULT_SETTINGS_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/Complex.h
new file mode 100644
index 000000000..306a309be
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/Complex.h
@@ -0,0 +1,490 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_NEON_H
+#define EIGEN_COMPLEX_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+inline uint32x4_t p4ui_CONJ_XOR() {
+// See bug 1325, clang fails to call vld1q_u64.
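+// XOR-ing with this mask flips the sign bit of lanes 1 and 3 only, i.e. the
+// imaginary parts of the two packed std::complex<float> values, which is
+// exactly complex conjugation.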
+#if EIGEN_COMP_CLANG
+  uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+  return ret;
+#else
+  static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+  return vld1q_u32( conj_XOR_DATA );
+#endif
+}
+
+inline uint32x2_t p2ui_CONJ_XOR() {
+  static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };
+  return vld1_u32( conj_XOR_DATA );
+}
+
+//---------- float ----------
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE Packet2cf() {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+  Packet4f v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+  typedef Packet2cf type;
+  typedef Packet2cf half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+  float32x2_t r64;
+  r64 = vld1_f32((const float *)&from);
+
+  return Packet2cf(vcombine_f32(r64, r64));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+  Packet4ui b = vreinterpretq_u32_f32(a.v);
+  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  Packet4f v1, v2;
+
+  // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+  v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
+  // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
+  v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));
+  // Multiply the real a with b
+  v1 = vmulq_f32(v1, b.v);
+  // Multiply the imag a with b
+  v2 = vmulq_f32(v2, b.v);
+  // Conjugate v2
+  v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));
+  // Swap real/imag elements in v2.
+  v2 = vrev64q_f32(v2);
+  // Add and return the result
+  return Packet2cf(vaddq_f32(v1, v2));
+}
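The pmul above forms v1 = a_re*b and v2 = a_im*b, then conjugates and lane-swaps v2 so the final add yields (a_re*b_re - a_im*b_im, a_re*b_im + a_im*b_re). A standalone scalar check of the same decomposition (not part of the patch):

#include <complex>
#include <cstdio>

int main() {
  std::complex<float> a(1.0f, 2.0f), b(3.0f, 4.0f);
  float v1_re = a.real() * b.real(), v1_im = a.real() * b.imag();  // v1 = a_re * b
  float v2_re = a.imag() * b.real(), v2_im = a.imag() * b.imag();  // v2 = a_im * b
  // conjugate then swap lanes: (v2_re, v2_im) -> (-v2_im, v2_re)
  float re = v1_re - v2_im, im = v1_im + v2_re;
  std::complex<float> ref = a * b;
  std::printf("(%g, %g) vs (%g, %g)\n", re, im, ref.real(), ref.imag());
  return 0;
}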
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+{
+  Packet4f res = pset1<Packet4f>(0.f);
+  res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
+  res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);
+  res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);
+  res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);
+  return Packet2cf(res);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+{
+  to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
+  to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
+}
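pgather/pscatter are the strided counterparts of pload/pstore; Eigen reaches them from strided expressions such as a Map with an inner stride. A standalone sketch (not part of the patch; whether the packet path is actually taken depends on sizes and compile flags):

#include <Eigen/Core>
#include <complex>
#include <iostream>

int main() {
  std::complex<float> buf[8];
  for (int i = 0; i < 8; ++i) buf[i] = std::complex<float>(float(i), -float(i));
  // An every-other-element view: strided packet access over such expressions
  // is what pgather/pscatter implement on the vectorized paths.
  Eigen::Map<Eigen::ArrayXcf, 0, Eigen::InnerStride<2> > v(buf, 4);
  std::cout << v.sum() << "\n";  // (0+2+4+6, -(0+2+4+6)) = (12,-12)
  return 0;
}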
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((const float *)addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+  std::complex<float> EIGEN_ALIGN16 x[2];
+  vst1q_f32((float *)x, a.v);
+  return x[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+  float32x2_t a_lo, a_hi;
+  Packet4f a_r128;
+
+  a_lo = vget_low_f32(a.v);
+  a_hi = vget_high_f32(a.v);
+  a_r128 = vcombine_f32(a_hi, a_lo);
+
+  return Packet2cf(a_r128);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
+{
+  return Packet2cf(vrev64q_f32(a.v));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+  float32x2_t a1, a2;
+  std::complex<float> s;
+
+  a1 = vget_low_f32(a.v);
+  a2 = vget_high_f32(a.v);
+  a2 = vadd_f32(a1, a2);
+  vst1_f32((float *)&s, a2);
+
+  return s;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+  Packet4f sum1, sum2, sum;
+
+  // Add the first two 64-bit float32x2_t of vecs[0]
+  sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
+  sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
+  sum = vaddq_f32(sum1, sum2);
+
+  return Packet2cf(sum);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+  float32x2_t a1, a2, v1, v2, prod;
+  std::complex<float> s;
+
+  a1 = vget_low_f32(a.v);
+  a2 = vget_high_f32(a.v);
+  // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+  v1 = vdup_lane_f32(a1, 0);
+  // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
+  v2 = vdup_lane_f32(a1, 1);
+  // Multiply the real a with b
+  v1 = vmul_f32(v1, a2);
+  // Multiply the imag a with b
+  v2 = vmul_f32(v2, a2);
+  // Conjugate v2
+  v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
+  // Swap real/imag elements in v2.
+  v2 = vrev64_f32(v2);
+  // Add v1, v2
+  prod = vadd_f32(v1, v2);
+
+  vst1_f32((float *)&s, prod);
+
+  return s;
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+  EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
+  {
+    if (Offset==1)
+    {
+      first.v = vextq_f32(first.v, second.v, 2);
+    }
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(a, pconj(b));
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(pconj(a), b);
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return pconj(internal::pmul(a, b));
+  }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for NEON
+  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+  Packet4f s, rev_s;
+
+  // this computes the norm
+  s = vmulq_f32(b.v, b.v);
+  rev_s = vrev64q_f32(s);
+
+  return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s,rev_s)));
+}
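pdiv implements a/b = a*conj(b)/|b|^2: the conj_helper supplies a*conj(b), and s + rev_s broadcasts b_re^2 + b_im^2 into both lanes of each complex slot. A standalone scalar check (not part of the patch):

#include <complex>
#include <cstdio>

int main() {
  std::complex<float> a(1.0f, 2.0f), b(3.0f, -4.0f);
  std::complex<float> q = a * std::conj(b) / std::norm(b);  // what pdiv computes
  std::complex<float> r = a / b;
  std::printf("(%g, %g) vs (%g, %g)\n", q.real(), q.imag(), r.real(), r.imag());
  return 0;
}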
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet2cf,2>& kernel) {
+  Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), vget_high_f32(kernel.packet[1].v));
+  kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v));
+  kernel.packet[1].v = tmp;
+}
+
+//---------- double ----------
+#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
+
+// See bug 1325, clang fails to call vld1q_u64.
+#if EIGEN_COMP_CLANG
+  static uint64x2_t p2ul_CONJ_XOR = {0x0, 0x8000000000000000};
+#else
+  const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 };
+  static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );
+#endif
+
+struct Packet1cd
+{
+  EIGEN_STRONG_INLINE Packet1cd() {}
+  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+  Packet2d v;
+};
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+  typedef Packet1cd type;
+  typedef Packet1cd half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 0,
+    size = 1,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+
+template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(padd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(psub(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  Packet2d v1, v2;
+
+  // Get the real values of a
+  v1 = vdupq_lane_f64(vget_low_f64(a.v), 0);
+  // Get the imag values of a
+  v2 = vdupq_lane_f64(vget_high_f64(a.v), 0);
+  // Multiply the real a with b
+  v1 = vmulq_f64(v1, b.v);
+  // Multiply the imag a with b
+  v2 = vmulq_f64(v2, b.v);
+  // Conjugate v2
+  v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR));
+  // Swap real/imag elements in v2.
+  v2 = preverse<Packet2d>(v2);
+  // Add and return the result
+  return Packet1cd(vaddq_f64(v1, v2));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((const double *)addr); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
+{
+  Packet2d res = pset1<Packet2d>(0.0);
+  res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
+  res = vsetq_lane_f64(std::imag(from[0*stride]), res, 1);
+  return Packet1cd(res);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
+{
+  to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));
+}
+
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+{
+  std::complex<double> EIGEN_ALIGN16 res;
+  pstore<std::complex<double> >(&res, a);
+
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) { return vecs[0]; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+template<int Offset>
+struct palign_impl<Offset,Packet1cd>
+{
+  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
+  {
+    // FIXME is it sure we never have to align a Packet1cd?
+    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    return internal::pmul(a, pconj(b));
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    return internal::pmul(pconj(a), b);
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    return pconj(internal::pmul(a, b));
+  }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
+template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  // TODO optimize it for NEON
+  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+  Packet2d s = pmul(b.v, b.v);
+  Packet2d rev_s = preverse(s);
+
+  return Packet1cd(pdiv(res.v, padd(s,rev_s)));
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+{
+  return Packet1cd(preverse(Packet2d(x.v)));
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
+{
+  Packet2d tmp = vcombine_f64(vget_high_f64(kernel.packet[0].v), vget_high_f64(kernel.packet[1].v));
+  kernel.packet[0].v = vcombine_f64(vget_low_f64(kernel.packet[0].v), vget_low_f64(kernel.packet[1].v));
+  kernel.packet[1].v = tmp;
+}
+#endif // EIGEN_ARCH_ARM64
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_NEON_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/MathFunctions.h
new file mode 100644
index 000000000..6bb05bb92
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/MathFunctions.h
@@ -0,0 +1,91 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_NEON_H
+#define EIGEN_MATH_FUNCTIONS_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  Packet4f tmp, fx;
+
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+  _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
+  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+  x = vminq_f32(x, p4f_exp_hi);
+  x = vmaxq_f32(x, p4f_exp_lo);
+
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);
+
+  /* perform a floorf */
+  tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
+
+  /* if greater, subtract 1 */
+  Packet4ui mask = vcgtq_f32(tmp, fx);
+  mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));
+
+  fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
+
+  tmp = vmulq_f32(fx, p4f_cephes_exp_C1);
+  Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);
+  x = vsubq_f32(x, tmp);
+  x = vsubq_f32(x, z);
+
+  Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);
+  z = vmulq_f32(x, x);
+  y = vaddq_f32(y, p4f_cephes_exp_p1);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, p4f_cephes_exp_p2);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, p4f_cephes_exp_p3);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, p4f_cephes_exp_p4);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, p4f_cephes_exp_p5);
+
+  y = vmulq_f32(y, z);
+  y = vaddq_f32(y, x);
+  y = vaddq_f32(y, p4f_1);
+
+  /* build 2^n */
+  int32x4_t mm;
+  mm = vcvtq_s32_f32(fx);
+  mm = vaddq_s32(mm, p4i_0x7f);
+  mm = vshlq_n_s32(mm, 23);
+  Packet4f pow2n = vreinterpretq_f32_s32(mm);
+
+  y = vmulq_f32(y, pow2n);
+  return y;
+}
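pexp vectorizes the classic Cephes recipe: clamp, split x = n*ln(2) + g with n = round(x/ln 2), evaluate a degree-5 polynomial for exp(g), and scale by 2^n through the float exponent field. A scalar sketch of the same recipe (not part of the patch):

#include <algorithm>
#include <cmath>
#include <cstdio>

float exp_cephes(float x) {
  const float LOG2EF = 1.44269504088896341f;
  const float C1 = 0.693359375f, C2 = -2.12194440e-4f;  // ln(2) split in two
  x = std::min(88.3762626647950f, std::max(-88.3762626647949f, x));
  float fx = std::floor(x * LOG2EF + 0.5f);              // n = round(x / ln 2)
  x -= fx * C1 + fx * C2;                                // g = x - n*ln(2)
  float z = x * x;
  float y = 1.9875691500E-4f;                            // degree-5 polynomial
  y = y * x + 1.3981999507E-3f;
  y = y * x + 8.3334519073E-3f;
  y = y * x + 4.1665795894E-2f;
  y = y * x + 1.6666665459E-1f;
  y = y * x + 5.0000001201E-1f;
  y = y * z + x + 1.0f;                                  // exp(g)
  return std::ldexp(y, static_cast<int>(fx));            // exp(g) * 2^n
}

int main() {
  std::printf("%.6f vs %.6f\n", exp_cephes(1.0f), std::exp(1.0f));
  return 0;
}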
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_NEON_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
new file mode 100644
index 000000000..3d5ed0d24
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -0,0 +1,760 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>
+// Heavily based on Gael's SSE version.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_NEON_H
+#define EIGEN_PACKET_MATH_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#if EIGEN_ARCH_ARM64
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+#else
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#endif
+#endif
+
+#if EIGEN_COMP_MSVC
+
+// In MSVC's arm_neon.h header file, all NEON vector types
+// are aliases to the same underlying type __n128.
+// We thus have to wrap them to make them different C++ types.
+// (See also bug 1428)
+
+template<typename T>
+struct eigen_packet_wrapper
+{
+  operator T&() { return m_val; }
+  operator const T&() const { return m_val; }
+  eigen_packet_wrapper() {}
+  eigen_packet_wrapper(const T &v) : m_val(v) {}
+  eigen_packet_wrapper& operator=(const T &v) {
+    m_val = v;
+    return *this;
+  }
+
+  T m_val;
+};
+typedef eigen_packet_wrapper<float32x2_t> Packet2f;
+typedef eigen_packet_wrapper<float32x4_t> Packet4f;
+typedef eigen_packet_wrapper<int32x4_t>   Packet4i;
+typedef eigen_packet_wrapper<int32x2_t>   Packet2i;
+typedef eigen_packet_wrapper<uint32x4_t>  Packet4ui;
+
+#else
+
+typedef float32x2_t Packet2f;
+typedef float32x4_t Packet4f;
+typedef int32x4_t   Packet4i;
+typedef int32x2_t   Packet2i;
+typedef uint32x4_t  Packet4ui;
+
+#endif // EIGEN_COMP_MSVC
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#if EIGEN_ARCH_ARM64
+  // __builtin_prefetch tends to do nothing on ARM64 compilers because the
+  // prefetch instructions there are too detailed for __builtin_prefetch to map
+  // meaningfully to them.
+  #define EIGEN_ARM_PREFETCH(ADDR)  __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : );
+#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+  #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
+#elif defined __pld
+  #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
+#elif EIGEN_ARCH_ARM32
+  #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
+#else
+  // by default no explicit prefetching
+  #define EIGEN_ARM_PREFETCH(ADDR)
+#endif
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  typedef Packet4f half; // Packet2f intrinsics not implemented yet
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+    HasHalfPacket=0, // Packet2f intrinsics not implemented yet
+
+    HasDiv = 1,
+    // FIXME check the Has*
+    HasSin = 0,
+    HasCos = 0,
+    HasLog = 0,
+    HasExp = 1,
+    HasSqrt = 0
+  };
+};
+template<> struct packet_traits<int32_t> : default_packet_traits
+{
+  typedef Packet4i type;
+  typedef Packet4i half; // Packet2i intrinsics not implemented yet
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4,
+    HasHalfPacket=0 // Packet2i intrinsics not implemented yet
+    // FIXME check the Has*
+  };
+};
+
+#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
+// workaround gcc 4.2, 4.3 and 4.4 compilation issue
+EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
+EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
+#endif
+
+template<> struct unpacket_traits<Packet4f> { typedef float   type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&   from) { return vdupq_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
+{
+  const float f[] = {0, 1, 2, 3};
+  Packet4f countdown = vld1q_f32(f);
+  return vaddq_f32(pset1<Packet4f>(a), countdown);
+}
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
+{
+  const int32_t i[] = {0, 1, 2, 3};
+  Packet4i countdown = vld1q_s32(i);
+  return vaddq_s32(pset1<Packet4i>(a), countdown);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+#if EIGEN_ARCH_ARM64
+  return vdivq_f32(a,b);
+#else
+  Packet4f inv, restep, div;
+
+  // NEON does not offer a divide instruction, we have to do a reciprocal approximation
+  // However NEON in contrast to other SIMD engines (AltiVec/SSE), offers
+  // a reciprocal estimate AND a reciprocal step -which saves a few instructions
+  // vrecpeq_f32() returns an estimate to 1/b, which we will finetune with
+  // Newton-Raphson and vrecpsq_f32()
+  inv = vrecpeq_f32(b);
+
+  // This returns a differential, by which we will have to multiply inv to get a better
+  // approximation of 1/b.
+  restep = vrecpsq_f32(b, inv);
+  inv = vmulq_f32(restep, inv);
+
+  // Finally, multiply a by 1/b and get the wanted result of the division.
+  div = vmulq_f32(a, inv);
+
+  return div;
+#endif
+}
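On 32-bit ARM there is no vector divide, so the #else branch refines the vrecpeq_f32 estimate with one Newton-Raphson step; vrecpsq_f32(b, r) returns (2 - b*r). A scalar sketch of the iteration (not part of the patch):

#include <cstdio>

int main() {
  float b = 3.0f;
  float r = 0.3f;              // stand-in for the vrecpeq_f32 estimate
  for (int i = 0; i < 3; ++i)
    r = r * (2.0f - b * r);    // each step roughly doubles the correct bits
  std::printf("1/3 ~= %.9f\n", r);
  return 0;
}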
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division are not supported by NEON");
+  return pset1<Packet4i>(0);
+}
+
+// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,
+// then implements a slow software scalar fallback calling fmaf()!
+// Filed LLVM bug:
+//     https://llvm.org/bugs/show_bug.cgi?id=27216
+#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
+// See bug 936.
+// FMA is available on VFPv4 i.e. when compiling with -mfpu=neon-vfpv4.
+// FMA is a true fused multiply-add i.e. only 1 rounding at the end, no intermediate rounding.
+// MLA is not fused i.e. does 2 roundings.
+// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
+// MLA: 10 GFlop/s ; FMA: 12 GFlops/s.
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
+#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
+  // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
+  // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
+  // -march=armv7-a, that is a very common case.
+  // See e.g. this thread:
+  //     http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
+  // Filed LLVM bug:
+  //     https://llvm.org/bugs/show_bug.cgi?id=27219
+  Packet4f r = c;
+  asm volatile(
+    "vmla.f32 %q[r], %q[a], %q[b]"
+    : [r] "+w" (r)
+    : [a] "w" (a),
+      [b] "w" (b)
+    : );
+  return r;
+#else
+  return vmlaq_f32(c,a,b);
+#endif
+}
+#endif
+
+// No FMA instruction for int, so use MLA unconditionally.
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+
+// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*   from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+  float32x2_t lo, hi;
+  lo = vld1_dup_f32(from);
+  hi = vld1_dup_f32(from+1);
+  return vcombine_f32(lo, hi);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
+{
+  int32x2_t lo, hi;
+  lo = vld1_dup_s32(from);
+  hi = vld1_dup_s32(from+1);
+  return vcombine_s32(lo, hi);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>  (float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>  (float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+  Packet4f res = pset1<Packet4f>(0.f);
+  res = vsetq_lane_f32(from[0*stride], res, 0);
+  res = vsetq_lane_f32(from[1*stride], res, 1);
+  res = vsetq_lane_f32(from[2*stride], res, 2);
+  res = vsetq_lane_f32(from[3*stride], res, 3);
+  return res;
+}
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
+{
+  Packet4i res = pset1<Packet4i>(0);
+  res = vsetq_lane_s32(from[0*stride], res, 0);
+  res = vsetq_lane_s32(from[1*stride], res, 1);
+  res = vsetq_lane_s32(from[2*stride], res, 2);
+  res = vsetq_lane_s32(from[3*stride], res, 3);
+  return res;
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+  to[stride*0] = vgetq_lane_f32(from, 0);
+  to[stride*1] = vgetq_lane_f32(from, 1);
+  to[stride*2] = vgetq_lane_f32(from, 2);
+  to[stride*3] = vgetq_lane_f32(from, 3);
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
+{
+  to[stride*0] = vgetq_lane_s32(from, 0);
+  to[stride*1] = vgetq_lane_s32(from, 1);
+  to[stride*2] = vgetq_lane_s32(from, 2);
+  to[stride*3] = vgetq_lane_s32(from, 3);
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>  (const float*   addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+
+// FIXME only store the 2 first elements ?
+template<> EIGEN_STRONG_INLINE float   pfirst<Packet4f>(const Packet4f& a) { float   EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+  float32x2_t a_lo, a_hi;
+  Packet4f a_r64;
+
+  a_r64 = vrev64q_f32(a);
+  a_lo = vget_low_f32(a_r64);
+  a_hi = vget_high_f32(a_r64);
+  return vcombine_f32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+  int32x2_t a_lo, a_hi;
+  Packet4i a_r64;
+
+  a_r64 = vrev64q_s32(a);
+  a_lo = vget_low_s32(a_r64);
+  a_hi = vget_high_s32(a_r64);
+  return vcombine_s32(a_hi, a_lo);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, sum;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  sum = vpadd_f32(a_lo, a_hi);
+  sum = vpadd_f32(sum, sum);
+  return vget_lane_f32(sum, 0);
+}
+
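+// vzipq_f32(x, y) interleaves its inputs: it returns { x0 y0 x1 y1 } and
+// { x2 y2 x3 y3 }. Two rounds of zips therefore realize a full 4x4
+// transpose, which preduxp and ptranspose below both exploit.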
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  float32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4f sum1, sum2, sum;
+
+  // NEON zip performs interleaving of the supplied vectors.
+  // We perform two interleaves in a row to acquire the transposed vector
+  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
+  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
+  sum = vaddq_f32(sum1, sum2);
+
+  return sum;
+}
+
+template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, sum;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  sum = vpadd_s32(a_lo, a_hi);
+  sum = vpadd_s32(sum, sum);
+  return vget_lane_s32(sum, 0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  int32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4i sum1, sum2, sum;
+
+  // NEON zip performs interleaving of the supplied vectors.
+  // We perform two interleaves in a row to acquire the transposed vector
+  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
+  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
+  sum = vaddq_s32(sum1, sum2);
+
+  return sum;
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, prod;
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_f32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_f32(prod, vrev64_f32(prod));
+
+  return vget_lane_f32(prod, 0);
+}
+template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, prod;
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_s32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_s32(prod, vrev64_s32(prod));
+
+  return vget_lane_s32(prod, 0);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, min;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  min = vpmin_f32(a_lo, a_hi);
+  min = vpmin_f32(min, min);
+
+  return vget_lane_f32(min, 0);
+}
+
+template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, min;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  min = vpmin_s32(a_lo, a_hi);
+  min = vpmin_s32(min, min);
+
+  return vget_lane_s32(min, 0);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, max;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  max = vpmax_f32(a_lo, a_hi);
+  max = vpmax_f32(max, max);
+
+  return vget_lane_f32(max, 0);
+}
+
+template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, max;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  max = vpmax_s32(a_lo, a_hi);
+  max = vpmax_s32(max, max);
+
+  return vget_lane_s32(max, 0);
+}
+
+// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
+// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
+#define PALIGN_NEON(Offset,Type,Command) \
+template<>\
+struct palign_impl<Offset,Type>\
+{\
+    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
+    {\
+        if (Offset!=0)\
+            first = Command(first, second, Offset);\
+    }\
+};\
+
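+// vextq_f32(a, b, n) yields { a[n], ..., a[3], b[0], ..., b[n-1] }: a sliding
+// window over the concatenation of the two registers, which is exactly the
+// palign contract the instantiations below implement.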
vget_low_f32(tmp2.val[1])); + kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1])); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]); + int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]); + kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0])); + kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0])); + kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1])); + kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1])); +} + +//---------- double ---------- + +// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrisics for double. +// Confirmed at least with __apple_build_version__ = 6000054. +#ifdef __apple_build_version__ +// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed. +// https://gist.github.com/yamaya/2924292 suggests that the 3 first digits are only updated with +// major toolchain updates. +#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000) +#else +#define EIGEN_APPLE_DOUBLE_NEON_BUG 0 +#endif + +#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG + +// Bug 907: workaround missing declarations of the following two functions in the ADK +// Defining these functions as templates ensures that if these intrinsics are +// already defined in arm_neon.h, then our workaround doesn't cause a conflict +// and has lower priority in overload resolution. +template +uint64x2_t vreinterpretq_u64_f64(T a) +{ + return (uint64x2_t) a; +} + +template +float64x2_t vreinterpretq_f64_u64(T a) +{ + return (float64x2_t) a; +} + +typedef float64x2_t Packet2d; +typedef float64x1_t Packet1d; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket=0, + + HasDiv = 1, + // FIXME check the Has* + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 0, + HasSqrt = 0 + }; +}; + +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; }; + +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return vdupq_n_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) +{ + const double countdown_raw[] = {0.0,1.0}; + const Packet2d countdown = vld1q_f64(countdown_raw); + return vaddq_f64(pset1(a), countdown); +} +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); } + +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); } + +#ifdef __ARM_FEATURE_FMA +// See bug 936. See above comment about FMA for float. 
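// [Editor's aside] A minimal model of the bug-907 overload trick above, with
// hypothetical names: when a non-template declaration exists (here standing in
// for the toolkit's own intrinsic), it always beats the template in overload
// resolution, so the templated fallback is harmless when both are present.
#include <cstdio>
int widen(double d) { return (int)(d * 2.0); }          // stands in for the "real" declaration
template <typename T> int widen(T v) { return (int)v; } // fallback, lower priority
int main() { std::printf("%d\n", widen(3.5)); }         // prints 7: the non-template wins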
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); } +#else +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); } + +// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) +{ + return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); +} + +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) +{ + return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); +} + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) +{ + return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); +} + +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) +{ + return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); +} + +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ + return vld1q_dup_f64(from); +} +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); } + +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) +{ + Packet2d res = pset1(0.0); + res = vsetq_lane_f64(from[0*stride], res, 0); + res = vsetq_lane_f64(from[1*stride], res, 1); + return res; +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) +{ + to[stride*0] = vgetq_lane_f64(from, 0); + to[stride*1] = vgetq_lane_f64(from, 1); +} +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { EIGEN_ARM_PREFETCH(addr); } + +// FIXME only store the 2 first elements ? +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { return vgetq_lane_f64(a, 0); } + +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); } + +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); } + +#if EIGEN_COMP_CLANG && defined(__apple_build_version__) +// workaround ICE, see bug 907 +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; } +#else +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); } +#endif + +template<> EIGEN_STRONG_INLINE Packet2d preduxp(const Packet2d* vecs) +{ + float64x2_t trn1, trn2; + + // NEON zip performs interleaving of the supplied vectors. 
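// [Editor's aside] A scalar reference model for the strided pgather/pscatter
// pair above (helper names hypothetical): packet element i maps to the memory
// location from[i*stride], which is exactly what the lane set/get code does.
static void gather2_ref(const double* from, long stride, double out[2])
{
  out[0] = from[0 * stride];
  out[1] = from[1 * stride];
}
static void scatter2_ref(double* to, const double in[2], long stride)
{
  to[0 * stride] = in[0];
  to[1 * stride] = in[1];
}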
+ // We perform two interleaves in a row to acquire the transposed vector + trn1 = vzip1q_f64(vecs[0], vecs[1]); + trn2 = vzip2q_f64(vecs[0], vecs[1]); + + // Do the addition of the resulting vectors + return vaddq_f64(trn1, trn2); +} +// Other reduction functions: +// mul +#if EIGEN_COMP_CLANG && defined(__apple_build_version__) +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; } +#else +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); } +#endif + +// min +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); } + +// max +template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); } + +// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors, +// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074 +#define PALIGN_NEON(Offset,Type,Command) \ +template<>\ +struct palign_impl\ +{\ + EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\ + {\ + if (Offset!=0)\ + first = Command(first, second, Offset);\ + }\ +};\ + +PALIGN_NEON(0,Packet2d,vextq_f64) +PALIGN_NEON(1,Packet2d,vextq_f64) +#undef PALIGN_NEON + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]); + float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]); + + kernel.packet[0] = trn1; + kernel.packet[1] = trn2; +} +#endif // EIGEN_ARCH_ARM64 + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_NEON_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/Complex.h new file mode 100644 index 000000000..d075043ce --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/Complex.h @@ -0,0 +1,471 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_SSE_H +#define EIGEN_COMPLEX_SSE_H + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet2cf +{ + EIGEN_STRONG_INLINE Packet2cf() {} + EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {} + __m128 v; +}; + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. 
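// [Editor's aside] The SSE kernels below compute the interleaved complex
// product with moveldup/movehdup plus addsub; a scalar reference of the
// identity they implement (helper name hypothetical):
//   (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
#include <complex>
static inline std::complex<float> cmul_ref(const std::complex<float>& a,
                                           const std::complex<float>& b)
{
  return std::complex<float>(a.real()*b.real() - a.imag()*b.imag(),
                             a.real()*b.imag() + a.imag()*b.real());
}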
+#ifndef EIGEN_VECTORIZE_AVX +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0, + HasBlend = 1 + }; +}; +#endif + +template<> struct unpacket_traits { typedef std::complex type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; }; + +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) +{ + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000)); + return Packet2cf(_mm_xor_ps(a.v,mask)); +} +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) +{ + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet2cf(_mm_xor_ps(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + #ifdef EIGEN_VECTORIZE_SSE3 + return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v), + _mm_mul_ps(_mm_movehdup_ps(a.v), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); +// return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), +// _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), +// vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + #else + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000)); + return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), + _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); + #endif +} + +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload(&numext::real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu(&numext::real_ref(*from))); } + +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + Packet2cf res; +#if EIGEN_GNUC_AT_MOST(4,2) + // Workaround annoying "may be used uninitialized in this function" warning with gcc 4.2 + res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast(&from)); +#elif EIGEN_GNUC_AT_LEAST(4,6) + // Suppress annoying "may be used uninitialized in this function" warning with gcc >= 4.6 + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wuninitialized" + res.v = _mm_loadl_pi(res.v, (const __m64*)&from); + #pragma GCC diagnostic pop +#else + res.v = _mm_loadl_pi(res.v, (const __m64*)&from); +#endif + return Packet2cf(_mm_movelh_ps(res.v,res.v)); +} + +template<> EIGEN_STRONG_INLINE 
Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), Packet4f(from.v)); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); } + + +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) +{ + return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]), + std::imag(from[0*stride]), std::real(from[0*stride]))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) +{ + to[stride*0] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)), + _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1))); + to[stride*1] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 2)), + _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3))); +} + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + #if EIGEN_GNUC_AT_MOST(4,3) + // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares... + // This workaround also fix invalid code generation with gcc 4.3 + EIGEN_ALIGN16 std::complex res[2]; + _mm_store_ps((float*)res, a.v); + return res[0]; + #else + std::complex res; + _mm_storel_pi((__m64*)&res, a.v); + return res; + #endif +} + +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v)))); +} + +template<> EIGEN_STRONG_INLINE Packet2cf preduxp(const Packet2cf* vecs) +{ + return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v)))); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second) + { + if (Offset==1) + { + first.v = _mm_movehl_ps(first.v, first.v); + first.v = _mm_movelh_ps(first.v, second.v); + } + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return internal::pmul(a, pconj(b)); + #else + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), + _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + #endif + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return internal::pmul(pconj(a), b); + #else + const __m128 mask = 
_mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), + _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); + #endif + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return pconj(internal::pmul(a, b)); + #else + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), + _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + #endif + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + // TODO optimize it for SSE3 and 4 + Packet2cf res = conj_helper().pmul(a,b); + __m128 s = _mm_mul_ps(b.v,b.v); + return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1))))); +} + +EIGEN_STRONG_INLINE Packet2cf pcplxflip/* */(const Packet2cf& x) +{ + return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2)); +} + + +//---------- double ---------- +struct Packet1cd +{ + EIGEN_STRONG_INLINE Packet1cd() {} + EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {} + __m128d v; +}; + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. +#ifndef EIGEN_VECTORIZE_AVX +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet1cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; +#endif + +template<> struct unpacket_traits { typedef std::complex type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; }; + +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) +{ + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + return Packet1cd(_mm_xor_pd(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) +{ + #ifdef EIGEN_VECTORIZE_SSE3 + return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); + #else + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); + return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)), mask))); + #endif +} + +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const 
Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); } + +// FIXME force unaligned load, this is a temporary fix +template<> EIGEN_STRONG_INLINE Packet1cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ /* here we really have to use unaligned loads :( */ return ploadu(&from); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } + +// FIXME force unaligned store, this is a temporary fix +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + EIGEN_ALIGN16 double res[2]; + _mm_store_pd(res, a.v); + return std::complex(res[0],res[1]); +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) +{ + return pfirst(a); +} + +template<> EIGEN_STRONG_INLINE Packet1cd preduxp(const Packet1cd* vecs) +{ + return vecs[0]; +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) +{ + return pfirst(a); +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) + { + // FIXME is it sure we never have to align a Packet1cd? + // Even though a std::complex has 16 bytes, it is not necessarily aligned on a 16 bytes boundary... 
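// [Editor's aside] The FIXME above is about exactly this: std::complex<double>
// occupies 16 bytes, but its alignment requirement is typically only that of
// double (8 bytes), so such a pointer need not sit on a 16-byte boundary.
// A quick probe, for illustration only:
#include <complex>
#include <cstdio>
int main()
{
  std::printf("sizeof=%zu alignof=%zu\n",
              sizeof(std::complex<double>), alignof(std::complex<double>));
  return 0; // commonly prints "sizeof=16 alignof=8"
}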
+ } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return internal::pmul(a, pconj(b)); + #else + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); + #endif + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return internal::pmul(pconj(a), b); + #else + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)), mask))); + #endif + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + #ifdef EIGEN_VECTORIZE_SSE3 + return pconj(internal::pmul(a, b)); + #else + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); + #endif + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + // TODO optimize it for SSE3 and 4 + Packet1cd res = conj_helper().pmul(a,b); + __m128d s = _mm_mul_pd(b.v,b.v); + return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1)))); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/* */(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128d w1 = _mm_castps_pd(kernel.packet[0].v); + __m128d w2 = _mm_castps_pd(kernel.packet[1].v); + + __m128 tmp = _mm_castpd_ps(_mm_unpackhi_pd(w1, w2)); + kernel.packet[0].v = _mm_castpd_ps(_mm_unpacklo_pd(w1, w2)); + kernel.packet[1].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) { + __m128d result = pblend(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v)); + return Packet2cf(_mm_castpd_ps(result)); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pinsertfirst(const Packet2cf& a, std::complex b) +{ + return Packet2cf(_mm_loadl_pi(a.v, reinterpret_cast(&b))); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pinsertfirst(const Packet1cd&, std::complex b) +{ + return pset1(b); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pinsertlast(const Packet2cf& a, std::complex b) +{ + return Packet2cf(_mm_loadh_pi(a.v, reinterpret_cast(&b))); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pinsertlast(const Packet1cd&, std::complex b) +{ + return pset1(b); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_SSE_H diff --git 
a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h new file mode 100644 index 000000000..7b5f948e1 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -0,0 +1,562 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* The sin, cos, exp, and log functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_MATH_FUNCTIONS_SSE_H +#define EIGEN_MATH_FUNCTIONS_SSE_H + +namespace Eigen { + +namespace internal { + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog(const Packet4f& _x) +{ + Packet4f x = _x; + _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); + _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); + _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); + + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000); + + /* the smallest non denormalized float number */ + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f); + + /* natural logarithm computed for 4 simultaneous float + return NaN for x <= 0 + */ + _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); + _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); + + + Packet4i emm0; + + Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN + Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps()); + + x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ + emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23); + + /* keep only the fractional part */ + x = _mm_and_ps(x, p4f_inv_mant_mask); + x = _mm_or_ps(x, p4f_half); + + emm0 = _mm_sub_epi32(emm0, p4i_0x7f); + Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1); + + /* part2: + if( x < SQRTHF ) { + e -= 1; + x = x + x - 1.0; + } else { x = x - 1.0; } + */ + Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF); + Packet4f tmp = pand(x, mask); + x = psub(x, p4f_1); + e = psub(e, pand(p4f_1, mask)); + x = padd(x, tmp); + + Packet4f x2 = pmul(x,x); + Packet4f x3 = pmul(x2,x); + + Packet4f y, y1, y2; + y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); + y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); + y2 = 
pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+  y  = pmadd(y , x, p4f_cephes_log_p2);
+  y1 = pmadd(y1, x, p4f_cephes_log_p5);
+  y2 = pmadd(y2, x, p4f_cephes_log_p8);
+  y  = pmadd(y, x3, y1);
+  y  = pmadd(y, x3, y2);
+  y  = pmul(y, x3);
+
+  y1  = pmul(e, p4f_cephes_log_q1);
+  tmp = pmul(x2, p4f_half);
+  y   = padd(y, y1);
+  x   = psub(x, tmp);
+  y2  = pmul(e, p4f_cephes_log_q2);
+  x   = padd(x, y);
+  x   = padd(x, y2);
+  // negative arg will be NAN, 0 will be -INF
+  return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
+                   _mm_and_ps(iszero_mask, p4f_minus_inf));
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
+  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+  Packet4f tmp, fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_ps(fx);
+#else
+  emm0 = _mm_cvttps_epi32(fx);
+  tmp  = _mm_cvtepi32_ps(emm0);
+  /* if greater, subtract 1 */
+  Packet4f mask = _mm_cmpgt_ps(tmp, fx);
+  mask = _mm_and_ps(mask, p4f_1);
+  fx = psub(tmp, mask);
+#endif
+
+  tmp = pmul(fx, p4f_cephes_exp_C1);
+  Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  z = pmul(x,x);
+
+  Packet4f y = p4f_cephes_exp_p0;
+  y = pmadd(y, x, p4f_cephes_exp_p1);
+  y = pmadd(y, x, p4f_cephes_exp_p2);
+  y = pmadd(y, x, p4f_cephes_exp_p3);
+  y = pmadd(y, x, p4f_cephes_exp_p4);
+  y = pmadd(y, x, p4f_cephes_exp_p5);
+  y = pmadd(y, z, x);
+  y = padd(y, p4f_1);
+
+  // build 2^n
+  emm0 = _mm_cvttps_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_0x7f);
+  emm0 = _mm_slli_epi32(emm0, 23);
+  return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
+}
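// [Editor's aside] A scalar sketch of the "build 2^n" bit trick used at the
// end of pexp above: place the biased exponent n+127 directly into the
// IEEE-754 exponent field of a float (helper name hypothetical).
#include <cstdint>
#include <cstring>
#include <cstdio>
static float two_pow_n(int n)              // valid for -126 <= n <= 127
{
  uint32_t bits = (uint32_t)(n + 127) << 23; // exponent field sits at bits 23..30
  float f;
  std::memcpy(&f, &bits, sizeof f);          // well-defined type punning
  return f;
}
int main() { std::printf("%g %g\n", two_pow_n(3), two_pow_n(-2)); } // 8 0.25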
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d pexp<Packet2d>(const Packet2d& _x)
+{
+  Packet2d x = _x;
+
+  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
+  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
+  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+
+  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
+  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
+
+  Packet2d tmp, fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_pd(fx);
+#else
+  emm0 = _mm_cvttpd_epi32(fx);
+  tmp  = _mm_cvtepi32_pd(emm0);
+  /* if greater, subtract 1 */
+  Packet2d mask = _mm_cmpgt_pd(tmp, fx);
+  mask = _mm_and_pd(mask, p2d_1);
+  fx = psub(tmp, mask);
+#endif
+
+  tmp = pmul(fx, p2d_cephes_exp_C1);
+  Packet2d z = pmul(fx, p2d_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  Packet2d x2 = pmul(x,x);
+
+  Packet2d px = p2d_cephes_exp_p0;
+  px = pmadd(px, x2, p2d_cephes_exp_p1);
+  px = pmadd(px, x2, p2d_cephes_exp_p2);
+  px = pmul (px, x);
+
+  Packet2d qx = p2d_cephes_exp_q0;
+  qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+  x = pdiv(px,psub(qx,px));
+  x = pmadd(p2d_2,x,p2d_1);
+
+  // build 2^n
+  emm0 = _mm_cvttpd_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_1023_0);
+  emm0 = _mm_slli_epi32(emm0, 20);
+  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
+  return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
+}
+
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
+
+   The code is the exact rewriting of the cephes sinf function.
+   Precision is excellent as long as x < 8192 (I did not bother to
+   take into account the special handling they have for greater values
+   -- it does not return garbage for arguments over 8192, though, but
+   the extra precision is missing).
+
+   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
+   surprising but correct result.
+*/
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2, xmm3, sign_bit, y;
+
+  Packet4i emm0, emm2;
+  sign_bit = x;
+  /* take the absolute value */
+  x = pabs(x);
+
+  /* take the modulo */
+
+  /* extract the sign bit (upper one) */
+  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* store the integer part of y in mm0 */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+  /* get the swap sign flag */
+  emm0 = _mm_and_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynom selection mask
+     there is one polynom for 0 <= x <= Pi/4
+     and another one for Pi/4 < x <= Pi/2
+
+     Both branches will be computed.
+  */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynom  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = pmul(x,x);
+
+  y = pmadd(y, z, p4f_coscof_p1);
+  y = pmadd(y, z, p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = pmul(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmul(y2, x);
+  y2 = padd(y2, x);
+
+  /* select the correct result from the two polynoms */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y  = _mm_andnot_ps(poly_mask, y);
+  y  = _mm_or_ps(y,y2);
+
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
+
+/* almost the same as psin */
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2, xmm3, y;
+  Packet4i emm0, emm2;
+
+  x = pabs(x);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* get the integer part of y */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+
+  emm2 = _mm_sub_epi32(emm2, p4i_2);
+
+  /* get the swap sign flag */
+  emm0 = _mm_andnot_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynom selection mask */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynom  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = pmul(x,x);
+
+  y = pmadd(y,z,p4f_coscof_p1);
+  y = pmadd(y,z,p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = _mm_mul_ps(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmadd(y2, x, x);
+
+  /* select the correct result from the two polynoms */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y  = _mm_andnot_ps(poly_mask, y);
+  y  = _mm_or_ps(y,y2);
+
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
+
+#if EIGEN_FAST_MATH
+
+// Functions for sqrt.
+// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
+// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
+// exact solution. It does not handle +inf, or denormalized numbers correctly.
+// The main advantage of this approach is not just speed, but also the fact that
+// it can be inlined and pipelined with other computations, further reducing its
+// effective latency. This is similar to Quake3's fast inverse square root.
+// For detail see here: http://www.beyond3d.com/content/articles/8/
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& _x)
+{
+  Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
+  Packet4f denormal_mask = _mm_and_ps(
+      _mm_cmpge_ps(_x, _mm_setzero_ps()),
+      _mm_cmplt_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())));
+
+  // Compute approximate reciprocal sqrt.
+  Packet4f x = _mm_rsqrt_ps(_x);
+  // Do a single step of Newton's iteration.
+  x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
+  // Flush results for denormals to zero.
+  return _mm_andnot_ps(denormal_mask, pmul(_x,x));
+}
+
+#else
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
+
+#endif
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
+
+#if EIGEN_FAST_MATH
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f prsqrt<Packet4f>(const Packet4f& _x) {
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000);
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000);
+  _EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f);
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000);
+
+  Packet4f neg_half = pmul(_x, p4f_minus_half);
+
+  // select only the inverse sqrt of positive normal inputs (denormals are
+  // flushed to zero and cause infs as well).
+  Packet4f le_zero_mask = _mm_cmple_ps(_x, p4f_flt_min);
+  Packet4f x = _mm_andnot_ps(le_zero_mask, _mm_rsqrt_ps(_x));
+
+  // Fill in NaNs and Infs for the negative/zero entries.
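// [Editor's aside] A scalar model of the fast-math psqrt above (assumes SSE
// and <xmmintrin.h>; helper name hypothetical): refine the hardware rsqrt
// estimate with one Newton step x' = x*(1.5 - 0.5*a*x*x), then recover
// sqrt(a) as a*rsqrt(a).
#include <xmmintrin.h>
static inline float fast_sqrt_sketch(float a)
{
  float x = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(a))); // ~12-bit rsqrt estimate
  x = x * (1.5f - 0.5f * a * x * x);                    // one Newton refinement step
  return a * x;                                         // sqrt(a) = a * rsqrt(a)
}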
+ Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps()); + Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask); + Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan), + _mm_and_ps(zero_mask, p4f_inf)); + + // Do a single step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five)); + + // Insert NaNs and Infs in all the right places. + return _mm_or_ps(x, infs_and_nans); +} + +#else + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& x) { + // Unfortunately we can't use the much faster mm_rqsrt_ps since it only provides an approximation. + return _mm_div_ps(pset1(1.0f), _mm_sqrt_ps(x)); +} + +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d prsqrt(const Packet2d& x) { + // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation. + return _mm_div_pd(pset1(1.0), _mm_sqrt_pd(x)); +} + +// Hyperbolic Tangent function. +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +ptanh(const Packet4f& x) { + return internal::generic_fast_tanh_float(x); +} + +} // end namespace internal + +namespace numext { + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float sqrt(const float &x) +{ + return internal::pfirst(internal::Packet4f(_mm_sqrt_ss(_mm_set_ss(x)))); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double sqrt(const double &x) +{ +#if EIGEN_COMP_GNUC_STRICT + // This works around a GCC bug generating poor code for _mm_sqrt_pd + // See https://bitbucket.org/eigen/eigen/commits/14f468dba4d350d7c19c9b93072e19f7b3df563b + return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x)))); +#else + return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x)))); +#endif +} + +} // end namespace numex + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_SSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h new file mode 100644 index 000000000..60e2517e4 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h @@ -0,0 +1,895 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_SSE_H +#define EIGEN_PACKET_MATH_SSE_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*)) +#endif + +#ifdef __FMA__ +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1 +#endif +#endif + +#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX +// With GCC's default ABI version, a __m128 or __m256 are the same types and therefore we cannot +// have overloads for both types without linking error. 
+// One solution is to increase ABI version using -fabi-version=4 (or greater). +// Otherwise, we workaround this inconvenience by wrapping 128bit types into the following helper +// structure: +template +struct eigen_packet_wrapper +{ + EIGEN_ALWAYS_INLINE operator T&() { return m_val; } + EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; } + EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {} + EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {} + EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) { + m_val = v; + return *this; + } + + T m_val; +}; +typedef eigen_packet_wrapper<__m128> Packet4f; +typedef eigen_packet_wrapper<__m128i> Packet4i; +typedef eigen_packet_wrapper<__m128d> Packet2d; +#else +typedef __m128 Packet4f; +typedef __m128i Packet4i; +typedef __m128d Packet2d; +#endif + +template<> struct is_arithmetic<__m128> { enum { value = true }; }; +template<> struct is_arithmetic<__m128i> { enum { value = true }; }; +template<> struct is_arithmetic<__m128d> { enum { value = true }; }; + +#define vec4f_swizzle1(v,p,q,r,s) \ + (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p))))) + +#define vec4i_swizzle1(v,p,q,r,s) \ + (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p)))) + +#define vec2d_swizzle1(v,p,q) \ + (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2))))) + +#define vec4f_swizzle2(a,b,p,q,r,s) \ + (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p)))) + +#define vec4i_swizzle2(a,b,p,q,r,s) \ + (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p)))))) + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + const Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + const Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1(X)) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + const Packet4i p4i_##NAME = pset1(X) + + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. 
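// [Editor's aside] A worked example of the swizzle macros above: the immediate
// (s<<6 | r<<4 | q<<2 | p) selects source lanes, so vec4f_swizzle1(v,1,0,3,2)
// swaps lanes pairwise, {a,b,c,d} -> {b,a,d,c}. Equivalent plain intrinsic
// (helper name hypothetical):
#include <xmmintrin.h>
static inline __m128 swap_pairs_sketch(__m128 v)
{
  return _mm_shuffle_ps(v, v, _MM_SHUFFLE(2,3,0,1)); // picks lanes {1,0,3,2}
}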
+#ifndef EIGEN_VECTORIZE_AVX +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + HasHalfPacket = 0, + + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasBlend = 1 + +#ifdef EIGEN_VECTORIZE_SSE4_1 + , + HasRound = 1, + HasFloor = 1, + HasCeil = 1 +#endif + }; +}; +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 0, + + HasDiv = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasBlend = 1 + +#ifdef EIGEN_VECTORIZE_SSE4_1 + , + HasRound = 1, + HasFloor = 1, + HasCeil = 1 +#endif + }; +}; +#endif +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; }; +template<> struct unpacket_traits { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; }; + +#ifndef EIGEN_VECTORIZE_AVX +template<> struct scalar_div_cost { enum { value = 7 }; }; +template<> struct scalar_div_cost { enum { value = 8 }; }; +#endif + +#if EIGEN_COMP_MSVC==1500 +// Workaround MSVC 9 internal compiler error. +// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode +// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)). +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return _mm_set_ps(from,from,from,from); } +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return _mm_set_pd(from,from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return _mm_set_epi32(from,from,from,from); } +#else +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return _mm_set_ps1(from); } +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return _mm_set1_pd(from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return _mm_set1_epi32(from); } +#endif + +// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction. +// However, using inrinsics for pset1 makes gcc to generate crappy code in some cases (see bug 203) +// Using inline assembly is also not an option because then gcc fails to reorder properly the instructions. +// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply. +// Also note that with AVX, we want it to generate a vbroadcastss. 
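// [Editor's aside] Usage sketch for the broadcast primitives above (assumes
// SSE2 and <emmintrin.h>; helper name hypothetical): pset1 splats one scalar
// into all lanes, and the plset defined further below adds the ramp {0,1,2,3}
// on top of it.
#include <emmintrin.h>
static inline __m128 ramp_from_sketch(float a)
{
  __m128 base = _mm_set_ps1(a);                             // {a, a, a, a}
  return _mm_add_ps(base, _mm_set_ps(3.f, 2.f, 1.f, 0.f)); // {a, a+1, a+2, a+3}
}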
+#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
+template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
+  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+  return _mm_xor_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
+  return _mm_xor_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
+{
+  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_mullo_epi32(a,b);
+#else
+  // this version is slightly faster than 4 scalar products:
+  // _mm_mul_epu32 only multiplies the even lanes, so the swizzles route the odd
+  // lanes through a second multiply and then interleave the four low 32-bit results
+  return vec4i_swizzle1(
+            vec4i_swizzle2(
+              _mm_mul_epu32(a,b),
+              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
+                            vec4i_swizzle1(b,1,0,3,2)),
+              0,2,0,2),
+            0,2,1,3);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
+
+// for some weird reason, pmadd has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+#ifdef __FMA__
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_min_epi32(a,b);
+#else
+  // after some bench, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmplt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_max_epi32(a,b);
+#else
+  // after some bench, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmpgt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
+}
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
+
+#if EIGEN_COMP_MSVC
+  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
+    EIGEN_DEBUG_UNALIGNED_LOAD
+    #if (EIGEN_COMP_MSVC==1600)
+    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
+    // (i.e., they do not generate an unaligned load!)
+    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
+    res = _mm_loadh_pi(res, (const __m64*)(from+2));
+    return res;
+    #else
+    return _mm_loadu_ps(from);
+    #endif
+  }
+#else
+// NOTE: with the code below, MSVC's compiler crashes!
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_ps(from);
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_pd(from);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}
+
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
+{ return pset1<Packet2d>(from[0]); }
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+  Packet4i tmp;
+  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
+  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
+}
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
+{
+  return _mm_set_pd(from[1*stride], from[0*stride]);
+}
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
+{
+  return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+  to[stride*0] = _mm_cvtss_f32(from);
+  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
+  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
+  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
+{
+  to[stride*0] = _mm_cvtsd_f64(from);
+  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
+{
+  to[stride*0] = _mm_cvtsi128_si32(from);
+  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
+  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
+  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
+}
+
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
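+// A minimal usage sketch (editorial; `buf` and the stored value are hypothetical):
+//   EIGEN_ALIGN16 float buf[4];
+//   pstore1<Packet4f>(buf, 1.5f);   // buf becomes {1.5f, 1.5f, 1.5f, 1.5f}
+// The explicit set/swizzle sequence below keeps the broadcast on the vector unit
+// instead of letting the compiler fall back to four scalar stores.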
+template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
+{
+  Packet4f pa = _mm_set_ss(a);
+  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
+}
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
+{
+  Packet2d pa = _mm_set_sd(a);
+  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
+}
+
+#if EIGEN_COMP_PGI
+typedef const void * SsePrefetchPtrType;
+#else
+typedef const char * SsePrefetchPtrType;
+#endif
+
+#ifndef EIGEN_VECTORIZE_AVX
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+#endif
+
+#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+// Direct access of the struct members fixed bug #62.
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#elif EIGEN_COMP_MSVC_STRICT
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#else
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{ return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{ return _mm_shuffle_epi32(a,0x1B); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
+  return _mm_and_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
+  return _mm_and_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
+{
+  #ifdef EIGEN_VECTORIZE_SSSE3
+  return _mm_abs_epi32(a);
+  #else
+  Packet4i aux = _mm_srai_epi32(a,31);
+  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
+  #endif
+}
+
+// with AVX, the default implementations based on pload1 are faster
+#ifndef __AVX__
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+  a3 = pload<Packet4f>(a);
+  a0 = vec4f_swizzle1(a3, 0,0,0,0);
+  a1 = vec4f_swizzle1(a3, 1,1,1,1);
+  a2 = vec4f_swizzle1(a3, 2,2,2,2);
+  a3 = vec4f_swizzle1(a3, 3,3,3,3);
+}
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet2d>(const double *a,
+                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
+{
+#ifdef EIGEN_VECTORIZE_SSE3
+  a0 = _mm_loaddup_pd(a+0);
+  a1 = _mm_loaddup_pd(a+1);
+  a2 = _mm_loaddup_pd(a+2);
+  a3 = _mm_loaddup_pd(a+3);
+#else
+  a1 = pload<Packet2d>(a);
+  a0 = vec2d_swizzle1(a1, 0,0);
+  a1 = vec2d_swizzle1(a1, 1,1);
+  a3 = pload<Packet2d>(a+2);
+  a2 = vec2d_swizzle1(a3, 0,0);
+  a3 = vec2d_swizzle1(a3, 1,1);
+#endif
+}
+#endif
+
+EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
+{
+  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
+  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
+  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
+  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
+}
+
+#ifdef EIGEN_VECTORIZE_SSE3
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_hadd_pd(vecs[0], vecs[1]);
+}
+
+#else
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  Packet4f tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
+  tmp0 = _mm_add_ps(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
+  tmp1 = _mm_add_ps(tmp1, tmp2);
+  tmp2 = _mm_movehl_ps(tmp1, tmp0);
+  tmp0 = _mm_movelh_ps(tmp0, tmp1);
+  return _mm_add_ps(tmp0, tmp2);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
+}
+#endif // SSE3
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  // Disable SSE3 _mm_hadd_ps that is extremely slow on all existing Intel architectures
+  // (from Nehalem to Haswell)
+// #ifdef EIGEN_VECTORIZE_SSE3
+//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
+//   return pfirst(_mm_hadd_ps(tmp, tmp));
+// #else
+  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+// #endif
+}
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+  // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel architectures
+  // (from Nehalem to Haswell)
+// #ifdef EIGEN_VECTORIZE_SSE3
+//   return pfirst(_mm_hadd_pd(a, a));
+// #else
+  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
+// #endif
+}
+
+#ifdef EIGEN_VECTORIZE_SSSE3
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
+}
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i tmp0 = _mm_hadd_epi32(a,a);
+  return pfirst(_mm_hadd_epi32(tmp0,tmp0));
+}
+#else
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
+  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  Packet4i tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+  tmp0 = _mm_add_epi32(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+  tmp1 = _mm_add_epi32(tmp1, tmp2);
+  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
+  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
+  return _mm_add_epi32(tmp0, tmp2);
+}
+#endif
+// Other reduction functions:
+
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., reusing pmul is very slow!)
+  // TODO try to call _mm_mul_epu32 directly
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::min after the pstore!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+  return aux0<aux2 ? aux0 : aux2;
+#endif // EIGEN_VECTORIZE_SSE4_1
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::max after the pstore!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+  return aux0>aux2 ?
aux0 : aux2; +#endif // EIGEN_VECTORIZE_SSE4_1 +} + +#if EIGEN_COMP_GNUC +// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) +// { +// Packet4f res = b; +// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c)); +// return res; +// } +// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i) +// { +// Packet4i res = a; +// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i)); +// return res; +// } +#endif + +#ifdef EIGEN_VECTORIZE_SSSE3 +// SSSE3 versions +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) + { + if (Offset!=0) + first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4)); + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) + { + if (Offset!=0) + first = _mm_alignr_epi8(second,first, Offset*4); + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) + { + if (Offset==1) + first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8)); + } +}; +#else +// SSE2 versions +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) + { + if (Offset==1) + { + first = _mm_move_ss(first,second); + first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39)); + } + else if (Offset==2) + { + first = _mm_movehl_ps(first,first); + first = _mm_movelh_ps(first,second); + } + else if (Offset==3) + { + first = _mm_move_ss(first,second); + first = _mm_shuffle_ps(first,second,0x93); + } + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) + { + if (Offset==1) + { + first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + first = _mm_shuffle_epi32(first,0x39); + } + else if (Offset==2) + { + first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first))); + first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + } + else if (Offset==3) + { + first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93)); + } + } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) + { + if (Offset==1) + { + first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first))); + first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second))); + } + } +}; +#endif + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]); + kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]); + kernel.packet[1] = tmp; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]); + __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]); + __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]); + __m128i T3 = 
_mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]); + + kernel.packet[0] = _mm_unpacklo_epi64(T0, T1); + kernel.packet[1] = _mm_unpackhi_epi64(T0, T1); + kernel.packet[2] = _mm_unpacklo_epi64(T2, T3); + kernel.packet[3] = _mm_unpackhi_epi64(T2, T3); +} + +template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) { + const __m128i zero = _mm_setzero_si128(); + const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m128i false_mask = _mm_cmpeq_epi32(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_epi8(thenPacket, elsePacket, false_mask); +#else + return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + const __m128 zero = _mm_setzero_ps(); + const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m128 false_mask = _mm_cmpeq_ps(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_ps(thenPacket, elsePacket, false_mask); +#else + return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) { + const __m128d zero = _mm_setzero_pd(); + const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]); + __m128d false_mask = _mm_cmpeq_pd(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_pd(thenPacket, elsePacket, false_mask); +#else + return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blend_ps(a,pset1(b),1); +#else + return _mm_move_ss(a, _mm_load_ss(&b)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blend_pd(a,pset1(b),1); +#else + return _mm_move_sd(a, _mm_load_sd(&b)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blend_ps(a,pset1(b),(1<<3)); +#else + const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF)); + return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1(b))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blend_pd(a,pset1(b),(1<<1)); +#else + const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF)); + return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1(b))); +#endif +} + +// Scalar path for pmadd with FMA to ensure consistency with vectorized path. +#ifdef __FMA__ +template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) { + return ::fmaf(a,b,c); +} +template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) { + return ::fma(a,b,c); +} +#endif + +} // end namespace internal + +} // end namespace Eigen + +#if EIGEN_COMP_PGI +// PGI++ does not define the following intrinsics in C++ mode. 
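+// These fallbacks are pure bit-pattern reinterpretations; no conversion code is
+// emitted. A sketch of the intended use (editorial, values hypothetical):
+//   __m128d d = _mm_set1_pd(1.0);
+//   __m128i i = _mm_castpd_si128(d);   // same 128 bits, now typed as integers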
+static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); } +static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); } +static inline __m128d _mm_castps_pd (__m128 x) { return reinterpret_cast<__m128d&>(x); } +static inline __m128i _mm_castps_si128(__m128 x) { return reinterpret_cast<__m128i&>(x); } +static inline __m128 _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x); } +static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); } +#endif + +#endif // EIGEN_PACKET_MATH_SSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/TypeCasting.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/TypeCasting.h new file mode 100644 index 000000000..c6ca8c716 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/SSE/TypeCasting.h @@ -0,0 +1,77 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TYPE_CASTING_SSE_H +#define EIGEN_TYPE_CASTING_SSE_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_VECTORIZE_AVX +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 2, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 2 + }; +}; +#endif + +template<> EIGEN_STRONG_INLINE Packet4i pcast(const Packet4f& a) { + return _mm_cvttps_epi32(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet4i& a) { + return _mm_cvtepi32_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet2d& a, const Packet2d& b) { + return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6)); +} + +template<> EIGEN_STRONG_INLINE Packet2d pcast(const Packet4f& a) { + // Simply discard the second half of the input + return _mm_cvtps_pd(a); +} + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_SSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/Complex.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/Complex.h new file mode 100644 index 000000000..1bfb73397 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/Complex.h @@ -0,0 +1,397 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX32_ALTIVEC_H
+#define EIGEN_COMPLEX32_ALTIVEC_H
+
+namespace Eigen {
+
+namespace internal {
+
+static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8);//{ 0x0000000000000000, 0x8000000000000000 };
+
+struct Packet1cd
+{
+  EIGEN_STRONG_INLINE Packet1cd() {}
+  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+  Packet2d v;
+};
+
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE Packet2cf() {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+  union {
+    Packet4f v;
+    Packet1cd cd[2];
+  };
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+  typedef Packet2cf type;
+  typedef Packet2cf half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasBlend = 1,
+    HasSetLinear = 0
+  };
+};
+
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+  typedef Packet1cd type;
+  typedef Packet1cd half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 1,
+    HasHalfPacket = 0,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 1,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+
+/* Forward declaration */
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+  Packet2cf res;
+  res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
+  res.cd[1] = res.cd[0];
+  return res;
+}
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>,
Packet2cf>(const std::complex* from, Index stride) +{ + std::complex EIGEN_ALIGN16 af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, Index stride EIGEN_UNUSED) +{ + return pload(from); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) +{ + std::complex EIGEN_ALIGN16 af[2]; + pstore >((std::complex *) af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, Index stride EIGEN_UNUSED) +{ + pstore >(to, from); +} + +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); } +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) +{ + Packet2cf res; + res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast(a.v.v4f[0]))).v; + res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast(a.v.v4f[1]))).v; + return res; +} + +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) +{ + Packet2d a_re, a_im, v1, v2; + + // Permute and multiply the real parts of a and b + a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI); + // Get the imaginary parts of a + a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO); + // multiply a_re * b + v1 = vec_madd(a_re, b.v, p2d_ZERO); + // multiply a_im * b and get the conjugate result + v2 = vec_madd(a_im, b.v, p2d_ZERO); + v2 = (Packet2d) vec_sld((Packet4ui)v2, (Packet4ui)v2, 8); + v2 = (Packet2d) vec_xor((Packet2d)v2, (Packet2d) p2ul_CONJ_XOR1); + + return Packet1cd(v1 + v2); +} +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + Packet2cf res; + res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast(a.v.v4f[0])), Packet1cd(reinterpret_cast(b.v.v4f[0]))).v; + res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast(a.v.v4f[1])), Packet1cd(reinterpret_cast(b.v.v4f[1]))).v; + return res; +} + +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor(a.v,b.v)); } +template<> 
EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ZVECTOR_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ZVECTOR_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + std::complex EIGEN_ALIGN16 res; + pstore >(&res, a); + + return res; +} +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + std::complex EIGEN_ALIGN16 res[2]; + pstore >(res, a); + + return res[0]; +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) +{ + Packet2cf res; + res.cd[0] = a.cd[1]; + res.cd[1] = a.cd[0]; + return res; +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) +{ + return pfirst(a); +} +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + std::complex res; + Packet1cd b = padd(a.cd[0], a.cd[1]); + vec_st2f(b.v, (float*)&res); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet1cd preduxp(const Packet1cd* vecs) +{ + return vecs[0]; +} +template<> EIGEN_STRONG_INLINE Packet2cf preduxp(const Packet2cf* vecs) +{ + PacketBlock transpose; + transpose.packet[0] = vecs[0]; + transpose.packet[1] = vecs[1]; + ptranspose(transpose); + + return padd(transpose.packet[0], transpose.packet[1]); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) +{ + return pfirst(a); +} +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + std::complex res; + Packet1cd b = pmul(a.cd[0], a.cd[1]); + vec_st2f(b.v, (float*)&res); + return res; +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) + { + // FIXME is it sure we never have to align a Packet1cd? + // Even though a std::complex has 16 bytes, it is not necessarily aligned on a 16 bytes boundary... 
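+    // Editorial note: a Packet1cd carries exactly one std::complex<double>, so
+    // the only meaningful Offset is 0 and run() can stay a no-op; the FIXME
+    // above only questions the 16-byte alignment assumption, not this logic.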
+ } +}; + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second) + { + if (Offset == 1) { + first.cd[0] = first.cd[1]; + first.cd[1] = second.cd[0]; + } + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return internal::pmul(a, pconj(b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return internal::pmul(pconj(a), b); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const + { + return pconj(internal::pmul(a, b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + return internal::pmul(a, pconj(b)); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + return internal::pmul(pconj(a), b); + } +}; + +template<> struct conj_helper +{ + EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const + { return padd(pmul(x,y),c); } + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const + { + return pconj(internal::pmul(a, b)); + } +}; + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + // TODO optimize it for AltiVec + Packet1cd res = conj_helper().pmul(a,b); + Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_); + return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64))); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + // TODO optimize it for AltiVec + Packet2cf res; + res.cd[0] = pdiv(a.cd[0], b.cd[0]); + res.cd[1] = pdiv(a.cd[1], b.cd[1]); + return res; +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_STRONG_INLINE Packet2cf pcplxflip/**/(const Packet2cf& x) +{ + Packet2cf res; + res.cd[0] = pcplxflip(x.cd[0]); + res.cd[1] = pcplxflip(x.cd[1]); + return res; +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet1cd tmp = kernel.packet[0].cd[1]; + kernel.packet[0].cd[1] = kernel.packet[1].cd[0]; + kernel.packet[1].cd[0] = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& 
thenPacket, const Packet2cf& elsePacket) { + Packet2cf result; + const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] }; + result.v = pblend(ifPacket4, thenPacket.v, elsePacket.v); + return result; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX32_ALTIVEC_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/MathFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/MathFunctions.h new file mode 100644 index 000000000..5c7aa7256 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/MathFunctions.h @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* The sin, cos, exp, and log functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H +#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); +static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); +static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); + +static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); +static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d pexp(const Packet2d& _x) +{ + Packet2d x = _x; + + Packet2d tmp, fx; + Packet2l emm0; + + // clamp x + x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); + /* express exp(x) as exp(g + n*log(2)) */ + fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half); + + fx = vec_floor(fx); + + tmp = pmul(fx, p2d_cephes_exp_C1); + Packet2d z = pmul(fx, p2d_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + Packet2d x2 = pmul(x,x); + + Packet2d px = p2d_cephes_exp_p0; + px = pmadd(px, x2, p2d_cephes_exp_p1); + px = pmadd(px, x2, p2d_cephes_exp_p2); + px = pmul (px, x); + + Packet2d qx = p2d_cephes_exp_q0; + qx = pmadd(qx, x2, p2d_cephes_exp_q1); + qx = pmadd(qx, x2, p2d_cephes_exp_q2); + qx = pmadd(qx, x2, p2d_cephes_exp_q3); + + x = pdiv(px,psub(qx,px)); + x = pmadd(p2d_2,x,p2d_1); + + // build 2^n + emm0 = vec_ctsl(fx, 0); + + 
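+  // Editorial note: the lines below synthesize 2^n directly in the IEEE-754
+  // exponent field: for a normal double, value = 2^(E-1023) * 1.mantissa, so
+  // writing (n + 1023) into bits 62..52 yields exactly 2^n. For example, n = 3
+  // gives (3 + 1023) << 52 == 0x4020000000000000, the bit pattern of 8.0.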
static const Packet2l p2l_1023 = { 1023, 1023 };
+  static const Packet2ul p2ul_52 = { 52, 52 };
+
+  emm0 = emm0 + p2l_1023;
+  emm0 = emm0 << reinterpret_cast<Packet2l>(p2ul_52);
+
+  // Altivec's max & min operators just drop silent NaNs. Check NaNs in
+  // inputs and return them unmodified.
+  Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));
+  return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),
+                 isnumber_mask);
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& x)
+{
+  Packet4f res;
+  res.v4f[0] = pexp(x.v4f[0]);
+  res.v4f[1] = pexp(x.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d psqrt<Packet2d>(const Packet2d& x)
+{
+  return __builtin_s390_vfsqdb(x);
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& x)
+{
+  Packet4f res;
+  res.v4f[0] = psqrt(x.v4f[0]);
+  res.v4f[1] = psqrt(x.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d prsqrt<Packet2d>(const Packet2d& x) {
+  // Unfortunately we can't use the much faster _mm_rsqrt_pd since it only provides an approximation.
+  return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f prsqrt<Packet4f>(const Packet4f& x) {
+  Packet4f res;
+  res.v4f[0] = prsqrt(x.v4f[0]);
+  res.v4f[1] = prsqrt(x.v4f[1]);
+  return res;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/PacketMath.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/PacketMath.h
new file mode 100644
index 000000000..57b01fc63
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/arch/ZVector/PacketMath.h
@@ -0,0 +1,945 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#ifndef EIGEN_PACKET_MATH_ZVECTOR_H +#define EIGEN_PACKET_MATH_ZVECTOR_H + +#include + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16 +#endif + +typedef __vector int Packet4i; +typedef __vector unsigned int Packet4ui; +typedef __vector __bool int Packet4bi; +typedef __vector short int Packet8i; +typedef __vector unsigned char Packet16uc; +typedef __vector double Packet2d; +typedef __vector unsigned long long Packet2ul; +typedef __vector long long Packet2l; + +typedef struct { + Packet2d v4f[2]; +} Packet4f; + +typedef union { + int32_t i[4]; + uint32_t ui[4]; + int64_t l[2]; + uint64_t ul[2]; + double d[2]; + Packet4i v4i; + Packet4ui v4ui; + Packet2l v2l; + Packet2ul v2ul; + Packet2d v2d; +} Packet; + +// We don't want to write the same code all the time, but we need to reuse the constants +// and it doesn't really work to declare them global, so we define macros instead + +#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = reinterpret_cast(vec_splat_s32(X)) + +#define _EIGEN_DECLARE_CONST_FAST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = reinterpret_cast(vec_splat_s64(X)) + +#define _EIGEN_DECLARE_CONST_FAST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = reinterpret_cast(vec_splat_s64(X)) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = pset1(X) + +// These constants are endian-agnostic +//static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1} + +static _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0); +static _EIGEN_DECLARE_CONST_FAST_Packet2l(ZERO, 0); +static _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1); + +static Packet2d p2d_ONE = { 1.0, 1.0 }; +static Packet2d p2d_ZERO_ = { -0.0, -0.0 }; + +static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }; +static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }; +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ZERO), reinterpret_cast(p2d_ONE), 8)); + +static Packet16uc p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }; + +// Mask alignment +#define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0 + +#define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT) + +// Handle endianness properly while loading constants +// Define global static constants: + +static Packet16uc p16uc_FORWARD = { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 }; +static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }; +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; + +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 
12,13,14,15, 12,13,14,15 }; +/*static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; + +static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };*/ +static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; +/*static Packet16uc p16uc_TRANSPOSE64_HI = vec_add(p16uc_PSET64_HI, p16uc_HALF64_0_16); //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0_16); //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};*/ +static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}; + +//static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; + +//static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; + + +#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC + #define EIGEN_ZVECTOR_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#else + #define EIGEN_ZVECTOR_PREFETCH(ADDR) asm( " pfd [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" ); +#endif + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasBlend = 1 + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; }; +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; }; + +/* Forward declaration */ +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel); + +inline std::ostream & operator <<(std::ostream & s, const Packet4i & v) +{ + Packet vt; + vt.v4i = v; + s << vt.i[0] << ", " << vt.i[1] << ", " << vt.i[2] << ", " << vt.i[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v) +{ + Packet vt; + vt.v4ui = v; + s << vt.ui[0] << ", " << vt.ui[1] << ", " << vt.ui[2] 
<< ", " << vt.ui[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2l & v) +{ + Packet vt; + vt.v2l = v; + s << vt.l[0] << ", " << vt.l[1]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2ul & v) +{ + Packet vt; + vt.v2ul = v; + s << vt.ul[0] << ", " << vt.ul[1] ; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2d & v) +{ + Packet vt; + vt.v2d = v; + s << vt.d[0] << ", " << vt.d[1]; + return s; +} + +/* Helper function to simulate a vec_splat_packet4f + */ +template EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from) +{ + Packet4f splat; + switch (element) { + case 0: + splat.v4f[0] = vec_splat(from.v4f[0], 0); + splat.v4f[1] = splat.v4f[0]; + break; + case 1: + splat.v4f[0] = vec_splat(from.v4f[0], 1); + splat.v4f[1] = splat.v4f[0]; + break; + case 2: + splat.v4f[0] = vec_splat(from.v4f[1], 0); + splat.v4f[1] = splat.v4f[0]; + break; + case 3: + splat.v4f[0] = vec_splat(from.v4f[1], 1); + splat.v4f[1] = splat.v4f[0]; + break; + } + return splat; +} + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) + { + switch (Offset % 4) { + case 1: + first = vec_sld(first, second, 4); break; + case 2: + first = vec_sld(first, second, 8); break; + case 3: + first = vec_sld(first, second, 12); break; + } + } +}; + +/* This is a tricky one, we have to translate float alignment to vector elements of sizeof double + */ +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) + { + switch (Offset % 4) { + case 1: + first.v4f[0] = vec_sld(first.v4f[0], first.v4f[1], 8); + first.v4f[1] = vec_sld(first.v4f[1], second.v4f[0], 8); + break; + case 2: + first.v4f[0] = first.v4f[1]; + first.v4f[1] = second.v4f[0]; + break; + case 3: + first.v4f[0] = vec_sld(first.v4f[1], second.v4f[0], 8); + first.v4f[1] = vec_sld(second.v4f[0], second.v4f[1], 8); + break; + } + } +}; + + +template +struct palign_impl +{ + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) + { + if (Offset == 1) + first = reinterpret_cast(vec_sld(reinterpret_cast(first), reinterpret_cast(second), 8)); + } +}; + +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet *vfrom; + vfrom = (Packet *) from; + return vfrom->v4i; +} + +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet4f vfrom; + vfrom.v4f[0] = vec_ld2f(&from[0]); + vfrom.v4f[1] = vec_ld2f(&from[2]); + return vfrom; +} + +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet *vfrom; + vfrom = (Packet *) from; + return vfrom->v2d; +} + +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + Packet *vto; + vto = (Packet *) to; + vto->v4i = from; +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + vec_st2f(from.v4f[0], &to[0]); + vec_st2f(from.v4f[1], &to[2]); +} + + +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + Packet *vto; + vto = (Packet *) to; + vto->v2d = from; +} + +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) +{ 
+  return vec_splats(from);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
+  return vec_splats(from);
+}
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+{
+  Packet4f to;
+  to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
+  to.v4f[1] = to.v4f[0];
+  return to;
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4i>(const int *a,
+                      Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3)
+{
+  a3 = pload<Packet4i>(a);
+  a0 = vec_splat(a3, 0);
+  a1 = vec_splat(a3, 1);
+  a2 = vec_splat(a3, 2);
+  a3 = vec_splat(a3, 3);
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+  a3 = pload<Packet4f>(a);
+  a0 = vec_splat_packet4f<0>(a3);
+  a1 = vec_splat_packet4f<1>(a3);
+  a2 = vec_splat_packet4f<2>(a3);
+  a3 = vec_splat_packet4f<3>(a3);
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet2d>(const double *a,
+                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
+{
+  a1 = pload<Packet2d>(a);
+  a0 = vec_splat(a1, 0);
+  a1 = vec_splat(a1, 1);
+  a3 = pload<Packet2d>(a+2);
+  a2 = vec_splat(a3, 0);
+  a3 = vec_splat(a3, 1);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
+{
+  int EIGEN_ALIGN16 ai[4];
+  ai[0] = from[0*stride];
+  ai[1] = from[1*stride];
+  ai[2] = from[2*stride];
+  ai[3] = from[3*stride];
+  return pload<Packet4i>(ai);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+  float EIGEN_ALIGN16 ai[4];
+  ai[0] = from[0*stride];
+  ai[1] = from[1*stride];
+  ai[2] = from[2*stride];
+  ai[3] = from[3*stride];
+  return pload<Packet4f>(ai);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
+{
+  double EIGEN_ALIGN16 af[2];
+  af[0] = from[0*stride];
+  af[1] = from[1*stride];
+  return pload<Packet2d>(af);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
+{
+  int EIGEN_ALIGN16 ai[4];
+  pstore<int>((int *)ai, from);
+  to[0*stride] = ai[0];
+  to[1*stride] = ai[1];
+  to[2*stride] = ai[2];
+  to[3*stride] = ai[3];
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+  float EIGEN_ALIGN16 ai[4];
+  pstore<float>((float *)ai, from);
+  to[0*stride] = ai[0];
+  to[1*stride] = ai[1];
+  to[2*stride] = ai[2];
+  to[3*stride] = ai[3];
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
+{
+  double EIGEN_ALIGN16 af[2];
+  pstore<double>(af, from);
+  to[0*stride] = af[0];
+  to[1*stride] = af[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f c;
+  c.v4f[0] = a.v4f[0] + b.v4f[0];
+  c.v4f[1] = a.v4f[1] + b.v4f[1];
+  return c;
+}
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f c;
+  c.v4f[0] = a.v4f[0] - b.v4f[0];
+  c.v4f[1] = a.v4f[1] - b.v4f[1];
+  return c;
+}
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f c;
+  c.v4f[0] = a.v4f[0] * b.v4f[0];
+  c.v4f[1] = a.v4f[1] * b.v4f[1];
+  return c;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f c;
+  c.v4f[0] = a.v4f[0] / b.v4f[0];
+  c.v4f[1] = a.v4f[1] / b.v4f[1];
+  return c;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pnegate<Packet4i>(const Packet4i& a) { return (-a); }
+template<> EIGEN_STRONG_INLINE Packet4f pnegate<Packet4f>(const Packet4f& a)
+{
+  Packet4f c;
+  c.v4f[0] = -a.v4f[0];
+  c.v4f[1] = -a.v4f[1];
+  return c;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pnegate<Packet2d>(const Packet2d& a) { return (-a); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{
+  Packet4f res;
+  res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);
+  res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
+  return res;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
+
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)    { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)  { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f res;
+  res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f res;
+  res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);
+  return res;
+}
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f res;
+  res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  // bitwise OR applied to each Packet2d half
+  Packet4f res;
+  res.v4f[0] = por(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = por(a.v4f[1], b.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  // bitwise XOR applied to each Packet2d half
+  Packet4f res;
+  res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f res;
+  res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);
+  res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
+{
+  Packet4f res;
+  res.v4f[0] = vec_round(a.v4f[0]);
+  res.v4f[1] = vec_round(a.v4f[1]);
+  return res;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+{
+  Packet4f res;
+  res.v4f[0] = vec_ceil(a.v4f[0]);
+  res.v4f[1] = vec_ceil(a.v4f[1]);
+  return res;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return vec_ceil(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{
+  Packet4f res;
+  res.v4f[0] = vec_floor(a.v4f[0]);
+  res.v4f[1] = vec_floor(a.v4f[1]);
+  return res;
+}
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { return pload<Packet4i>(from); }
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) { return pload<Packet4f>(from); }
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { return pload<Packet2d>(from); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+  Packet4i p = pload<Packet4i>(from);
+  return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+  Packet4f p = pload<Packet4f>(from);
+  p.v4f[1] = vec_splat(p.v4f[0], 1);
+  p.v4f[0] = vec_splat(p.v4f[0], 0);
+  return p;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
+{
+  Packet2d p = pload<Packet2d>(from);
+  return vec_perm(p, p, p16uc_PSET64_HI);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from) { pstore<int>(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from) { pstore<float>(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { pstore<double>(to, from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int    EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float  EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{
+  return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{
+  return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+  Packet4f rev;
+  rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);
+  rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);
+  return rev;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
+{
+  Packet4f res;
+  res.v4f[0] = pabs(a.v4f[0]);
+  res.v4f[1] = pabs(a.v4f[1]);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i b, sum;
+  b   = vec_sld(a, a, 8);
+  sum = padd<Packet4i>(a, b);
+  b   = vec_sld(sum, sum, 4);
+  sum = padd<Packet4i>(sum, b);
+  return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+  Packet2d b, sum;
+  b   = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));
+  sum = padd<Packet2d>(a, b);
+  return pfirst(sum);
+}
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet2d sum;
+  sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);
+  double first = predux<Packet2d>(sum);
+  return static_cast<float>(first);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  Packet4i v[4], sum[4];
+
+  // It's easier and faster to transpose then add as columns
+  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+  // Do the transpose, first set of moves
+  v[0] = vec_mergeh(vecs[0], vecs[2]);
+  v[1] = vec_mergel(vecs[0], vecs[2]);
+  v[2] = vec_mergeh(vecs[1], vecs[3]);
+  v[3] = vec_mergel(vecs[1], vecs[3]);
+  // Get the resulting vectors
+  sum[0] = vec_mergeh(v[0], v[2]);
+  sum[1] = vec_mergel(v[0], v[2]);
+  sum[2] = vec_mergeh(v[1], v[3]);
+  sum[3] = vec_mergel(v[1], v[3]);
+
+  // Now do the summation:
+  // Lines 0+1
+  sum[0] = padd<Packet4i>(sum[0], sum[1]);
+  // Lines 2+3
+  sum[1] = padd<Packet4i>(sum[2], sum[3]);
+  // Add the results
+  sum[0] = padd<Packet4i>(sum[0], sum[1]);
+
+  return sum[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  Packet2d v[2], sum;
+  v[0] = padd<Packet2d>(vecs[0], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(vecs[0]), reinterpret_cast<Packet4i>(vecs[0]), 8)));
+  v[1] = padd<Packet2d>(vecs[1], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(vecs[1]), reinterpret_cast<Packet4i>(vecs[1]), 8)));
+
+  sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(v[0]), reinterpret_cast<Packet4i>(v[1]), 8));
+
+  return sum;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  PacketBlock<Packet4f,4> transpose;
+  transpose.packet[0] = vecs[0];
+  transpose.packet[1] = vecs[1];
+  transpose.packet[2] = vecs[2];
+  transpose.packet[3] = vecs[3];
+  ptranspose(transpose);
+
+  Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);
+  sum = padd(sum, transpose.packet[2]);
+  sum = padd(sum, transpose.packet[3]);
+  return sum;
+}
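For reference, a minimal sketch of how the load/reduce kernels above are meant to be driven. The helper name is ours, and it only compiles on a target where this ZVector (s390x) path and its Packet4i type are active:

// Sum four ints via the pload/predux pair defined above.
// Hypothetical helper; `data` must be 16-byte aligned.
#include <Eigen/Core>
static int sum4(const int* data)
{
  using namespace Eigen::internal;
  Packet4i p = pload<Packet4i>(data); // {d0, d1, d2, d3}
  return predux(p);                   // d0 + d1 + d2 + d3
}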
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return aux[0] * aux[1] * aux[2] * aux[3];
+}
+
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+  return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  // Return predux_mul<Packet2d> of the subvectors product
+  return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
+}
+
+// min
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  Packet4i b, res;
+  b   = pmin<Packet4i>(a, vec_sld(a, a, 8));
+  res = pmin<Packet4i>(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+  return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  Packet2d b, res;
+  b   = pmin<Packet2d>(a.v4f[0], a.v4f[1]);
+  res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+  return static_cast<float>(pfirst(res));
+}
+
+// max
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  Packet4i b, res;
+  b   = pmax<Packet4i>(a, vec_sld(a, a, 8));
+  res = pmax<Packet4i>(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+  return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  Packet2d b, res;
+  b   = pmax<Packet2d>(a.v4f[0], a.v4f[1]);
+  res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+  return static_cast<float>(pfirst(res));
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4i,4>& kernel) {
+  Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+  Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+  Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+  Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+  kernel.packet[0] = vec_mergeh(t0, t2);
+  kernel.packet[1] = vec_mergel(t0, t2);
+  kernel.packet[2] = vec_mergeh(t1, t3);
+  kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet2d,2>& kernel) {
+  Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);
+  Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);
+  kernel.packet[0] = t0;
+  kernel.packet[1] = t1;
+}
+
+/* Split the Packet4f PacketBlock into 4 Packet2d PacketBlocks and transpose each one
+ */
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4f,4>& kernel) {
+  PacketBlock<Packet2d,2> t0,t1,t2,t3;
+  // copy top-left 2x2 Packet2d block
+  t0.packet[0] = kernel.packet[0].v4f[0];
+  t0.packet[1] = kernel.packet[1].v4f[0];
+
+  // copy top-right 2x2 Packet2d block
+  t1.packet[0] = kernel.packet[0].v4f[1];
+  t1.packet[1] = kernel.packet[1].v4f[1];
+
+  // copy bottom-left 2x2 Packet2d block
+  t2.packet[0] = kernel.packet[2].v4f[0];
+  t2.packet[1] = kernel.packet[3].v4f[0];
+
+  // copy bottom-right 2x2 Packet2d block
+  t3.packet[0] = kernel.packet[2].v4f[1];
+  t3.packet[1] = kernel.packet[3].v4f[1];
+
+  // Transpose all 2x2 blocks
+  ptranspose(t0);
+  ptranspose(t1);
+  ptranspose(t2);
+  ptranspose(t3);
+
+  // Copy back transposed blocks, but exchange t1 and t2 due to transposition
+  kernel.packet[0].v4f[0] = t0.packet[0];
+  kernel.packet[0].v4f[1] = t2.packet[0];
+  kernel.packet[1].v4f[0] = t0.packet[1];
+  kernel.packet[1].v4f[1] = t2.packet[1];
+  kernel.packet[2].v4f[0] = t1.packet[0];
+  kernel.packet[2].v4f[1] = t3.packet[0];
+  kernel.packet[3].v4f[0] = t1.packet[1];
+  kernel.packet[3].v4f[1] = t3.packet[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+  Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+  Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
+  return vec_sel(elsePacket, thenPacket, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
+  Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };
+  Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };
+  Packet2ul mask_hi = vec_cmpeq(select_hi, reinterpret_cast<Packet2ul>(p2l_ONE));
+  Packet2ul mask_lo = vec_cmpeq(select_lo, reinterpret_cast<Packet2ul>(p2l_ONE));
+  Packet4f result;
+  result.v4f[0] = vec_sel(elsePacket.v4f[0], thenPacket.v4f[0], mask_hi);
+  result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
+  Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+  Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+  return vec_sel(elsePacket, thenPacket, mask);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_ZVECTOR_H
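Note that on this path Packet4f emulates a four-float packet with two Packet2d halves (v4f[0]/v4f[1]), so every specialization above simply forwards to both halves. A hedged sketch of a fused multiply-add built only from kernels defined in this file (the function name is ours; ZVector build with 16-byte aligned buffers assumed):

// y <- a*x + y for four floats at a time, via pset1/pload/pmadd/pstore above.
#include <Eigen/Core>
static void axpy4(float* y, const float* x, float a)
{
  using namespace Eigen::internal;
  Packet4f pa = pset1<Packet4f>(a);
  Packet4f r  = pmadd(pa, pload<Packet4f>(x), pload<Packet4f>(y));
  pstore(y, r);
}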
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/AssignmentFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/AssignmentFunctors.h
new file mode 100644
index 000000000..4153b877c
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/AssignmentFunctors.h
@@ -0,0 +1,168 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ASSIGNMENT_FUNCTORS_H
+#define EIGEN_ASSIGNMENT_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment
+  *
+  */
+template<typename DstScalar,typename SrcScalar> struct assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a = b; }
+
+  template<int Alignment, typename Packet>
+  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+  { internal::pstoret<DstScalar,Packet,Alignment>(a,b); }
+};
+
+// Empty overload for void type (used by PermutationMatrix)
+template<typename DstScalar> struct assign_op<DstScalar,void> {};
+
+template<typename DstScalar,typename SrcScalar>
+struct functor_traits<assign_op<DstScalar,SrcScalar> > {
+  enum {
+    Cost = NumTraits<DstScalar>::ReadCost,
+    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::Vectorizable && packet_traits<SrcScalar>::Vectorizable
+  };
+};
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment with addition
+  *
+  */
+template<typename DstScalar,typename SrcScalar> struct add_assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(add_assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; }
+
+  template<int Alignment, typename Packet>
+  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); }
+};
+template<typename DstScalar,typename SrcScalar>
+struct functor_traits<add_assign_op<DstScalar,SrcScalar> > {
+  enum {
+    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,
+    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasAdd
+  };
+};
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment with subtraction
+  *
+  */
+template<typename DstScalar,typename SrcScalar> struct sub_assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a -= b; }
+
+  template<int Alignment, typename Packet>
+  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); }
+};
+template<typename DstScalar,typename SrcScalar>
+struct functor_traits<sub_assign_op<DstScalar,SrcScalar> > {
+  enum {
+    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,
+    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasSub
+  };
+};
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment with multiplication
+  *
+  */
+template<typename DstScalar, typename SrcScalar=DstScalar>
+struct mul_assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(mul_assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a *= b; }
+
+  template<int Alignment, typename Packet>
+  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::pmul(internal::ploadt<Packet,Alignment>(a),b)); }
+};
+template<typename DstScalar, typename SrcScalar>
+struct functor_traits<mul_assign_op<DstScalar,SrcScalar> > {
+  enum {
+    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
+    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasMul
+  };
+};
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment with dividing
+  *
+  */
+template<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(div_assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a /= b; }
+
+  template<int Alignment, typename Packet>
+  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::pdiv(internal::ploadt<Packet,Alignment>(a),b)); }
+};
+template<typename DstScalar, typename SrcScalar>
+struct functor_traits<div_assign_op<DstScalar,SrcScalar> > {
+  enum {
+    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
+    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv
+  };
+};
+
+/** \internal
+  * \brief Template functor for scalar/packet assignment with swapping
+  *
+  * It works as follows. For a non-vectorized evaluation loop, we have:
+  *   for(i) func(A.coeffRef(i), B.coeff(i));
+  * where B is a SwapWrapper expression. The trick is to make SwapWrapper::coeff behave like a non-const coeffRef.
+  * Actually, SwapWrapper might not even be needed: if B is a plain expression, it has to be writable and
+  * B.coeff already returns a const reference to the underlying scalar value.
+  *
+  * The case of a vectorized loop is more tricky:
+  *   for(i,j) func.assignPacket<A_Align>(&A.coeffRef(i,j), B.packet<B_Align>(i,j));
+  * Here, B must be a SwapWrapper whose packet function actually returns a proxy object holding a Scalar*,
+  * the actual alignment and Packet type.
+  *
+  */
+template<typename Scalar> struct swap_assign_op {
+
+  EIGEN_EMPTY_STRUCT_CTOR(swap_assign_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const
+  {
+#ifdef __CUDACC__
+    // FIXME is there some kind of cuda::swap?
+    Scalar t=b; const_cast<Scalar&>(b)=a; a=t;
+#else
+    using std::swap;
+    swap(a,const_cast<Scalar&>(b));
+#endif
+  }
+};
+template<typename Scalar>
+struct functor_traits<swap_assign_op<Scalar> > {
+  enum {
+    Cost = 3 * NumTraits<Scalar>::ReadCost,
+    PacketAccess = packet_traits<Scalar>::Vectorizable
+  };
+};
+
+} // namespace internal
+
+} // namespace Eigen
+
+#endif // EIGEN_ASSIGNMENT_FUNCTORS_H
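A hedged sketch of how Eigen's assignment loops drive these functors: the same functor object handles one coefficient (assignCoeff) or one packet (assignPacket). The helper name, buffers, and loop structure below are ours, not Eigen's actual evaluator:

// dst[i] += src[i] over n doubles, vectorized body plus scalar tail.
#include <Eigen/Core>
static void add_in_place(double* dst, const double* src, int n)
{
  using namespace Eigen::internal;
  add_assign_op<double,double> func;
  typedef packet_traits<double>::type Packet;
  const int ps = unpacket_traits<Packet>::size;
  int i = 0;
  for (; i + ps <= n; i += ps)   // packet path: load, padd, store
    func.template assignPacket<Eigen::Unaligned>(dst + i, ploadu<Packet>(src + i));
  for (; i < n; ++i)             // scalar tail
    func.assignCoeff(dst[i], src[i]);
}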
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/BinaryFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/BinaryFunctors.h
new file mode 100644
index 000000000..3eae6b8ca
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/BinaryFunctors.h
@@ -0,0 +1,475 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BINARY_FUNCTORS_H
+#define EIGEN_BINARY_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- associative binary functors ----------
+
+template<typename Arg1, typename Arg2>
+struct binary_op_base
+{
+  typedef Arg1 first_argument_type;
+  typedef Arg2 second_argument_type;
+};
+
+/** \internal
+  * \brief Template functor to compute the sum of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum()
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_sum_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_sum_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
+#else
+  scalar_sum_op() {
+    EIGEN_SCALAR_BINARY_OP_PLUGIN
+  }
+#endif
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::padd(a,b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_sum_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, // rough estimate!
+    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasAdd && packet_traits<RhsScalar>::HasAdd
+    // TODO vectorize mixed sum
+  };
+};
+
+/** \internal
+  * \brief Template specialization to deprecate the summation of boolean expressions.
+  * This is required to solve Bug 426.
+  * \sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast()
+  */
+template<> struct scalar_sum_op<bool,bool> : scalar_sum_op<int,int> {
+  EIGEN_DEPRECATED
+  scalar_sum_op() {}
+};
+
+
+/** \internal
+  * \brief Template functor to compute the product of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_product_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_product_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
+#else
+  scalar_product_op() {
+    EIGEN_SCALAR_BINARY_OP_PLUGIN
+  }
+#endif
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmul(a,b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux_mul(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
+    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
+    // TODO vectorize mixed product
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the conjugate product of two scalars
+  *
+  * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y)
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_conj_product_op : binary_op_base<LhsScalar,RhsScalar>
+{
+
+  enum {
+    Conj = NumTraits<LhsScalar>::IsComplex
+  };
+
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_conj_product_op>::ReturnType result_type;
+
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
+
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = NumTraits<LhsScalar>::MulCost,
+    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the min of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_min_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_min_op>::ReturnType result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::mini(a, b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmin(a,b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux_min(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_min_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMin
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the max of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_max_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_max_op>::ReturnType result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::maxi(a, b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmax(a,b); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux_max(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_max_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMax
+  };
+};
+
+/** \internal
+  * \brief Template functors for comparison of two scalars
+  * \todo Implement packet-comparisons
+  */
+template<typename LhsScalar, typename RhsScalar, ComparisonName cmp> struct scalar_cmp_op;
+
+template<typename LhsScalar, typename RhsScalar, ComparisonName cmp>
+struct functor_traits<scalar_cmp_op<LhsScalar,RhsScalar, cmp> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+    PacketAccess = false
+  };
+};
+
+template<typename LhsScalar, typename RhsScalar, ComparisonName cmp>
+struct result_of<scalar_cmp_op<LhsScalar, RhsScalar, cmp>(LhsScalar,RhsScalar)> {
+  typedef bool type;
+};
+
+
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_EQ> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a==b;}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_LT> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a<b;}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_LE> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a<=b;}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_GT> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a>b;}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_GE> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a>=b;}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_UNORD> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return !(a<=b || b<=a);}
+};
+template<typename LhsScalar, typename RhsScalar>
+struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_NEQ> : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef bool result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a!=b;}
+};
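Each ComparisonName selects one specialization above; these functors back coefficient-wise expressions such as (a.array() < b.array()). A hedged sketch of calling them directly (function name is ours):

#include <Eigen/Core>
#include <limits>
static bool demo_cmp()
{
  using namespace Eigen::internal;
  scalar_cmp_op<float,float,cmp_LT>    lt;
  scalar_cmp_op<float,float,cmp_UNORD> unord;
  bool a = lt(1.0f, 2.0f);                                          // true
  bool b = unord(std::numeric_limits<float>::quiet_NaN(), 0.0f);    // true: NaN compares unordered
  return a && b;
}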
+
+/** \internal
+  * \brief Template functor to compute the hypot of two \b positive \b and \b real scalars
+  *
+  * \sa MatrixBase::stableNorm(), class Redux
+  */
+template<typename Scalar>
+struct scalar_hypot_op<Scalar,Scalar> : binary_op_base<Scalar,Scalar>
+{
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar &x, const Scalar &y) const
+  {
+    // This functor is used by hypotNorm only for which it is faster to first apply abs
+    // on all coefficients prior to reduction through hypot.
+    // This way we avoid calling abs on positive and real entries, and this also permits
+    // to seamlessly handle complexes. Otherwise we would have to handle both real and complexes
+    // through the same functor...
+    return internal::positive_real_hypot<Scalar>(x,y);
+  }
+};
+template<typename Scalar>
+struct functor_traits<scalar_hypot_op<Scalar,Scalar> > {
+  enum
+  {
+    Cost = 3 * NumTraits<Scalar>::AddCost +
+           2 * NumTraits<Scalar>::MulCost +
+           2 * scalar_div_cost<Scalar,false>::value,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the pow of two scalars
+  */
+template<typename Scalar, typename Exponent>
+struct scalar_pow_op : binary_op_base<Scalar,Exponent>
+{
+  typedef typename ScalarBinaryOpTraits<Scalar,Exponent,scalar_pow_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_pow_op)
+#else
+  scalar_pow_op() {
+    typedef Scalar LhsScalar;
+    typedef Exponent RhsScalar;
+    EIGEN_SCALAR_BINARY_OP_PLUGIN
+  }
+#endif
+  EIGEN_DEVICE_FUNC
+  inline result_type operator() (const Scalar& a, const Exponent& b) const { return numext::pow(a, b); }
+};
+template<typename Scalar, typename Exponent>
+struct functor_traits<scalar_pow_op<Scalar,Exponent> > {
+  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+
+
+
+//---------- non associative binary functors ----------
+
+/** \internal
+  * \brief Template functor to compute the difference of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator-
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_difference_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_difference_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
+#else
+  scalar_difference_op() {
+    EIGEN_SCALAR_BINARY_OP_PLUGIN
+  }
+#endif
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a - b; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::psub(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_difference_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasSub && packet_traits<RhsScalar>::HasSub
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the quotient of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator/()
+  */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_quotient_op : binary_op_base<LhsScalar,RhsScalar>
+{
+  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_quotient_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
+#else
+  scalar_quotient_op() {
+    EIGEN_SCALAR_BINARY_OP_PLUGIN
+  }
+#endif
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pdiv(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
+  typedef typename scalar_quotient_op<LhsScalar,RhsScalar>::result_type result_type;
+  enum {
+    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv,
+    Cost = scalar_div_cost<result_type,PacketAccess>::value
+  };
+};
+
+
+
+/** \internal
+  * \brief Template functor to compute the and of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator&&
+  */
+struct scalar_boolean_and_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }
+};
+template<> struct functor_traits<scalar_boolean_and_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the or of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator||
+  */
+struct scalar_boolean_or_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }
+};
+template<> struct functor_traits<scalar_boolean_or_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the xor of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator^
+  */
+struct scalar_boolean_xor_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_xor_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a ^ b; }
+};
+template<> struct functor_traits<scalar_boolean_xor_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+
+
+//---------- binary functors bound to a constant, thus appearing as a unary functor ----------
+
+// The following two classes permit to turn any binary functor into a unary one with one argument bound to a constant value.
+// They are analogues to std::binder1st/binder2nd but with the following differences:
+//  - they are compatible with packetOp
+//  - they are portable across C++ versions (the std::binder* are deprecated in C++11)
+template<typename BinaryOp> struct bind1st_op : BinaryOp {
+
+  typedef typename BinaryOp::first_argument_type  first_argument_type;
+  typedef typename BinaryOp::second_argument_type second_argument_type;
+  typedef typename BinaryOp::result_type          result_type;
+
+  bind1st_op(const first_argument_type &val) : m_value(val) {}
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const second_argument_type& b) const { return BinaryOp::operator()(m_value,b); }
+
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& b) const
+  { return BinaryOp::packetOp(internal::pset1<Packet>(m_value), b); }
+
+  first_argument_type m_value;
+};
+template<typename BinaryOp> struct functor_traits<bind1st_op<BinaryOp> > : functor_traits<BinaryOp> {};
+
+
+template<typename BinaryOp> struct bind2nd_op : BinaryOp {
+
+  typedef typename BinaryOp::first_argument_type  first_argument_type;
+  typedef typename BinaryOp::second_argument_type second_argument_type;
+  typedef typename BinaryOp::result_type          result_type;
+
+  bind2nd_op(const second_argument_type &val) : m_value(val) {}
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const first_argument_type& a) const { return BinaryOp::operator()(a,m_value); }
+
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return BinaryOp::packetOp(a,internal::pset1<Packet>(m_value)); }
+
+  second_argument_type m_value;
+};
+template<typename BinaryOp> struct functor_traits<bind2nd_op<BinaryOp> > : functor_traits<BinaryOp> {};
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BINARY_FUNCTORS_H
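A hedged sketch of the bound-functor mechanism just defined: bind2nd_op turns the binary sum functor into an "add a constant" unary functor, which is how expressions like (array + 3.0f) are implemented. The helper name is ours:

#include <Eigen/Core>
static float plus_three(float x)
{
  using namespace Eigen::internal;
  typedef bind2nd_op<scalar_sum_op<float,float> > AddConst;
  AddConst op(3.0f);   // second argument bound to 3.0f
  return op(x);        // x + 3.0f
}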
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/NullaryFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/NullaryFunctors.h
new file mode 100644
index 000000000..b03be0269
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/NullaryFunctors.h
@@ -0,0 +1,188 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NULLARY_FUNCTORS_H
+#define EIGEN_NULLARY_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar>
+struct scalar_constant_op {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() () const { return m_other; }
+  template<typename PacketType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packetOp() const { return internal::pset1<PacketType>(m_other); }
+  const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_constant_op<Scalar> >
+{ enum { Cost = 0 /* as the constant value should be loaded in register only once for the whole expression */,
+         PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };
+
+template<typename Scalar> struct scalar_identity_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)
+  template<typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType row, IndexType col) const { return row==col ? Scalar(1) : Scalar(0); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_identity_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
+
+template <typename Scalar, typename Packet, bool IsInteger> struct linspaced_op_impl;
+
+template <typename Scalar, typename Packet>
+struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
+{
+  linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
+    m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
+    m_flip(numext::abs(high)<numext::abs(low))
+  {}
+
+  template<typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    if(m_flip)
+      return (i==0)? m_low : (m_high - RealScalar(m_size1-i)*m_step);
+    else
+      return (i==m_size1)? m_high : (m_low + RealScalar(i)*m_step);
+  }
+
+  template<typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const
+  {
+    // Principle:
+    // [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
+    if(m_flip)
+    {
+      Packet pi = plset<Packet>(Scalar(i-m_size1));
+      Packet res = padd(pset1<Packet>(m_high), pmul(pset1<Packet>(m_step), pi));
+      if(i==0)
+        res = pinsertfirst(res, m_low);
+      return res;
+    }
+    else
+    {
+      Packet pi = plset<Packet>(Scalar(i));
+      Packet res = padd(pset1<Packet>(m_low), pmul(pset1<Packet>(m_step), pi));
+      if(i==m_size1-unpacket_traits<Packet>::size+1)
+        res = pinsertlast(res, m_high);
+      return res;
+    }
+  }
+
+  const Scalar m_low;
+  const Scalar m_high;
+  const Index  m_size1;
+  const Scalar m_step;
+  const bool   m_flip;
+};
+
+template <typename Scalar, typename Packet>
+struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
+{
+  linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
+    m_low(low),
+    m_multiplier((high-low)/convert_index<Scalar>(num_steps<=1 ? 1 : num_steps-1)),
+    m_divisor(convert_index<Scalar>((high>=low?num_steps:-num_steps)+(high-low))/((numext::abs(high-low)+1)==0?1:(numext::abs(high-low)+1))),
+    m_use_divisor(num_steps>1 && (numext::abs(high-low)+1)<num_steps)
+  {}
+
+  template<typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  const Scalar operator() (IndexType i) const
+  {
+    if(m_use_divisor) return m_low + convert_index<Scalar>(i)/m_divisor;
+    else              return m_low + convert_index<Scalar>(i)*m_multiplier;
+  }
+
+  const Scalar m_low;
+  const Scalar m_multiplier;
+  const Scalar m_divisor;
+  const bool   m_use_divisor;
+};
+
+// ----- Linspace functor ----------------------------------------------------------------
+
+// Forward declaration (we default to random access which does not really give
+// us a speed gain when using packet access but it allows to use the functor in
+// nested expressions).
+template <typename Scalar, typename PacketType> struct linspaced_op;
+template <typename Scalar, typename PacketType> struct functor_traits< linspaced_op<Scalar,PacketType> >
+{
+  enum
+  {
+    Cost = 1,
+    PacketAccess =   (!NumTraits<Scalar>::IsInteger) && packet_traits<Scalar>::HasSetLinear && packet_traits<Scalar>::HasBlend,
+                  /*&& ((!NumTraits<Scalar>::IsInteger) || packet_traits<Scalar>::HasDiv),*/ // <- vectorization for integer is currently disabled
+    IsRepeatable = true
+  };
+};
+template <typename Scalar, typename PacketType> struct linspaced_op
+{
+  linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)
+    : impl((num_steps==1 ? high : low),high,num_steps)
+  {}
+
+  template<typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return impl(i); }
+
+  template<typename Packet,typename IndexType>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.packetOp(i); }
+
+  // This proxy object handles the actual required temporaries and the different
+  // implementations (integer vs. floating point).
+  const linspaced_op_impl<Scalar,PacketType,NumTraits<Scalar>::IsInteger> impl;
+};
+
+// Linear access is automatically determined from the operator() prototypes available for the given functor.
+// If it exposes an operator()(i,j), then we assume the i and j coefficients are required independently
+// and linear access is not possible. In all other cases, linear access is enabled.
+// Users should not have to deal with this structure.
+template<typename Functor> struct functor_has_linear_access { enum { ret = !has_binary_operator<Functor>::value }; };
+
+// For unreliable compilers, let's specialize the has_*ary_operator
+// helpers so that at least built-in nullary functors work fine.
+#if !( (EIGEN_COMP_MSVC>1600) || (EIGEN_GNUC_AT_LEAST(4,8)) || (EIGEN_COMP_ICC>=1600))
+template<typename Scalar,typename IndexType>
+struct has_nullary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 1}; };
+template<typename Scalar,typename IndexType>
+struct has_unary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_binary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 0}; };
+
+template<typename Scalar,typename IndexType>
+struct has_nullary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_unary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_binary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 1}; };
+
+template<typename Scalar, typename PacketType,typename IndexType>
+struct has_nullary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
+template<typename Scalar, typename PacketType,typename IndexType>
+struct has_unary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 1}; };
+template<typename Scalar, typename PacketType,typename IndexType>
+struct has_binary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
+
+template<typename Scalar,typename IndexType>
+struct has_nullary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 1}; };
+template<typename Scalar,typename IndexType>
+struct has_unary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_binary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 0}; };
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_NULLARY_FUNCTORS_H
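A worked example of the floating-point path above: with low=0, high=1 and num_steps=5, m_step = (1-0)/4 = 0.25, so operator()(i) yields 0, 0.25, 0.5, 0.75 and exactly `high` for the last index. Through the public API that wraps linspaced_op:

#include <Eigen/Core>
static void demo_linspaced()
{
  // LinSpaced is the user-facing front end of linspaced_op.
  Eigen::ArrayXf v = Eigen::ArrayXf::LinSpaced(5, 0.0f, 1.0f);
  // v = [0, 0.25, 0.5, 0.75, 1]
}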
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/StlFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/StlFunctors.h
new file mode 100644
index 000000000..9c1d75850
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/StlFunctors.h
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STL_FUNCTORS_H
+#define EIGEN_STL_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+// default functor traits for STL functors:
+
+template<typename T>
+struct functor_traits<std::multiplies<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::divides<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::plus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::minus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::negate<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_or<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_and<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_not<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::not_equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+#if (__cplusplus < 201103L) && (EIGEN_COMP_MSVC <= 1900)
+// std::binder* are deprecated since c++11 and will be removed in c++17
+template<typename T>
+struct functor_traits<std::binder2nd<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder1st<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+#endif
+
+#if (__cplusplus < 201703L) && (EIGEN_COMP_MSVC < 1910)
+// std::unary_negate is deprecated since c++17 and will be removed in c++20
+template<typename T>
+struct functor_traits<std::unary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+// std::binary_negate is deprecated since c++17 and will be removed in c++20
+template<typename T>
+struct functor_traits<std::binary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+#endif
+
+#ifdef EIGEN_STDEXT_SUPPORT
+
+template<typename T0,typename T1>
+struct functor_traits<std::project1st<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::project2nd<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select2nd<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select1st<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::unary_compose<T0,T1> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };
+
+template<typename T0,typename T1,typename T2>
+struct functor_traits<std::binary_compose<T0,T1,T2> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };
+
+#endif // EIGEN_STDEXT_SUPPORT
+
+// allow to add new functors and specializations of functor_traits from outside Eigen.
+// this macro is really needed because functor_traits must be specialized after it is declared but before it is used...
+#ifdef EIGEN_FUNCTORS_PLUGIN
+#include EIGEN_FUNCTORS_PLUGIN
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_STL_FUNCTORS_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/TernaryFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/TernaryFunctors.h
new file mode 100644
index 000000000..b254e96c6
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/TernaryFunctors.h
@@ -0,0 +1,25 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TERNARY_FUNCTORS_H
+#define EIGEN_TERNARY_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- associative ternary functors ----------
+
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TERNARY_FUNCTORS_H
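The EIGEN_FUNCTORS_PLUGIN hook in StlFunctors.h above textually includes a user header inside namespace Eigen::internal, so custom functor_traits specializations land exactly where they must be declared. A hedged sketch of such a plugin file (the functor and file name are ours, not part of Eigen):

// my_functors.h -- hypothetical plugin, injected via
//   -DEIGEN_FUNCTORS_PLUGIN='"my_functors.h"'
struct my_clamp01_op {
  EIGEN_EMPTY_STRUCT_CTOR(my_clamp01_op)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator()(const float& x) const
  { return x < 0.f ? 0.f : (x > 1.f ? 1.f : x); }
};
template<> struct functor_traits<my_clamp01_op>
{ enum { Cost = 2 * NumTraits<float>::AddCost, PacketAccess = false }; };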
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/UnaryFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/UnaryFunctors.h
new file mode 100644
index 000000000..2e6a00ffd
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/functors/UnaryFunctors.h
@@ -0,0 +1,792 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_UNARY_FUNCTORS_H
+#define EIGEN_UNARY_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+  * \brief Template functor to compute the opposite of a scalar
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator-
+  */
+template<typename Scalar> struct scalar_opposite_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pnegate(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_opposite_op<Scalar> >
+{ enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasNegate };
+};
+
+/** \internal
+  * \brief Template functor to compute the absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs
+  */
+template<typename Scalar> struct scalar_abs_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs(a); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pabs(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasAbs
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the score of a scalar, to choose a pivot
+  *
+  * \sa class CwiseUnaryOp
+  */
+template<typename Scalar> struct scalar_score_coeff_op : scalar_abs_op<Scalar>
+{
+  typedef void Score_is_abs;
+};
+template<typename Scalar>
+struct functor_traits<scalar_score_coeff_op<Scalar> > : functor_traits<scalar_abs_op<Scalar> > {};
+
+/* Avoid recomputing abs when we know the score and they are the same. Not a true Eigen functor.  */
+template<typename Scalar, typename=void> struct abs_knowing_score
+{
+  EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  template<typename Score>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a, const Score&) const { return numext::abs(a); }
+};
+template<typename Scalar> struct abs_knowing_score<Scalar, typename scalar_score_coeff_op<Scalar>::Score_is_abs>
+{
+  EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  template<typename Scal>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scal&, const result_type& a) const { return a; }
+};
+
+/** \internal
+  * \brief Template functor to compute the squared absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs2
+  */
+template<typename Scalar> struct scalar_abs2_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs2_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };
+
+/** \internal
+  * \brief Template functor to compute the conjugate of a complex value
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::conjugate()
+  */
+template<typename Scalar> struct scalar_conjugate_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_conjugate_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+    PacketAccess = packet_traits<Scalar>::HasConj
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the phase angle of a complex
+  *
+  * \sa class CwiseUnaryOp, Cwise::arg
+  */
+template<typename Scalar> struct scalar_arg_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_arg_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using numext::arg; return arg(a); }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::parg(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_arg_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::IsComplex ? 5 * NumTraits<Scalar>::MulCost : NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasArg
+  };
+};
+/** \internal
+  * \brief Template functor to cast a scalar to another type
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::cast()
+  */
+template<typename Scalar, typename NewType>
+struct scalar_cast_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef NewType result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }
+};
+template<typename Scalar, typename NewType>
+struct functor_traits<scalar_cast_op<Scalar,NewType> >
+{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
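These functors back the coefficient-wise public API. A hedged sketch of the correspondence (function name is ours):

#include <Eigen/Core>
static void demo_unary()
{
  using namespace Eigen;
  Array3f a(-1.f, 2.f, -3.f);
  Array3f b = a.abs();           // applies internal::scalar_abs_op<float> per coefficient
  Array3d c = a.cast<double>();  // applies internal::scalar_cast_op<float,double>
}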
Packet packetOp(const Packet& a) const { return internal::pexp(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasExp, + // The following numbers are based on the AVX implementation. +#ifdef EIGEN_VECTORIZE_FMA + // Haswell can issue 2 add/mul/madd per cycle. + Cost = + (sizeof(Scalar) == 4 + // float: 8 pmadd, 4 pmul, 2 padd/psub, 6 other + ? (8 * NumTraits::AddCost + 6 * NumTraits::MulCost) + // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other + : (14 * NumTraits::AddCost + + 6 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value)) +#else + Cost = + (sizeof(Scalar) == 4 + // float: 7 pmadd, 6 pmul, 4 padd/psub, 10 other + ? (21 * NumTraits::AddCost + 13 * NumTraits::MulCost) + // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other + : (23 * NumTraits::AddCost + + 12 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value)) +#endif + }; +}; + +/** \internal + * + * \brief Template functor to compute the logarithm of a scalar + * + * \sa class CwiseUnaryOp, ArrayBase::log() + */ +template struct scalar_log_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasLog, + Cost = + (PacketAccess + // The following numbers are based on the AVX implementation. +#ifdef EIGEN_VECTORIZE_FMA + // 8 pmadd, 6 pmul, 8 padd/psub, 16 other, can issue 2 add/mul/madd per cycle. + ? (20 * NumTraits::AddCost + 7 * NumTraits::MulCost) +#else + // 8 pmadd, 6 pmul, 8 padd/psub, 20 other + ? (36 * NumTraits::AddCost + 14 * NumTraits::MulCost) +#endif + // Measured cost of std::log. + : sizeof(Scalar)==4 ? 
+
+/** \internal
+  *
+  * \brief Template functor to compute the logarithm of 1 plus a scalar value
+  *
+  * \sa class CwiseUnaryOp, ArrayBase::log1p()
+  */
+template<typename Scalar> struct scalar_log1p_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_log1p_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log1p(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog1p(a); }
+};
+template <typename Scalar>
+struct functor_traits<scalar_log1p_op<Scalar> > {
+  enum {
+    PacketAccess = packet_traits<Scalar>::HasLog1p,
+    Cost = functor_traits<scalar_log_op<Scalar> >::Cost // TODO measure cost of log1p
+  };
+};
+
+/** \internal
+  *
+  * \brief Template functor to compute the base-10 logarithm of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::log10()
+  */
+template<typename Scalar> struct scalar_log10_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_log10_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { EIGEN_USING_STD_MATH(log10) return log10(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog10(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_log10_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog10 }; };
+
+/** \internal
+  * \brief Template functor to compute the square root of a scalar
+  * \sa class CwiseUnaryOp, Cwise::sqrt()
+  */
+template<typename Scalar> struct scalar_sqrt_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sqrt(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
+};
+template <typename Scalar>
+struct functor_traits<scalar_sqrt_op<Scalar> > {
+  enum {
+#if EIGEN_FAST_MATH
+    // The following numbers are based on the AVX implementation.
+    Cost = (sizeof(Scalar) == 8 ? 28
+                                // 4 pmul, 1 pmadd, 3 other
+                                : (3 * NumTraits<Scalar>::AddCost +
+                                   5 * NumTraits<Scalar>::MulCost)),
+#else
+    // The following numbers are based on min VSQRT throughput on Haswell.
+    Cost = (sizeof(Scalar) == 8 ? 28 : 14),
+#endif
+    PacketAccess = packet_traits<Scalar>::HasSqrt
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the reciprocal square root of a scalar
+  * \sa class CwiseUnaryOp, Cwise::rsqrt()
+  */
+template<typename Scalar> struct scalar_rsqrt_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_rsqrt_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return Scalar(1)/numext::sqrt(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::prsqrt(a); }
+};
+
+template<typename Scalar>
+struct functor_traits<scalar_rsqrt_op<Scalar> >
+{ enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasRsqrt
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::cos()
+  */
+template<typename Scalar> struct scalar_cos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
+  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return numext::cos(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasCos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::sin()
+  */
+template<typename Scalar> struct scalar_sin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sin(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSin
+  };
+};
+
+
+/** \internal
+  * \brief Template functor to compute the tan of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::tan()
+  */
+template<typename Scalar> struct scalar_tan_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::tan(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_tan_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasTan
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::acos()
+  */
+template<typename Scalar> struct scalar_acos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::acos(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_acos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasACos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::asin()
+  */
+template<typename Scalar> struct scalar_asin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::asin(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_asin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasASin
+  };
+};
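+
+// rsqrt above is the element-wise reciprocal square root, i.e. x -> 1/sqrt(x), so
+// rsqrt of {1, 4, 16} is {1, 0.5, 0.25}. A disabled sketch, for illustration only:
+#if 0
+#include <Eigen/Core>
+int main() {
+  Eigen::ArrayXf x(3);
+  x << 1.f, 4.f, 16.f;
+  Eigen::ArrayXf y = x.rsqrt(); // {1, 0.5, 0.25}; uses prsqrt when HasRsqrt is set
+  return 0;
+}
+#endif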
+
+/** \internal
+  * \brief Template functor to compute the atan of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::atan()
+  */
+template<typename Scalar> struct scalar_atan_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_atan_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::atan(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::patan(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_atan_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasATan
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the tanh of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::tanh()
+  */
+template <typename Scalar>
+struct scalar_tanh_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_tanh_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::tanh(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x) const { return ptanh(x); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_tanh_op<Scalar> > {
+  enum {
+    PacketAccess = packet_traits<Scalar>::HasTanh,
+    Cost = ( (EIGEN_FAST_MATH && is_same<Scalar,float>::value)
+// The following numbers are based on the AVX implementation,
+#ifdef EIGEN_VECTORIZE_FMA
+             // Haswell can issue 2 add/mul/madd per cycle.
+             // 9 pmadd, 2 pmul, 1 div, 2 other
+             ? (2 * NumTraits<Scalar>::AddCost +
+                6 * NumTraits<Scalar>::MulCost +
+                scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value)
+#else
+             ? (11 * NumTraits<Scalar>::AddCost +
+                11 * NumTraits<Scalar>::MulCost +
+                scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value)
+#endif
+             // This number assumes a naive implementation of tanh
+             : (6 * NumTraits<Scalar>::AddCost +
+                3 * NumTraits<Scalar>::MulCost +
+                2 * scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value +
+                functor_traits<scalar_exp_op<Scalar> >::Cost))
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the sinh of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::sinh()
+  */
+template<typename Scalar> struct scalar_sinh_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sinh_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sinh(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psinh(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sinh_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSinh
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the cosh of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::cosh()
+  */
+template<typename Scalar> struct scalar_cosh_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cosh_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::cosh(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcosh(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cosh_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasCosh
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the inverse of a scalar
+  * \sa class CwiseUnaryOp, Cwise::inverse()
+  */
+template<typename Scalar>
+struct scalar_inverse_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)
+  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
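+
+// scalar_inverse_op is the element-wise reciprocal x -> 1/x; its packet path broadcasts
+// the constant 1 with pset1 and divides with pdiv, i.e. one division per coefficient.
+// A disabled sketch, for illustration only:
+#if 0
+#include <Eigen/Core>
+int main() {
+  Eigen::ArrayXf a(3);
+  a << 2.f, 4.f, 8.f;
+  Eigen::ArrayXf r = a.inverse(); // {0.5, 0.25, 0.125}
+  return 0;
+}
+#endif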
+
+/** \internal
+  * \brief Template functor to compute the square of a scalar
+  * \sa class CwiseUnaryOp, Cwise::square()
+  */
+template<typename Scalar>
+struct scalar_square_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
+  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_square_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+/** \internal
+  * \brief Template functor to compute the cube of a scalar
+  * \sa class CwiseUnaryOp, Cwise::cube()
+  */
+template<typename Scalar>
+struct scalar_cube_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
+  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a*a; }
+  template<typename Packet>
+  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,pmul(a,a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cube_op<Scalar> >
+{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+/** \internal
+  * \brief Template functor to compute the rounded value of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::round()
+  */
+template<typename Scalar> struct scalar_round_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_round_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::round(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pround(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_round_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasRound
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the floor of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::floor()
+  */
+template<typename Scalar> struct scalar_floor_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_floor_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::floor(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pfloor(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_floor_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasFloor
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the ceil of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::ceil()
+  */
+template<typename Scalar> struct scalar_ceil_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_ceil_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::ceil(a); }
+  template <typename Packet>
+  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pceil(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_ceil_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasCeil
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute whether a scalar is NaN
+  * \sa class CwiseUnaryOp, ArrayBase::isnan()
+  */
+template<typename Scalar> struct scalar_isnan_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op)
+  typedef bool result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isnan)(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_isnan_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to check whether a scalar is +/-inf
+  * \sa class CwiseUnaryOp, ArrayBase::isinf()
+  */
+template<typename Scalar> struct scalar_isinf_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op)
+  typedef bool result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isinf)(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_isinf_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = false
+  };
+};
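+
+// The isnan/isinf predicates return plain bool and are never vectorized
+// (PacketAccess = false). Direct use of the functor — a disabled sketch:
+#if 0
+#include <Eigen/Core>
+#include <limits>
+#include <cassert>
+int main() {
+  Eigen::internal::scalar_isnan_op<float> isnan_op;
+  assert( isnan_op(std::numeric_limits<float>::quiet_NaN()) );
+  assert( !isnan_op(1.0f) );
+  return 0;
+}
+#endif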
+
+/** \internal
+  * \brief Template functor to check whether a scalar has a finite value
+  * \sa class CwiseUnaryOp, ArrayBase::isfinite()
+  */
+template<typename Scalar> struct scalar_isfinite_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op)
+  typedef bool result_type;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isfinite)(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_isfinite_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::MulCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the logical not of a boolean
+  *
+  * \sa class CwiseUnaryOp, ArrayBase::operator!
+  */
+template<typename Scalar> struct scalar_boolean_not_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_not_op)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a) const { return !a; }
+};
+template<typename Scalar>
+struct functor_traits<scalar_boolean_not_op<Scalar> > {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the signum of a scalar
+  * \sa class CwiseUnaryOp, Cwise::sign()
+  */
+template<typename Scalar,bool iscpx=(NumTraits<Scalar>::IsComplex!=0) > struct scalar_sign_op;
+template<typename Scalar>
+struct scalar_sign_op<Scalar,false> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
+  {
+      return Scalar( (a>Scalar(0)) - (a<Scalar(0)) );
+  }
+  //TODO
+  //template <typename Packet>
+  //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }
+};
+template<typename Scalar>
+struct scalar_sign_op<Scalar,true> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
+  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
+  {
+    typedef typename NumTraits<Scalar>::Real real_type;
+    real_type aa = numext::abs(a);
+    if (aa==real_type(0))
+      return Scalar(0);
+    aa = real_type(1)/aa;
+    return Scalar(real(a)*aa, imag(a)*aa );
+  }
+  //TODO
+  //template <typename Packet>
+  //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sign_op<Scalar> >
+{ enum {
+    Cost =
+        NumTraits<Scalar>::IsComplex
+            ? ( 8*NumTraits<Scalar>::MulCost  ) // roughly
+            : ( 3*NumTraits<Scalar>::AddCost),
+    PacketAccess = packet_traits<Scalar>::HasSign
+  };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_FUNCTORS_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
new file mode 100644
index 000000000..e3980f6ff
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -0,0 +1,2156 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
+#define EIGEN_GENERAL_BLOCK_PANEL_H
+
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+class gebp_traits;
+
+
+/** \internal \returns b if a<=0, and returns a otherwise. */
+inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
+{
+  return a<=0 ? b : a;
+}
+
+#if EIGEN_ARCH_i386_OR_x86_64
+const std::ptrdiff_t defaultL1CacheSize = 32*1024;
+const std::ptrdiff_t defaultL2CacheSize = 256*1024;
+const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
+#else
+const std::ptrdiff_t defaultL1CacheSize = 16*1024;
+const std::ptrdiff_t defaultL2CacheSize = 512*1024;
+const std::ptrdiff_t defaultL3CacheSize = 512*1024;
+#endif
+
+/** \internal */
+struct CacheSizes {
+  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
+    int l1CacheSize, l2CacheSize, l3CacheSize;
+    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
+    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
+    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
+    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
+  }
+
+  std::ptrdiff_t m_l1;
+  std::ptrdiff_t m_l2;
+  std::ptrdiff_t m_l3;
+};
+
+
+/** \internal */
+inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
+{
+  static CacheSizes m_cacheSizes;
+
+  if(action==SetAction)
+  {
+    // set the cpu cache size and cache all block sizes from a global cache size in byte
+    eigen_internal_assert(l1!=0 && l2!=0);
+    m_cacheSizes.m_l1 = *l1;
+    m_cacheSizes.m_l2 = *l2;
+    m_cacheSizes.m_l3 = *l3;
+  }
+  else if(action==GetAction)
+  {
+    eigen_internal_assert(l1!=0 && l2!=0);
+    *l1 = m_cacheSizes.m_l1;
+    *l2 = m_cacheSizes.m_l2;
+    *l3 = m_cacheSizes.m_l3;
+  }
+  else
+  {
+    eigen_internal_assert(false);
+  }
+}
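+
+// manage_caching_sizes backs Eigen's public cache-size helpers; if this Eigen version
+// exposes Eigen::setCpuCacheSizes()/l1CacheSize() (stock Eigen 3.3 does), the detected
+// sizes can be overridden as in this disabled sketch (illustration only):
+#if 0
+#include <Eigen/Core>
+int main() {
+  // Force the blocking heuristic below to assume a 32KB L1, 256KB L2 and 4MB L3:
+  Eigen::setCpuCacheSizes(32*1024, 256*1024, 4*1024*1024);
+  std::ptrdiff_t l1 = Eigen::l1CacheSize(); // reads back through GetAction
+  (void)l1;
+  return 0;
+}
+#endif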
+
+/* Helper for computeProductBlockingSizes.
+ *
+ * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
+ * this function computes the blocking size parameters along the respective dimensions
+ * for matrix products and related algorithms. The blocking sizes depends on various
+ * parameters:
+ * - the L1 and L2 cache sizes,
+ * - the register level blocking sizes defined by gebp_traits,
+ * - the number of scalars that fit into a packet (when vectorization is enabled).
+ *
+ * \sa setCpuCacheSizes */
+
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
+void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
+{
+  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+  // Explanations:
+  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
+  // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
+  // per mr x kc horizontal small panels where mr is the blocking size along the m dimension
+  // at the register level. This small horizontal panel has to stay within L1 cache.
+  std::ptrdiff_t l1, l2, l3;
+  manage_caching_sizes(GetAction, &l1, &l2, &l3);
+
+  if (num_threads > 1) {
+    typedef typename Traits::ResScalar ResScalar;
+    enum {
+      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
+      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
+      kr = 8,
+      mr = Traits::mr,
+      nr = Traits::nr
+    };
+    // Increasing k gives us more time to prefetch the content of the "C"
+    // registers. However once the latency is hidden there is no point in
+    // increasing the value of k, so we'll cap it at 320 (value determined
+    // experimentally).
+    const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);
+    if (k_cache < k) {
+      k = k_cache - (k_cache % kr);
+      eigen_internal_assert(k > 0);
+    }
+
+    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
+    const Index n_per_thread = numext::div_ceil(n, num_threads);
+    if (n_cache <= n_per_thread) {
+      // Don't exceed the capacity of the l2 cache.
+      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
+      n = n_cache - (n_cache % nr);
+      eigen_internal_assert(n > 0);
+    } else {
+      n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
+    }
+
+    if (l3 > l2) {
+      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
+      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
+      const Index m_per_thread = numext::div_ceil(m, num_threads);
+      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
+        m = m_cache - (m_cache % mr);
+        eigen_internal_assert(m > 0);
+      } else {
+        m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
+      }
+    }
+  }
+  else {
+    // In unit tests we do not want to use extra large matrices,
+    // so we reduce the cache size to check the blocking strategy is not flawed
+#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
+    l1 = 9*1024;
+    l2 = 32*1024;
+    l3 = 512*1024;
+#endif
+
+    // Early return for small problems because the computation below are time consuming for small problems.
+    // Perhaps it would make more sense to consider k*n*m??
+    // Note that for very tiny problem, this function should be bypassed anyway
+    // because we use the coefficient-based implementation for them.
+    if((numext::maxi)(k,(numext::maxi)(m,n))<48)
+      return;
+
+    typedef typename Traits::ResScalar ResScalar;
+    enum {
+      k_peeling = 8,
+      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
+      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
+    };
+
+    // ---- 1st level of blocking on L1, yields kc ----
+
+    // Blocking on the third dimension (i.e., k) is chosen so that an horizontal panel
+    // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fits within L1 cache.
+    // We also include a register-level block of the result (mx x nr).
+    // (In an ideal world only the lhs panel would stay in L1)
+    // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
+    const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
+    const Index old_k = k;
+    if(k>max_kc)
+    {
+      // We are really blocking on the third dimension:
+      // -> reduce blocking size to make sure the last block is as large as possible
+      //    while keeping the same number of sweeps over the result.
+      k = (k%max_kc)==0 ? max_kc
+                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));
+
+      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
+    }
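+    // Worked example with illustrative AVX+FMA float numbers (mr=24, nr=4, l1=32KB):
+    //   k_div = 1*(24*4 + 4*4) = 112,  k_sub = 24*4*4 = 384,
+    //   max_kc = ((32768 - 384)/112) & ~7 = 289 & ~7 = 288,
+    // i.e. a 24 x 288 float lhs panel (27648B) plus a 288 x 4 rhs strip (4608B)
+    // and the 384B register block of the result stay within the 32KB L1 budget.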
+
+    // ---- 2nd level of blocking on max(L2,L3), yields nc ----
+
+    // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
+    //      actual_l2 = max(l2, l3/nb_core_sharing_l3)
+    // The number below is quite conservative: it is better to underestimate the cache size rather than overestimating it)
+    // For instance, it corresponds to 6MB of L3 shared among 4 cores.
+    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
+    const Index actual_l2 = l3;
+    #else
+    const Index actual_l2 = 1572864; // == 1.5 MB
+    #endif
+
+    // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2.
+    // The second half is implicitly reserved to access the result and lhs coefficients.
+    // When k<max_kc, what remains of the L1 budget after the lhs block can host part of the rhs:
+    Index max_nc;
+    const Index lhs_bytes = m * k * sizeof(LhsScalar);
+    const Index remaining_l1 = l1 - k_sub - lhs_bytes;
+    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
+    {
+      // L1 blocking
+      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
+    }
+    else
+    {
+      // L2 blocking
+      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
+    }
+    // WARNING Below, we assume that Traits::nr is a power of two.
+    Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
+    if(n>nc)
+    {
+      // We are really blocking over the columns:
+      // -> reduce blocking size to make sure the last block is as large as possible
+      //    while keeping the same number of sweeps over the packed lhs.
+      //    Here we allow one more sweep if this gives us a perfect match, thus the commented "-1"
+      n = (n%nc)==0 ? nc
+                    : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
+    }
+    else if(old_k==k)
+    {
+      // So far, no blocking at all, i.e., kc==k, and nc==n.
+      // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2
+      // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
+      Index problem_size = k*n*sizeof(LhsScalar);
+      Index actual_lm = actual_l2;
+      Index max_mc = m;
+      if(problem_size<=1024)
+      {
+        // problem is small enough to keep in L1
+        // Let's choose m such that lhs's block fit in 1/3 of L1
+        actual_lm = l1;
+      }
+      else if(l3!=0 && problem_size<=32768)
+      {
+        // we have both L2 and L3, and problem is small enough to be kept in L2
+        // Let's choose m such that lhs's block fit in 1/3 of L2
+        actual_lm = l2;
+        max_mc = (numext::mini<Index>)(576,max_mc);
+      }
+      Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
+      if (mc > Traits::mr) mc -= mc % Traits::mr;
+      else if (mc==0) return;
+      m = (m%mc)==0 ? mc
+                    : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
+    }
+  }
+}
+
+template <typename Index>
+inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
+{
+#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
+  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
+    k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
+    m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
+    n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
+    return true;
+  }
+#else
+  EIGEN_UNUSED_VARIABLE(k)
+  EIGEN_UNUSED_VARIABLE(m)
+  EIGEN_UNUSED_VARIABLE(n)
+#endif
+  return false;
+}
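+
+// The EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_{K,M,N} macros above let the blocking strategy
+// be pinned from the build line, e.g. (illustrative flags, not mandated by this file):
+//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZES=1
+//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K=16
+//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M=16
+//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N=16
+// which makes useSpecificBlockingSizes() clamp k, m and n to 16 and skip the heuristic.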
+
+/** \brief Computes the blocking parameters for a m x k times k x n matrix product
+  *
+  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
+  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
+  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
+  *
+  * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
+  * this function computes the blocking size parameters along the respective dimensions
+  * for matrix products and related algorithms.
+  *
+  * The blocking size parameters may be evaluated:
+  *   - either by a heuristic based on cache sizes;
+  *   - or using fixed prescribed values (for testing purposes).
+  *
+  * \sa setCpuCacheSizes */
+
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
+void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
+{
+  if (!useSpecificBlockingSizes(k, m, n)) {
+    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
+  }
+}
+
+template<typename LhsScalar, typename RhsScalar, typename Index>
+inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
+{
+  computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
+}
+
+#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
+  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
+#else
+
+  // FIXME (a bit overkill maybe ?)
+
+  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
+    {
+      c = cj.pmadd(a,b,c);
+    }
+  };
+
+  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
+    {
+      t = b; t = cj.pmul(a,t); c = padd(c,t);
+    }
+  };
+
+  template<typename CJ, typename A, typename B, typename C, typename T>
+  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
+  {
+    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
+  }
+
+  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
+//   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
+#endif
+
+/* Vectorization logic
+ *  real*real: unpack rhs to constant packets, ...
+ *
+ *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
+ *          storing each res packet into two packets (2x2),
+ *          at the end combine them: swap the second and addsub them
+ *  cf*cf : same but with 2x4 blocks
+ *  cplx*real : unpack rhs to constant packets, ...
+ *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
+ */
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits
+{
+public:
+  typedef _LhsScalar LhsScalar;
+  typedef _RhsScalar RhsScalar;
+  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+
+    // register block size along the N direction must be 1 or 4
+    nr = 4,
+
+    // register block size along the M direction (currently, this one cannot be modified)
+    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
+#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
+    // we assume 16 registers
+    // See bug 992, if the scalar type is not vectorizable but that EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
+    // then using 3*LhsPacketSize triggers non-implemented paths in syrk.
+    mr = Vectorizable ?
3*LhsPacketSize : default_mr, +#else + mr = default_mr, +#endif + + LhsProgress = LhsPacketSize, + RhsProgress = 1 + }; + + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; + + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; + + typedef ResPacket AccPacket; + + EIGEN_STRONG_INLINE void initAcc(AccPacket& p) + { + p = pset1(ResScalar(0)); + } + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + pbroadcast4(b, b0, b1, b2, b3); + } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// pbroadcast2(b, b0, b1); +// } + + template + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const + { + dest = pset1(*b); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + dest = ploadquad(b); + } + + template + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const + { + dest = pload(a); + } + + template + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const + { + dest = ploadu(a); + } + + template + EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const + { + conj_helper cj; + // It would be a lot cleaner to call pmadd all the time. Unfortunately if we + // let gcc allocate the register in which to store the result of the pmul + // (in the case where there is no FMA) gcc fails to figure out how to avoid + // spilling register. +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c = cj.pmadd(a,b,c); +#else + tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp); +#endif + } + + EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const + { + r = pmadd(c,alpha,r); + } + + template + EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const + { + r = pmadd(c,alpha,r); + } + +}; + +template +class gebp_traits, RealScalar, _ConjLhs, false> +{ +public: + typedef std::complex LhsScalar; + typedef RealScalar RhsScalar; + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + + enum { + ConjLhs = _ConjLhs, + ConjRhs = false, + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable, + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? 
packet_traits::size : 1, + + NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, + nr = 4, +#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) + // we assume 16 registers + mr = 3*LhsPacketSize, +#else + mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize, +#endif + + LhsProgress = LhsPacketSize, + RhsProgress = 1 + }; + + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; + + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; + + typedef ResPacket AccPacket; + + EIGEN_STRONG_INLINE void initAcc(AccPacket& p) + { + p = pset1(ResScalar(0)); + } + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + { + dest = pset1(*b); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + dest = pset1(*b); + } + + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const + { + dest = pload(a); + } + + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploadu(a); + } + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + pbroadcast4(b, b0, b1, b2, b3); + } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// pbroadcast2(b, b0, b1); +// } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const + { + madd_impl(a, b, c, tmp, typename conditional::type()); + } + + EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const + { +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c.v = pmadd(a.v,b,c.v); +#else + tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp); +#endif + } + + EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const + { + c += a * b; + } + + EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const + { + r = cj.pmadd(c,alpha,r); + } + +protected: + conj_helper cj; +}; + +template +struct DoublePacket +{ + Packet first; + Packet second; +}; + +template +DoublePacket padd(const DoublePacket &a, const DoublePacket &b) +{ + DoublePacket res; + res.first = padd(a.first, b.first); + res.second = padd(a.second,b.second); + return res; +} + +template +const DoublePacket& predux_downto4(const DoublePacket &a) +{ + return a; +} + +template struct unpacket_traits > { typedef DoublePacket half; }; +// template +// DoublePacket pmadd(const DoublePacket &a, const DoublePacket &b) +// { +// DoublePacket res; +// res.first = padd(a.first, b.first); +// res.second = padd(a.second,b.second); +// return res; +// } + +template +class gebp_traits, std::complex, _ConjLhs, _ConjRhs > +{ +public: + typedef std::complex Scalar; + typedef std::complex LhsScalar; + typedef std::complex RhsScalar; + typedef std::complex ResScalar; + + enum { + ConjLhs = _ConjLhs, + ConjRhs = _ConjRhs, + Vectorizable = packet_traits::Vectorizable + && packet_traits::Vectorizable, + RealPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1, + LhsPacketSize = Vectorizable ? 
packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + + // FIXME: should depend on NumberOfRegisters + nr = 4, + mr = ResPacketSize, + + LhsProgress = ResPacketSize, + RhsProgress = 1 + }; + + typedef typename packet_traits::type RealPacket; + typedef typename packet_traits::type ScalarPacket; + typedef DoublePacket DoublePacketType; + + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; + typedef typename conditional::type AccPacket; + + EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); } + + EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p) + { + p.first = pset1(RealScalar(0)); + p.second = pset1(RealScalar(0)); + } + + // Scalar path + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const + { + dest = pset1(*b); + } + + // Vectorized path + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const + { + dest.first = pset1(real(*b)); + dest.second = pset1(imag(*b)); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const + { + loadRhs(b,dest); + } + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const + { + eigen_internal_assert(unpacket_traits::size<=4); + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + // FIXME not sure that's the best way to implement it! + loadRhs(b+0, b0); + loadRhs(b+1, b1); + loadRhs(b+2, b2); + loadRhs(b+3, b3); + } + + // Vectorized path + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1) + { + // FIXME not sure that's the best way to implement it! + loadRhs(b+0, b0); + loadRhs(b+1, b1); + } + + // Scalar path + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1) + { + // FIXME not sure that's the best way to implement it! 
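+    // Why two real packets suffice: with a = a_r + i*a_i and b = b_r + i*b_i,
+    //   a*b = (a_r*b_r - a_i*b_i) + i*(a_r*b_i + a_i*b_r).
+    // loadRhs splits b into the constant packets (b_r,...,b_r) and (b_i,...,b_i);
+    // madd accumulates a*b_r into c.first and a*b_i into c.second, and acc() later
+    // recombines the two halves with pcplxflip/pconj into the complex result packet.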
+ loadRhs(b+0, b0); + loadRhs(b+1, b1); + } + + // nothing special here + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const + { + dest = pload((const typename unpacket_traits::type*)(a)); + } + + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploadu((const typename unpacket_traits::type*)(a)); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const + { + c.first = padd(pmul(a,b.first), c.first); + c.second = padd(pmul(a,b.second),c.second); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const + { + c = cj.pmadd(a,b,c); + } + + EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; } + + EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const + { + // assemble c + ResPacket tmp; + if((!ConjLhs)&&(!ConjRhs)) + { + tmp = pcplxflip(pconj(ResPacket(c.second))); + tmp = padd(ResPacket(c.first),tmp); + } + else if((!ConjLhs)&&(ConjRhs)) + { + tmp = pconj(pcplxflip(ResPacket(c.second))); + tmp = padd(ResPacket(c.first),tmp); + } + else if((ConjLhs)&&(!ConjRhs)) + { + tmp = pcplxflip(ResPacket(c.second)); + tmp = padd(pconj(ResPacket(c.first)),tmp); + } + else if((ConjLhs)&&(ConjRhs)) + { + tmp = pcplxflip(ResPacket(c.second)); + tmp = psub(pconj(ResPacket(c.first)),tmp); + } + + r = pmadd(tmp,alpha,r); + } + +protected: + conj_helper cj; +}; + +template +class gebp_traits, false, _ConjRhs > +{ +public: + typedef std::complex Scalar; + typedef RealScalar LhsScalar; + typedef Scalar RhsScalar; + typedef Scalar ResScalar; + + enum { + ConjLhs = false, + ConjRhs = _ConjRhs, + Vectorizable = packet_traits::Vectorizable + && packet_traits::Vectorizable, + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1, + + NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, + // FIXME: should depend on NumberOfRegisters + nr = 4, + mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize, + + LhsProgress = ResPacketSize, + RhsProgress = 1 + }; + + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; + + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; + + typedef ResPacket AccPacket; + + EIGEN_STRONG_INLINE void initAcc(AccPacket& p) + { + p = pset1(ResScalar(0)); + } + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + { + dest = pset1(*b); + } + + void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + pbroadcast4(b, b0, b1, b2, b3); + } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// // FIXME not sure that's the best way to implement it! 
+// b0 = pload1(b+0); +// b1 = pload1(b+1); +// } + + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploaddup(a); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + eigen_internal_assert(unpacket_traits::size<=4); + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploaddup(a); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const + { + madd_impl(a, b, c, tmp, typename conditional::type()); + } + + EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const + { +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c.v = pmadd(a,b.v,c.v); +#else + tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp); +#endif + + } + + EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const + { + c += a * b; + } + + EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const + { + r = cj.pmadd(alpha,c,r); + } + +protected: + conj_helper cj; +}; + +/* optimized GEneral packed Block * packed Panel product kernel + * + * Mixing type logic: C += A * B + * | A | B | comments + * |real |cplx | no vectorization yet, would require to pack A with duplication + * |cplx |real | easy vectorization + */ +template +struct gebp_kernel +{ + typedef gebp_traits Traits; + typedef typename Traits::ResScalar ResScalar; + typedef typename Traits::LhsPacket LhsPacket; + typedef typename Traits::RhsPacket RhsPacket; + typedef typename Traits::ResPacket ResPacket; + typedef typename Traits::AccPacket AccPacket; + + typedef gebp_traits SwappedTraits; + typedef typename SwappedTraits::ResScalar SResScalar; + typedef typename SwappedTraits::LhsPacket SLhsPacket; + typedef typename SwappedTraits::RhsPacket SRhsPacket; + typedef typename SwappedTraits::ResPacket SResPacket; + typedef typename SwappedTraits::AccPacket SAccPacket; + + typedef typename DataMapper::LinearMapper LinearMapper; + + enum { + Vectorizable = Traits::Vectorizable, + LhsProgress = Traits::LhsProgress, + RhsProgress = Traits::RhsProgress, + ResPacketSize = Traits::ResPacketSize + }; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, + Index rows, Index depth, Index cols, ResScalar alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +EIGEN_DONT_INLINE +void gebp_kernel + ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, + Index rows, Index depth, Index cols, ResScalar alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + Traits traits; + SwappedTraits straits; + + if(strideA==-1) strideA = depth; + if(strideB==-1) strideB = depth; + conj_helper cj; + Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; + const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0; + const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0; + const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? 
(rows/(1*LhsProgress))*(1*LhsProgress) : 0; + enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell) + const Index peeled_kc = depth & ~(pk-1); + const Index prefetch_res_offset = 32/sizeof(ResScalar); +// const Index depth2 = depth & ~1; + + //---------- Process 3 * LhsProgress rows at once ---------- + // This corresponds to 3*LhsProgress x nr register blocks. + // Usually, make sense only with FMA + if(mr>=3*Traits::LhsProgress) + { + // Here, the general idea is to loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth) + // and on each largest micro vertical panel of the rhs (depth * nr). + // Blocking sizes, i.e., 'depth' has been computed so that the micro horizontal panel of the lhs fit in L1. + // However, if depth is too small, we can extend the number of rows of these horizontal panels. + // This actual number of rows is computed as follow: + const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function. + // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size + // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess), + // or because we are testing specific blocking sizes. + const Index actual_panel_rows = (3*LhsProgress) * std::max(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) )); + for(Index i1=0; i1(alpha); + + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r0.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8, alphav, R2); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r0.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r1.loadPacket(0 * Traits::ResPacketSize); + R1 = r1.loadPacket(1 * Traits::ResPacketSize); + R2 = r1.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C1, alphav, R0); + traits.acc(C5, alphav, R1); + traits.acc(C9, alphav, R2); + r1.storePacket(0 * Traits::ResPacketSize, R0); + r1.storePacket(1 * Traits::ResPacketSize, R1); + r1.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r2.loadPacket(0 * Traits::ResPacketSize); + R1 = r2.loadPacket(1 * Traits::ResPacketSize); + R2 = r2.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C10, alphav, R2); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r2.storePacket(1 * Traits::ResPacketSize, R1); + r2.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r3.loadPacket(0 * Traits::ResPacketSize); + R1 = r3.loadPacket(1 * Traits::ResPacketSize); + R2 = r3.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C3, alphav, R0); + traits.acc(C7, alphav, R1); + traits.acc(C11, alphav, R2); + r3.storePacket(0 * Traits::ResPacketSize, R0); + r3.storePacket(1 * Traits::ResPacketSize, R1); + r3.storePacket(2 * Traits::ResPacketSize, R2); + } + } + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2(alpha); + + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r0.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8, alphav, R2); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r0.storePacket(2 * 
Traits::ResPacketSize, R2); + } + } + } + } + + //---------- Process 2 * LhsProgress rows at once ---------- + if(mr>=2*Traits::LhsProgress) + { + const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function. + // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size + // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess), + // or because we are testing specific blocking sizes. + Index actual_panel_rows = (2*LhsProgress) * std::max(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) )); + + for(Index i1=peeled_mc3; i1=6 without FMA (bug 1637) + #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE) + #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__ ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1)); + #else + #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND + #endif + #define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \ + traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \ + traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \ + traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \ + traits.madd(A0, B_0, C0, T0); \ + traits.madd(A1, B_0, C4, B_0); \ + traits.madd(A0, B1, C1, T0); \ + traits.madd(A1, B1, C5, B1); \ + traits.madd(A0, B2, C2, T0); \ + traits.madd(A1, B2, C6, B2); \ + traits.madd(A0, B3, C3, T0); \ + traits.madd(A1, B3, C7, B3); \ + EIGEN_GEBP_2PX4_SPILLING_WORKAROUND \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \ + } while(false) + + internal::prefetch(blB+(48+0)); + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + internal::prefetch(blB+(48+16)); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*4*RhsProgress; + blA += pk*(2*Traits::LhsProgress); + + EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4"); + } + // process remaining peeled loop + for(Index k=peeled_kc; k(alpha); + + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r1.loadPacket(0 * Traits::ResPacketSize); + R3 = r1.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C1, alphav, R2); + traits.acc(C5, alphav, R3); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r1.storePacket(0 * Traits::ResPacketSize, R2); + r1.storePacket(1 * Traits::ResPacketSize, R3); + + R0 = r2.loadPacket(0 * Traits::ResPacketSize); + R1 = r2.loadPacket(1 * Traits::ResPacketSize); + R2 = r3.loadPacket(0 * Traits::ResPacketSize); + R3 = r3.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C3, alphav, R2); + traits.acc(C7, alphav, R3); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r2.storePacket(1 * Traits::ResPacketSize, R1); + r3.storePacket(0 * Traits::ResPacketSize, R2); + r3.storePacket(1 * Traits::ResPacketSize, R3); + } + } + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2(alpha); + + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + } + } + } + } + //---------- Process 1 * 
LhsProgress rows at once ---------- + if(mr>=1*Traits::LhsProgress) + { + // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth) + for(Index i=peeled_mc2; i(alpha); + + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r1.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C1, alphav, R1); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r1.storePacket(0 * Traits::ResPacketSize, R1); + + R0 = r2.loadPacket(0 * Traits::ResPacketSize); + R1 = r3.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C3, alphav, R1); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r3.storePacket(0 * Traits::ResPacketSize, R1); + } + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2(alpha); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + r0.storePacket(0 * Traits::ResPacketSize, R0); + } + } + } + //---------- Process remaining rows, 1 at once ---------- + if(peeled_mc1::half>::size; + if ((SwappedTraits::LhsProgress % 4) == 0 && + (SwappedTraits::LhsProgress <= 8) && + (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr)) + { + SAccPacket C0, C1, C2, C3; + straits.initAcc(C0); + straits.initAcc(C1); + straits.initAcc(C2); + straits.initAcc(C3); + + const Index spk = (std::max)(1,SwappedTraits::LhsProgress/4); + const Index endk = (depth/spk)*spk; + const Index endk4 = (depth/(spk*4))*(spk*4); + + Index k=0; + for(; k=8,typename unpacket_traits::half,SResPacket>::type SResPacketHalf; + typedef typename conditional=8,typename unpacket_traits::half,SLhsPacket>::type SLhsPacketHalf; + typedef typename conditional=8,typename unpacket_traits::half,SRhsPacket>::type SRhsPacketHalf; + typedef typename conditional=8,typename unpacket_traits::half,SAccPacket>::type SAccPacketHalf; + + SResPacketHalf R = res.template gatherPacket(i, j2); + SResPacketHalf alphav = pset1(alpha); + + if(depth-endk>0) + { + // We have to handle the last row of the rhs which corresponds to a half-packet + SLhsPacketHalf a0; + SRhsPacketHalf b0; + straits.loadLhsUnaligned(blB, a0); + straits.loadRhs(blA, b0); + SAccPacketHalf c0 = predux_downto4(C0); + straits.madd(a0,b0,c0,b0); + straits.acc(c0, alphav, R); + } + else + { + straits.acc(predux_downto4(C0), alphav, R); + } + res.scatterPacket(i, j2, R); + } + else + { + SResPacket R = res.template gatherPacket(i, j2); + SResPacket alphav = pset1(alpha); + straits.acc(C0, alphav, R); + res.scatterPacket(i, j2, R); + } + } + else // scalar path + { + // get a 1 x 4 res block as registers + ResScalar C0(0), C1(0), C2(0), C3(0); + + for(Index k=0; k +struct gemm_pack_lhs +{ + typedef typename DataMapper::LinearMapper LinearMapper; + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +EIGEN_DONT_INLINE void gemm_pack_lhs + ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size }; + + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) ); + conj_if::IsComplex && Conjugate> cj; + Index count = 0; + + const Index peeled_mc3 = Pack1>=3*PacketSize ? 
(rows/(3*PacketSize))*(3*PacketSize) : 0; + const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; + const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1 + : Pack2>1 ? (rows/Pack2)*Pack2 : 0; + + Index i=0; + + // Pack 3 packets + if(Pack1>=3*PacketSize) + { + for(; i=2*PacketSize) + { + for(; i=1*PacketSize) + { + for(; i1) + { + for(; i +struct gemm_pack_lhs +{ + typedef typename DataMapper::LinearMapper LinearMapper; + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +EIGEN_DONT_INLINE void gemm_pack_lhs + ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size }; + + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; + Index count = 0; + +// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; +// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; +// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + int pack = Pack1; + Index i = 0; + while(pack>0) + { + Index remaining_rows = rows-i; + Index peeled_mc = i+(remaining_rows/pack)*pack; + for(; i=PacketSize) + { + for(; k kernel; + for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k); + ptranspose(kernel); + for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p])); + } + count += PacketSize*pack; + } + } + for(; k +struct gemm_pack_rhs +{ + typedef typename packet_traits::type Packet; + typedef typename DataMapper::LinearMapper LinearMapper; + enum { PacketSize = packet_traits::size }; + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +EIGEN_DONT_INLINE void gemm_pack_rhs + ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; + Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; + Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0; + Index count = 0; + const Index peeled_k = (depth/PacketSize)*PacketSize; +// if(nr>=8) +// { +// for(Index j2=0; j2 kernel; +// for (int p = 0; p < PacketSize; ++p) { +// kernel.packet[p] = ploadu(&rhs[(j2+p)*rhsStride+k]); +// } +// ptranspose(kernel); +// for (int p = 0; p < PacketSize; ++p) { +// pstoreu(blockB+count, cj.pconj(kernel.packet[p])); +// count+=PacketSize; +// } +// } +// } +// for(; k=4) + { + for(Index j2=packet_cols8; j2 kernel; + kernel.packet[0] = dm0.loadPacket(k); + kernel.packet[1%PacketSize] = dm1.loadPacket(k); + kernel.packet[2%PacketSize] = dm2.loadPacket(k); + kernel.packet[3%PacketSize] = dm3.loadPacket(k); + ptranspose(kernel); + pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0])); + pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize])); + pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize])); + pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize])); + count+=4*PacketSize; + } + } + for(; k +struct gemm_pack_rhs +{ + typedef typename packet_traits::type Packet; + typedef typename DataMapper::LinearMapper LinearMapper; + enum { PacketSize = packet_traits::size }; + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +EIGEN_DONT_INLINE void gemm_pack_rhs + ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; + Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; + Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; + Index count = 0; + +// if(nr>=8) +// { +// for(Index j2=0; j2(&rhs[k*rhsStride + j2]); +// pstoreu(blockB+count, cj.pconj(A)); +// } else if (PacketSize==4) { +// Packet A = ploadu(&rhs[k*rhsStride + j2]); +// Packet B = ploadu(&rhs[k*rhsStride + j2 + PacketSize]); +// pstoreu(blockB+count, cj.pconj(A)); +// pstoreu(blockB+count+PacketSize, cj.pconj(B)); +// } else { +// const Scalar* b0 = &rhs[k*rhsStride + j2]; +// blockB[count+0] = cj(b0[0]); +// blockB[count+1] = cj(b0[1]); +// blockB[count+2] = cj(b0[2]); +// blockB[count+3] = cj(b0[3]); +// blockB[count+4] = cj(b0[4]); +// blockB[count+5] = cj(b0[5]); +// blockB[count+6] = cj(b0[6]); +// blockB[count+7] = cj(b0[7]); +// } +// count += 8; +// } +// // skip what we have after +// if(PanelMode) count += 8 * (stride-offset-depth); +// } +// } + if(nr>=4) + { + for(Index j2=packet_cols8; j2 +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
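+
+// Rough shape of the blocking scheme implemented below (pseudo-code sketch, not
+// Eigen API), following Goto's paper cited in the col-major specialization:
+//
+//   for k2 in 0..depth step kc:          // pack B_{k2} into blockB   (L2/L3)
+//     for i2 in 0..rows step mc:         //   pack A_{i2,k2} into blockA (L2)
+//       gebp(C_{i2}, blockA, blockB)     //     register-blocked micro kernels (L1)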
+ +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H +#define EIGEN_GENERAL_MATRIX_MATRIX_H + +namespace Eigen { + +namespace internal { + +template class level3_blocking; + +/* Specialization for a row-major destination matrix => simple transposition of the product */ +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> +struct general_matrix_matrix_product +{ + typedef gebp_traits Traits; + + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run( + Index rows, Index cols, Index depth, + const LhsScalar* lhs, Index lhsStride, + const RhsScalar* rhs, Index rhsStride, + ResScalar* res, Index resStride, + ResScalar alpha, + level3_blocking& blocking, + GemmParallelInfo* info = 0) + { + // transpose the product such that the result is column major + general_matrix_matrix_product + ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); + } +}; + +/* Specialization for a col-major destination matrix + * => Blocking algorithm following Goto's paper */ +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> +struct general_matrix_matrix_product +{ + +typedef gebp_traits Traits; + +typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; +static void run(Index rows, Index cols, Index depth, + const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsStride, + ResScalar* _res, Index resStride, + ResScalar alpha, + level3_blocking& blocking, + GemmParallelInfo* info = 0) +{ + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + RhsMapper rhs(_rhs,rhsStride); + ResMapper res(_res, resStride); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction + + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gebp_kernel gebp; + +#ifdef EIGEN_HAS_OPENMP + if(info) + { + // this is the parallel version! + int tid = omp_get_thread_num(); + int threads = omp_get_num_threads(); + + LhsScalar* blockA = blocking.blockA(); + eigen_internal_assert(blockA!=0); + + std::size_t sizeB = kc*nc; + ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); + + // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... + for(Index k=0; k rows of B', and cols of the A' + + // In order to reduce the chance that a thread has to wait for the other, + // let's start by packing B'. + pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); + + // Pack A_k to A' in a parallel fashion: + // each thread packs the sub block A_k,i to A'_i where i is the thread id. + + // However, before copying to A'_i, we have to make sure that no other thread is still using it, + // i.e., we test that info[tid].users equals 0. + // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. + while(info[tid].users!=0) {} + info[tid].users += threads; + + pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); + + // Notify the other threads that the part A'_i is ready to go. 
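+ // (Handshake in detail: info[tid].users counts pending readers of panel A'_i.
+ // The owning thread spins until users drops back to zero, claims the slot by
+ // adding `threads`, repacks its slice, and only then publishes it by setting
+ // sync to the current k. Consumers below spin on sync!=k before touching A'_i,
+ // so a half-packed panel is never read, and each consumer is expected to
+ // decrement users once its gebp call is done, returning the slot to the owner.)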
+ info[tid].sync = k; + + // Computes C_i += A' * B' per A'_i + for(int shift=0; shift0) { + while(info[i].sync!=k) { + } + } + + gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); + } + + // Then keep going as usual with the remaining B' + for(Index j=nc; j Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) + // Note that this panel will be read as many times as the number of blocks in the rhs's + // horizontal panel which is, in practice, a very low number. + pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); + + // For each kc x nc block of the rhs's horizontal panel... + for(Index j2=0; j2 +struct gemm_functor +{ + gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) + : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) + {} + + void initParallelSession(Index num_threads) const + { + m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); + m_blocking.allocateA(); + } + + void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo* info=0) const + { + if(cols==-1) + cols = m_rhs.cols(); + + Gemm::run(rows, cols, m_lhs.cols(), + &m_lhs.coeffRef(row,0), m_lhs.outerStride(), + &m_rhs.coeffRef(0,col), m_rhs.outerStride(), + (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), + m_actualAlpha, m_blocking, info); + } + + typedef typename Gemm::Traits Traits; + + protected: + const Lhs& m_lhs; + const Rhs& m_rhs; + Dest& m_dest; + Scalar m_actualAlpha; + BlockingType& m_blocking; +}; + +template class gemm_blocking_space; + +template +class level3_blocking +{ + typedef _LhsScalar LhsScalar; + typedef _RhsScalar RhsScalar; + + protected: + LhsScalar* m_blockA; + RhsScalar* m_blockB; + + Index m_mc; + Index m_nc; + Index m_kc; + + public: + + level3_blocking() + : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) + {} + + inline Index mc() const { return m_mc; } + inline Index nc() const { return m_nc; } + inline Index kc() const { return m_kc; } + + inline LhsScalar* blockA() { return m_blockA; } + inline RhsScalar* blockB() { return m_blockB; } +}; + +template +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> +{ + enum { + Transpose = StorageOrder==RowMajor, + ActualRows = Transpose ? MaxCols : MaxRows, + ActualCols = Transpose ? 
MaxRows : MaxCols + }; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; + enum { + SizeA = ActualRows * MaxDepth, + SizeB = ActualCols * MaxDepth + }; + +#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES + EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; + EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; +#else + EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; + EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; +#endif + + public: + + gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) + { + this->m_mc = ActualRows; + this->m_nc = ActualCols; + this->m_kc = MaxDepth; +#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES + this->m_blockA = m_staticA; + this->m_blockB = m_staticB; +#else + this->m_blockA = reinterpret_cast((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); + this->m_blockB = reinterpret_cast((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); +#endif + } + + void initParallel(Index, Index, Index, Index) + {} + + inline void allocateA() {} + inline void allocateB() {} + inline void allocateAll() {} +}; + +template +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> +{ + enum { + Transpose = StorageOrder==RowMajor + }; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; + + Index m_sizeA; + Index m_sizeB; + + public: + + gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) + { + this->m_mc = Transpose ? cols : rows; + this->m_nc = Transpose ? rows : cols; + this->m_kc = depth; + + if(l3_blocking) + { + computeProductBlockingSizes(this->m_kc, this->m_mc, this->m_nc, num_threads); + } + else // no l3 blocking + { + Index n = this->m_nc; + computeProductBlockingSizes(this->m_kc, this->m_mc, n, num_threads); + } + + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + + void initParallel(Index rows, Index cols, Index depth, Index num_threads) + { + this->m_mc = Transpose ? cols : rows; + this->m_nc = Transpose ? 
rows : cols; + this->m_kc = depth; + + eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0); + Index m = this->m_mc; + computeProductBlockingSizes(this->m_kc, m, this->m_nc, num_threads); + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + + void allocateA() + { + if(this->m_blockA==0) + this->m_blockA = aligned_new(m_sizeA); + } + + void allocateB() + { + if(this->m_blockB==0) + this->m_blockB = aligned_new(m_sizeB); + } + + void allocateAll() + { + allocateA(); + allocateB(); + } + + ~gemm_blocking_space() + { + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + } +}; + +} // end namespace internal + +namespace internal { + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef typename internal::remove_all::type ActualLhsTypeCleaned; + + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + enum { + MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) + }; + + typedef generic_product_impl lazyproduct; + + template + static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) + lazyproduct::evalTo(dst, lhs, rhs); + else + { + dst.setZero(); + scaleAndAddTo(dst, lhs, rhs, Scalar(1)); + } + } + + template + static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) + lazyproduct::addTo(dst, lhs, rhs); + else + scaleAndAddTo(dst,lhs, rhs, Scalar(1)); + } + + template + static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) + lazyproduct::subTo(dst, lhs, rhs); + else + scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); + } + + template + static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) + { + eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); + if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) + return; + + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(a_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(a_rhs); + + Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) + * RhsBlasTraits::extractScalarFactor(a_rhs); + + typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, + Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; + + typedef internal::gemm_functor< + Scalar, Index, + internal::general_matrix_matrix_product< + Index, + LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), + RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), + (Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor>, + ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; + + BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); + internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> + (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h new file mode 100644 index 000000000..e844e37d1 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h @@ -0,0 +1,311 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H +#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H + +namespace Eigen { + +template +struct selfadjoint_rank1_update; + +namespace internal { + +/********************************************************************** +* This file implements a general A * B product while +* evaluating only one triangular part of the product. +* This is a more general version of self adjoint product (C += A A^T) +* as the level 3 SYRK Blas routine. +**********************************************************************/ + +// forward declarations (defined at the end of this file) +template +struct tribb_kernel; + +/* Optimized matrix-matrix product evaluating only one triangular half */ +template +struct general_matrix_matrix_triangular_product; + +// as usual if the result is row major => we transpose the product +template +struct general_matrix_matrix_triangular_product +{ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride, + const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, + const ResScalar& alpha, level3_blocking& blocking) + { + general_matrix_matrix_triangular_product + ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking); + } +}; + +template +struct general_matrix_matrix_triangular_product +{ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride, + const ResScalar& alpha, level3_blocking& blocking) + { + typedef gebp_traits Traits; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + RhsMapper rhs(_rhs,rhsStride); + ResMapper res(_res, resStride); + + Index kc = blocking.kc(); + Index mc = (std::min)(size,blocking.mc()); + + // !!! 
mc must be a multiple of nr: + if(mc > Traits::nr) + mc = (mc/Traits::nr)*Traits::nr; + + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*size; + + ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); + + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gebp_kernel gebp; + tribb_kernel sybb; + + for(Index k2=0; k2 processed with gebp or skipped + // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel + // 3 - after the diagonal => processed with gebp or skipped + if (UpLo==Lower) + gebp(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, + (std::min)(size,i2), alpha, -1, -1, 0, 0); + + + sybb(_res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha); + + if (UpLo==Upper) + { + Index j2 = i2+actual_mc; + gebp(res.getSubMapper(i2, j2), blockA, blockB+actual_kc*j2, actual_mc, + actual_kc, (std::max)(Index(0), size-j2), alpha, -1, -1, 0, 0); + } + } + } + } +}; + +// Optimized packed Block * packed Block product kernel evaluating only one given triangular part +// This kernel is built on top of the gebp kernel: +// - the current destination block is processed per panel of actual_mc x BlockSize +// where BlockSize is set to the minimal value allowing gebp to be as fast as possible +// - then, as usual, each panel is split into three parts along the diagonal, +// the sub blocks above and below the diagonal are processed as usual, +// while the triangular block overlapping the diagonal is evaluated into a +// small temporary buffer which is then accumulated into the result using a +// triangular traversal. +template +struct tribb_kernel +{ + typedef gebp_traits Traits; + typedef typename Traits::ResScalar ResScalar; + + enum { + BlockSize = meta_least_common_multiple::ret + }; + void operator()(ResScalar* _res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha) + { + typedef blas_data_mapper ResMapper; + ResMapper res(_res, resStride); + gebp_kernel gebp_kernel; + + Matrix buffer((internal::constructor_without_unaligned_array_assert())); + + // let's process the block per panel of actual_mc x BlockSize, + // again, each is split into three parts, etc. 
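+ // Concretely, for the panel of columns [j, j+actualBlockSize): the rectangular
+ // part away from the diagonal goes through a regular gebp call, while the
+ // actualBlockSize x actualBlockSize square sitting on the diagonal is first
+ // evaluated in full into the local `buffer`, and only its UpLo half is then
+ // accumulated back into res.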
+ for (Index j=0; j(BlockSize,size - j); + const RhsScalar* actual_b = blockB+j*depth; + + if(UpLo==Upper) + gebp_kernel(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha, + -1, -1, 0, 0); + + // selfadjoint micro block + { + Index i = j; + buffer.setZero(); + // 1 - apply the kernel on the temporary buffer + gebp_kernel(ResMapper(buffer.data(), BlockSize), blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha, + -1, -1, 0, 0); + // 2 - triangular accumulation + for(Index j1=0; j1 +struct general_product_to_triangular_selector; + + +template +struct general_product_to_triangular_selector +{ + static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha, bool beta) + { + typedef typename MatrixType::Scalar Scalar; + + typedef typename internal::remove_all::type Lhs; + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs; + typedef typename internal::remove_all::type _ActualLhs; + typename internal::add_const_on_value_type::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + + typedef typename internal::remove_all::type Rhs; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs; + typedef typename internal::remove_all::type _ActualRhs; + typename internal::add_const_on_value_type::type actualRhs = RhsBlasTraits::extract(prod.rhs()); + + Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived()); + + if(!beta) + mat.template triangularView().setZero(); + + enum { + StorageOrder = (internal::traits::Flags&RowMajorBit) ? RowMajor : ColMajor, + UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1, + UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1 + }; + + internal::gemv_static_vector_if static_lhs; + ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(), + (UseLhsDirectly ? const_cast(actualLhs.data()) : static_lhs.data())); + if(!UseLhsDirectly) Map(actualLhsPtr, actualLhs.size()) = actualLhs; + + internal::gemv_static_vector_if static_rhs; + ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(), + (UseRhsDirectly ? 
const_cast(actualRhs.data()) : static_rhs.data())); + if(!UseRhsDirectly) Map(actualRhsPtr, actualRhs.size()) = actualRhs; + + + selfadjoint_rank1_update::IsComplex, + RhsBlasTraits::NeedToConjugate && NumTraits::IsComplex> + ::run(actualLhs.size(), mat.data(), mat.outerStride(), actualLhsPtr, actualRhsPtr, actualAlpha); + } +}; + +template +struct general_product_to_triangular_selector +{ + static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha, bool beta) + { + typedef typename internal::remove_all::type Lhs; + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs; + typedef typename internal::remove_all::type _ActualLhs; + typename internal::add_const_on_value_type::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + + typedef typename internal::remove_all::type Rhs; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs; + typedef typename internal::remove_all::type _ActualRhs; + typename internal::add_const_on_value_type::type actualRhs = RhsBlasTraits::extract(prod.rhs()); + + typename ProductType::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived()); + + if(!beta) + mat.template triangularView().setZero(); + + enum { + IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0, + LhsIsRowMajor = _ActualLhs::Flags&RowMajorBit ? 1 : 0, + RhsIsRowMajor = _ActualRhs::Flags&RowMajorBit ? 1 : 0, + SkipDiag = (UpLo&(UnitDiag|ZeroDiag))!=0 + }; + + Index size = mat.cols(); + if(SkipDiag) + size--; + Index depth = actualLhs.cols(); + + typedef internal::gemm_blocking_space BlockingType; + + BlockingType blocking(size, size, depth, 1, false); + + internal::general_matrix_matrix_triangular_product + ::run(size, depth, + &actualLhs.coeffRef(SkipDiag&&(UpLo&Lower)==Lower ? 1 : 0,0), actualLhs.outerStride(), + &actualRhs.coeffRef(0,SkipDiag&&(UpLo&Upper)==Upper ? 1 : 0), actualRhs.outerStride(), + mat.data() + (SkipDiag ? (bool(IsRowMajor) != ((UpLo&Lower)==Lower) ? 1 : mat.outerStride() ) : 0), mat.outerStride(), actualAlpha, blocking); + } +}; + +template +template +TriangularView& TriangularViewImpl::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta) +{ + EIGEN_STATIC_ASSERT((UpLo&UnitDiag)==0, WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED); + eigen_assert(derived().nestedExpression().rows() == prod.rows() && derived().cols() == prod.cols()); + + general_product_to_triangular_selector::InnerSize==1>::run(derived().nestedExpression().const_cast_derived(), prod, alpha, beta); + + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h new file mode 100644 index 000000000..f6f9ebeca --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h @@ -0,0 +1,145 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Level 3 BLAS SYRK/HERK implementation. + ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H +#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H + +namespace Eigen { + +namespace internal { + +template +struct general_matrix_matrix_rankupdate : + general_matrix_matrix_triangular_product< + Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {}; + + +// try to go to BLAS specialization +#define EIGEN_BLAS_RANKUPDATE_SPECIALIZE(Scalar) \ +template \ +struct general_matrix_matrix_triangular_product { \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \ + const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking& blocking) \ + { \ + if ( lhs==rhs && ((UpLo&(Lower|Upper))==UpLo) ) { \ + general_matrix_matrix_rankupdate \ + ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \ + } else { \ + general_matrix_matrix_triangular_product \ + ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \ + } \ + } \ +}; + +EIGEN_BLAS_RANKUPDATE_SPECIALIZE(double) +EIGEN_BLAS_RANKUPDATE_SPECIALIZE(float) +// TODO handle complex cases +// EIGEN_BLAS_RANKUPDATE_SPECIALIZE(dcomplex) +// EIGEN_BLAS_RANKUPDATE_SPECIALIZE(scomplex) + +// SYRK for float/double +#define EIGEN_BLAS_RANKUPDATE_R(EIGTYPE, BLASTYPE, BLASFUNC) \ +template \ +struct general_matrix_matrix_rankupdate { \ + enum { \ + IsLower = (UpLo&Lower) == Lower, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((AStorageOrder==ColMajor) && ConjugateA) ? 
1 : 0 \ + }; \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* /*rhs*/, Index /*rhsStride*/, EIGTYPE* res, Index resStride, EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + /* typedef Matrix MatrixRhs;*/ \ +\ + BlasIndex lda=convert_index(lhsStride), ldc=convert_index(resStride), n=convert_index(size), k=convert_index(depth); \ + char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 'T':'N'); \ + EIGTYPE beta(1); \ + BLASFUNC(&uplo, &trans, &n, &k, (const BLASTYPE*)&numext::real_ref(alpha), lhs, &lda, (const BLASTYPE*)&numext::real_ref(beta), res, &ldc); \ + } \ +}; + +// HERK for complex data +#define EIGEN_BLAS_RANKUPDATE_C(EIGTYPE, BLASTYPE, RTYPE, BLASFUNC) \ +template \ +struct general_matrix_matrix_rankupdate { \ + enum { \ + IsLower = (UpLo&Lower) == Lower, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \ + }; \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* /*rhs*/, Index /*rhsStride*/, EIGTYPE* res, Index resStride, EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + typedef Matrix MatrixType; \ +\ + BlasIndex lda=convert_index(lhsStride), ldc=convert_index(resStride), n=convert_index(size), k=convert_index(depth); \ + char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 'C':'N'); \ + RTYPE alpha_, beta_; \ + const EIGTYPE* a_ptr; \ +\ + alpha_ = alpha.real(); \ + beta_ = 1.0; \ +/* Copy with conjugation in some cases*/ \ + MatrixType a; \ + if (conjA) { \ + Map > mapA(lhs,n,k,OuterStride<>(lhsStride)); \ + a = mapA.conjugate(); \ + lda = a.outerStride(); \ + a_ptr = a.data(); \ + } else a_ptr=lhs; \ + BLASFUNC(&uplo, &trans, &n, &k, &alpha_, (BLASTYPE*)a_ptr, &lda, &beta_, (BLASTYPE*)res, &ldc); \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_RANKUPDATE_R(double, double, dsyrk) +EIGEN_BLAS_RANKUPDATE_R(float, float, ssyrk) +#else +EIGEN_BLAS_RANKUPDATE_R(double, double, dsyrk_) +EIGEN_BLAS_RANKUPDATE_R(float, float, ssyrk_) +#endif + +// TODO hanlde complex cases +// EIGEN_BLAS_RANKUPDATE_C(dcomplex, double, double, zherk_) +// EIGEN_BLAS_RANKUPDATE_C(scomplex, float, float, cherk_) + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h new file mode 100644 index 000000000..b0f6b0d5b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h @@ -0,0 +1,122 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * General matrix-matrix product functionality based on ?GEMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H +#define EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements general matrix-matrix multiplication using BLAS +* gemm function via partial specialization of +* general_matrix_matrix_product::run(..) method for float, double, +* std::complex and std::complex types +**********************************************************************/ + +// gemm specialization + +#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, BLASTYPE, BLASFUNC) \ +template< \ + typename Index, \ + int LhsStorageOrder, bool ConjugateLhs, \ + int RhsStorageOrder, bool ConjugateRhs> \ +struct general_matrix_matrix_product \ +{ \ +typedef gebp_traits Traits; \ +\ +static void run(Index rows, Index cols, Index depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, \ + level3_blocking& /*blocking*/, \ + GemmParallelInfo* /*info = 0*/) \ +{ \ + using std::conj; \ +\ + char transa, transb; \ + BlasIndex m, n, k, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + EIGTYPE beta(1); \ + MatrixX##EIGPREFIX a_tmp, b_tmp; \ +\ +/* Set transpose options */ \ + transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ + transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 
'C' : 'T') : 'N'; \ +\ +/* Set m, n, k */ \ + m = convert_index(rows); \ + n = convert_index(cols); \ + k = convert_index(depth); \ +\ +/* Set lda, ldb, ldc */ \ + lda = convert_index(lhsStride); \ + ldb = convert_index(rhsStride); \ + ldc = convert_index(resStride); \ +\ +/* Set a, b, c */ \ + if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \ + Map > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \ + a_tmp = lhs.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else a = _lhs; \ +\ + if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \ + Map > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \ + b_tmp = rhs.conjugate(); \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ + } else b = _rhs; \ +\ + BLASFUNC(&transa, &transb, &m, &n, &k, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \ +}}; + +#ifdef EIGEN_USE_MKL +GEMM_SPECIALIZATION(double, d, double, dgemm) +GEMM_SPECIALIZATION(float, f, float, sgemm) +GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, zgemm) +GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8, cgemm) +#else +GEMM_SPECIALIZATION(double, d, double, dgemm_) +GEMM_SPECIALIZATION(float, f, float, sgemm_) +GEMM_SPECIALIZATION(dcomplex, cd, double, zgemm_) +GEMM_SPECIALIZATION(scomplex, cf, float, cgemm_) +#endif + +} // end namespase internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h new file mode 100644 index 000000000..a597c1f4e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h @@ -0,0 +1,619 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H +#define EIGEN_GENERAL_MATRIX_VECTOR_H + +namespace Eigen { + +namespace internal { + +/* Optimized col-major matrix * vector product: + * This algorithm processes 4 columns at onces that allows to both reduce + * the number of load/stores of the result by a factor 4 and to reduce + * the instruction dependency. Moreover, we know that all bands have the + * same alignment pattern. + * + * Mixing type logic: C += alpha * A * B + * | A | B |alpha| comments + * |real |cplx |cplx | no vectorization + * |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization + * |cplx |real |cplx | invalid, the caller has to do tmp: = A * B; C += alpha*tmp + * |cplx |real |real | optimal case, vectorization possible via real-cplx mul + * + * Accesses to the matrix coefficients follow the following logic: + * + * - if all columns have the same alignment then + * - if the columns have the same alignment as the result vector, then easy! 
(-> AllAligned case) + * - otherwise perform unaligned loads only (-> NoneAligned case) + * - otherwise + * - if even columns have the same alignment then + * // odd columns are guaranteed to have the same alignment too + * - if even or odd columns have the same alignment as the result, then + * // for a register size of 2 scalars, this is guarantee to be the case (e.g., SSE with double) + * - perform half aligned and half unaligned loads (-> EvenAligned case) + * - otherwise perform unaligned loads only (-> NoneAligned case) + * - otherwise, if the register size is 4 scalars (e.g., SSE with float) then + * - one over 4 consecutive columns is guaranteed to be aligned with the result vector, + * perform simple aligned loads for this column and aligned loads plus re-alignment for the other. (-> FirstAligned case) + * // this re-alignment is done by the palign function implemented for SSE in Eigen/src/Core/arch/SSE/PacketMath.h + * - otherwise, + * // if we get here, this means the register size is greater than 4 (e.g., AVX with floats), + * // we currently fall back to the NoneAligned case + * + * The same reasoning apply for the transposed case. + * + * The last case (PacketSize>4) could probably be improved by generalizing the FirstAligned case, but since we do not support AVX yet... + * One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment + * strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on a 8 byte boundary are not too slow + * compared to unaligned loads on a 4 byte boundary. + * + */ +template +struct general_matrix_vector_product +{ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + +enum { + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable + && int(packet_traits::size)==int(packet_traits::size), + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? 
packet_traits::size : 1 +}; + +typedef typename packet_traits::type _LhsPacket; +typedef typename packet_traits::type _RhsPacket; +typedef typename packet_traits::type _ResPacket; + +typedef typename conditional::type LhsPacket; +typedef typename conditional::type RhsPacket; +typedef typename conditional::type ResPacket; + +EIGEN_DONT_INLINE static void run( + Index rows, Index cols, + const LhsMapper& lhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + RhsScalar alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product::run( + Index rows, Index cols, + const LhsMapper& lhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + RhsScalar alpha) +{ + EIGEN_UNUSED_VARIABLE(resIncr); + eigen_internal_assert(resIncr==1); + #ifdef _EIGEN_ACCUMULATE_PACKETS + #error _EIGEN_ACCUMULATE_PACKETS has already been defined + #endif + #define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) \ + pstore(&res[j], \ + padd(pload(&res[j]), \ + padd( \ + padd(pcj.pmul(lhs0.template load(j), ptmp0), \ + pcj.pmul(lhs1.template load(j), ptmp1)), \ + padd(pcj.pmul(lhs2.template load(j), ptmp2), \ + pcj.pmul(lhs3.template load(j), ptmp3)) ))) + + typedef typename LhsMapper::VectorMapper LhsScalars; + + conj_helper cj; + conj_helper pcj; + if(ConjugateRhs) + alpha = numext::conj(alpha); + + enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned }; + const Index columnsAtOnce = 4; + const Index peels = 2; + const Index LhsPacketAlignedMask = LhsPacketSize-1; + const Index ResPacketAlignedMask = ResPacketSize-1; +// const Index PeelAlignedMask = ResPacketSize*peels-1; + const Index size = rows; + + const Index lhsStride = lhs.stride(); + + // How many coeffs of the result do we have to skip to be aligned. + // Here we assume data are at least aligned on the base scalar type. + Index alignedStart = internal::first_default_aligned(res,size); + Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0; + const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1; + + const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0; + Index alignmentPattern = alignmentStep==0 ? AllAligned + : alignmentStep==(LhsPacketSize/2) ? EvenAligned + : FirstAligned; + + // we cannot assume the first element is aligned because of sub-matrices + const Index lhsAlignmentOffset = lhs.firstAligned(size); + + // find how many columns do we have to skip to be aligned with the result (if possible) + Index skipColumns = 0; + // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) + if( (lhsAlignmentOffset < 0) || (lhsAlignmentOffset == size) || (UIntPtr(res)%sizeof(ResScalar)) ) + { + alignedSize = 0; + alignedStart = 0; + alignmentPattern = NoneAligned; + } + else if(LhsPacketSize > 4) + { + // TODO: extend the code to support aligned loads whenever possible when LhsPacketSize > 4. 
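+ // (The FirstAligned strategy relies on palign re-alignment between two
+ // overlapping aligned packets, which is only wired up for packets of at most
+ // four scalars; wider packets, e.g. AVX floats, therefore fall back to the
+ // NoneAligned path here.)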
+ // Currently, it seems to be better to perform unaligned loads anyway + alignmentPattern = NoneAligned; + } + else if (LhsPacketSize>1) + { + // eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size= cols) + || LhsPacketSize > size + || (size_t(firstLhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);*/ + } + else if(Vectorizable) + { + alignedStart = 0; + alignedSize = size; + alignmentPattern = AllAligned; + } + + const Index offset1 = (alignmentPattern==FirstAligned && alignmentStep==1)?3:1; + const Index offset3 = (alignmentPattern==FirstAligned && alignmentStep==1)?1:3; + + Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns; + for (Index i=skipColumns; i(alpha*rhs(i, 0)), + ptmp1 = pset1(alpha*rhs(i+offset1, 0)), + ptmp2 = pset1(alpha*rhs(i+2, 0)), + ptmp3 = pset1(alpha*rhs(i+offset3, 0)); + + // this helps a lot generating better binary code + const LhsScalars lhs0 = lhs.getVectorMapper(0, i+0), lhs1 = lhs.getVectorMapper(0, i+offset1), + lhs2 = lhs.getVectorMapper(0, i+2), lhs3 = lhs.getVectorMapper(0, i+offset3); + + if (Vectorizable) + { + /* explicit vectorization */ + // process initial unaligned coeffs + for (Index j=0; jalignedStart) + { + switch(alignmentPattern) + { + case AllAligned: + for (Index j = alignedStart; j1) + { + LhsPacket A00, A01, A02, A03, A10, A11, A12, A13; + ResPacket T0, T1; + + A01 = lhs1.template load(alignedStart-1); + A02 = lhs2.template load(alignedStart-2); + A03 = lhs3.template load(alignedStart-3); + + for (; j(j-1+LhsPacketSize); palign<1>(A01,A11); + A12 = lhs2.template load(j-2+LhsPacketSize); palign<2>(A02,A12); + A13 = lhs3.template load(j-3+LhsPacketSize); palign<3>(A03,A13); + + A00 = lhs0.template load(j); + A10 = lhs0.template load(j+LhsPacketSize); + T0 = pcj.pmadd(A00, ptmp0, pload(&res[j])); + T1 = pcj.pmadd(A10, ptmp0, pload(&res[j+ResPacketSize])); + + T0 = pcj.pmadd(A01, ptmp1, T0); + A01 = lhs1.template load(j-1+2*LhsPacketSize); palign<1>(A11,A01); + T0 = pcj.pmadd(A02, ptmp2, T0); + A02 = lhs2.template load(j-2+2*LhsPacketSize); palign<2>(A12,A02); + T0 = pcj.pmadd(A03, ptmp3, T0); + pstore(&res[j],T0); + A03 = lhs3.template load(j-3+2*LhsPacketSize); palign<3>(A13,A03); + T1 = pcj.pmadd(A11, ptmp1, T1); + T1 = pcj.pmadd(A12, ptmp2, T1); + T1 = pcj.pmadd(A13, ptmp3, T1); + pstore(&res[j+ResPacketSize],T1); + } + } + for (; j(alpha*rhs(k, 0)); + const LhsScalars lhs0 = lhs.getVectorMapper(0, k); + + if (Vectorizable) + { + /* explicit vectorization */ + // process first unaligned result's coeffs + for (Index j=0; j(alignedStart)) + for (Index i = alignedStart;i(i), ptmp0, pload(&res[i]))); + else + for (Index i = alignedStart;i(i), ptmp0, pload(&res[i]))); + } + + // process remaining scalars (or all if no explicit vectorization) + for (Index i=alignedSize; i +struct general_matrix_vector_product +{ +typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + +enum { + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable + && int(packet_traits::size)==int(packet_traits::size), + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? 
packet_traits::size : 1 +}; + +typedef typename packet_traits::type _LhsPacket; +typedef typename packet_traits::type _RhsPacket; +typedef typename packet_traits::type _ResPacket; + +typedef typename conditional::type LhsPacket; +typedef typename conditional::type RhsPacket; +typedef typename conditional::type ResPacket; + +EIGEN_DONT_INLINE static void run( + Index rows, Index cols, + const LhsMapper& lhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product::run( + Index rows, Index cols, + const LhsMapper& lhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha) +{ + eigen_internal_assert(rhs.stride()==1); + + #ifdef _EIGEN_ACCUMULATE_PACKETS + #error _EIGEN_ACCUMULATE_PACKETS has already been defined + #endif + + #define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) {\ + RhsPacket b = rhs.getVectorMapper(j, 0).template load(0); \ + ptmp0 = pcj.pmadd(lhs0.template load(j), b, ptmp0); \ + ptmp1 = pcj.pmadd(lhs1.template load(j), b, ptmp1); \ + ptmp2 = pcj.pmadd(lhs2.template load(j), b, ptmp2); \ + ptmp3 = pcj.pmadd(lhs3.template load(j), b, ptmp3); } + + conj_helper cj; + conj_helper pcj; + + typedef typename LhsMapper::VectorMapper LhsScalars; + + enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 }; + const Index rowsAtOnce = 4; + const Index peels = 2; + const Index RhsPacketAlignedMask = RhsPacketSize-1; + const Index LhsPacketAlignedMask = LhsPacketSize-1; + const Index depth = cols; + const Index lhsStride = lhs.stride(); + + // How many coeffs of the result do we have to skip to be aligned. + // Here we assume data are at least aligned on the base scalar type + // if that's not the case then vectorization is discarded, see below. + Index alignedStart = rhs.firstAligned(depth); + Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0; + const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1; + + const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0; + Index alignmentPattern = alignmentStep==0 ? AllAligned + : alignmentStep==(LhsPacketSize/2) ? EvenAligned + : FirstAligned; + + // we cannot assume the first element is aligned because of sub-matrices + const Index lhsAlignmentOffset = lhs.firstAligned(depth); + const Index rhsAlignmentOffset = rhs.firstAligned(rows); + + // find how many rows do we have to skip to be aligned with rhs (if possible) + Index skipRows = 0; + // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) + if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || + (lhsAlignmentOffset < 0) || (lhsAlignmentOffset == depth) || + (rhsAlignmentOffset < 0) || (rhsAlignmentOffset == rows) ) + { + alignedSize = 0; + alignedStart = 0; + alignmentPattern = NoneAligned; + } + else if(LhsPacketSize > 4) + { + // TODO: extend the code to support aligned loads whenever possible when LhsPacketSize > 4. 
+ alignmentPattern = NoneAligned; + } + else if (LhsPacketSize>1) + { + // eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth= rows) + || LhsPacketSize > depth + || (size_t(firstLhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);*/ + } + else if(Vectorizable) + { + alignedStart = 0; + alignedSize = depth; + alignmentPattern = AllAligned; + } + + const Index offset1 = (alignmentPattern==FirstAligned && alignmentStep==1)?3:1; + const Index offset3 = (alignmentPattern==FirstAligned && alignmentStep==1)?1:3; + + Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows; + for (Index i=skipRows; i(ResScalar(0)), ptmp1 = pset1(ResScalar(0)), + ptmp2 = pset1(ResScalar(0)), ptmp3 = pset1(ResScalar(0)); + + // process initial unaligned coeffs + // FIXME this loop get vectorized by the compiler ! + for (Index j=0; jalignedStart) + { + switch(alignmentPattern) + { + case AllAligned: + for (Index j = alignedStart; j1) + { + /* Here we proccess 4 rows with with two peeled iterations to hide + * the overhead of unaligned loads. Moreover unaligned loads are handled + * using special shift/move operations between the two aligned packets + * overlaping the desired unaligned packet. This is *much* more efficient + * than basic unaligned loads. + */ + LhsPacket A01, A02, A03, A11, A12, A13; + A01 = lhs1.template load(alignedStart-1); + A02 = lhs2.template load(alignedStart-2); + A03 = lhs3.template load(alignedStart-3); + + for (; j(0); + A11 = lhs1.template load(j-1+LhsPacketSize); palign<1>(A01,A11); + A12 = lhs2.template load(j-2+LhsPacketSize); palign<2>(A02,A12); + A13 = lhs3.template load(j-3+LhsPacketSize); palign<3>(A03,A13); + + ptmp0 = pcj.pmadd(lhs0.template load(j), b, ptmp0); + ptmp1 = pcj.pmadd(A01, b, ptmp1); + A01 = lhs1.template load(j-1+2*LhsPacketSize); palign<1>(A11,A01); + ptmp2 = pcj.pmadd(A02, b, ptmp2); + A02 = lhs2.template load(j-2+2*LhsPacketSize); palign<2>(A12,A02); + ptmp3 = pcj.pmadd(A03, b, ptmp3); + A03 = lhs3.template load(j-3+2*LhsPacketSize); palign<3>(A13,A03); + + b = rhs.getVectorMapper(j+RhsPacketSize, 0).template load(0); + ptmp0 = pcj.pmadd(lhs0.template load(j+LhsPacketSize), b, ptmp0); + ptmp1 = pcj.pmadd(A11, b, ptmp1); + ptmp2 = pcj.pmadd(A12, b, ptmp2); + ptmp3 = pcj.pmadd(A13, b, ptmp3); + } + } + for (; j(tmp0); + const LhsScalars lhs0 = lhs.getVectorMapper(i, 0); + // process first unaligned result's coeffs + // FIXME this loop get vectorized by the compiler ! + for (Index j=0; jalignedStart) + { + // process aligned rhs coeffs + if (lhs0.template aligned(alignedStart)) + for (Index j = alignedStart;j(j), rhs.getVectorMapper(j, 0).template load(0), ptmp0); + else + for (Index j = alignedStart;j(j), rhs.getVectorMapper(j, 0).template load(0), ptmp0); + tmp0 += predux(ptmp0); + } + + // process remaining scalars + // FIXME this loop get vectorized by the compiler ! 
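+ // (At most RhsPacketSize-1 coefficients remain here whenever the packet loops
+ // above ran; if vectorization was skipped, alignedSize is 0 and this tail
+ // covers the whole depth.)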
+ for (Index j=alignedSize; j and std::complex types +**********************************************************************/ + +// gemv specialization + +template +struct general_matrix_vector_product_gemv; + +#define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \ +template \ +struct general_matrix_vector_product,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,Specialized> { \ +static void run( \ + Index rows, Index cols, \ + const const_blas_data_mapper &lhs, \ + const const_blas_data_mapper &rhs, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + if (ConjugateLhs) { \ + general_matrix_vector_product,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,BuiltIn>::run( \ + rows, cols, lhs, rhs, res, resIncr, alpha); \ + } else { \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \ + } \ +} \ +}; \ +template \ +struct general_matrix_vector_product,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,Specialized> { \ +static void run( \ + Index rows, Index cols, \ + const const_blas_data_mapper &lhs, \ + const const_blas_data_mapper &rhs, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \ +} \ +}; \ + +EIGEN_BLAS_GEMV_SPECIALIZE(double) +EIGEN_BLAS_GEMV_SPECIALIZE(float) +EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex) +EIGEN_BLAS_GEMV_SPECIALIZE(scomplex) + +#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \ +template \ +struct general_matrix_vector_product_gemv \ +{ \ +typedef Matrix GEMVVector;\ +\ +static void run( \ + Index rows, Index cols, \ + const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* rhs, Index rhsIncr, \ + EIGTYPE* res, Index resIncr, EIGTYPE alpha) \ +{ \ + BlasIndex m=convert_index(rows), n=convert_index(cols), \ + lda=convert_index(lhsStride), incx=convert_index(rhsIncr), incy=convert_index(resIncr); \ + const EIGTYPE beta(1); \ + const EIGTYPE *x_ptr; \ + char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 
'C' : 'T'; \ + if (LhsStorageOrder==RowMajor) { \ + m = convert_index(cols); \ + n = convert_index(rows); \ + }\ + GEMVVector x_tmp; \ + if (ConjugateRhs) { \ + Map > map_x(rhs,cols,1,InnerStride<>(incx)); \ + x_tmp=map_x.conjugate(); \ + x_ptr=x_tmp.data(); \ + incx=1; \ + } else x_ptr=rhs; \ + BLASFUNC(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \ +}\ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv) +EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv) +EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, zgemv) +EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, MKL_Complex8 , cgemv) +#else +EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv_) +EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv_) +EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, zgemv_) +EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, cgemv_) +#endif + +} // end namespase internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/Parallelizer.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/Parallelizer.h new file mode 100644 index 000000000..c2f084c82 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/Parallelizer.h @@ -0,0 +1,163 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
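One step back before the parallelizer: the gemv binding macro just above collapses both storage orders onto a single column-major Fortran call by flipping `trans` and swapping m and n. A standalone sketch of that mapping for the double case follows; the `gemv_colmajor_bridge` wrapper name is ours, while `dgemv_` is the conventional Fortran-77 symbol used in the non-MKL branch of the macro.

    // Classic F77 BLAS: every argument is passed by pointer.
    extern "C" void dgemv_(const char* trans, const int* m, const int* n,
                           const double* alpha, const double* a, const int* lda,
                           const double* x, const int* incx,
                           const double* beta, double* y, const int* incy);

    // Computes y += alpha * A * x for an rows x cols matrix A with leading
    // dimension lda. A row-major A is presented to BLAS as its column-major
    // transpose: the stored dimensions are swapped and trans becomes 'T'.
    void gemv_colmajor_bridge(bool rowMajor, int rows, int cols, double alpha,
                              const double* A, int lda,
                              const double* x, double* y)
    {
        const char trans = rowMajor ? 'T' : 'N';
        const int m = rowMajor ? cols : rows;   // rows of the *stored* matrix
        const int n = rowMajor ? rows : cols;   // cols of the *stored* matrix
        const int one = 1;
        const double beta = 1.0;                // accumulate, like beta(1) above
        dgemv_(&trans, &m, &n, &alpha, A, &lda, x, &one, &beta, y, &one);
    }

The conjugated variants work the same way, except that 'C' replaces 'T' and, when the rhs itself must be conjugated (the ConjugateRhs branch above), the vector is first copied into a conjugated temporary, since BLAS offers no "conjugate x" flag.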
+ +#ifndef EIGEN_PARALLELIZER_H +#define EIGEN_PARALLELIZER_H + +namespace Eigen { + +namespace internal { + +/** \internal */ +inline void manage_multi_threading(Action action, int* v) +{ + static EIGEN_UNUSED int m_maxThreads = -1; + + if(action==SetAction) + { + eigen_internal_assert(v!=0); + m_maxThreads = *v; + } + else if(action==GetAction) + { + eigen_internal_assert(v!=0); + #ifdef EIGEN_HAS_OPENMP + if(m_maxThreads>0) + *v = m_maxThreads; + else + *v = omp_get_max_threads(); + #else + *v = 1; + #endif + } + else + { + eigen_internal_assert(false); + } +} + +} + +/** Must be call first when calling Eigen from multiple threads */ +inline void initParallel() +{ + int nbt; + internal::manage_multi_threading(GetAction, &nbt); + std::ptrdiff_t l1, l2, l3; + internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); +} + +/** \returns the max number of threads reserved for Eigen + * \sa setNbThreads */ +inline int nbThreads() +{ + int ret; + internal::manage_multi_threading(GetAction, &ret); + return ret; +} + +/** Sets the max number of threads reserved for Eigen + * \sa nbThreads */ +inline void setNbThreads(int v) +{ + internal::manage_multi_threading(SetAction, &v); +} + +namespace internal { + +template struct GemmParallelInfo +{ + GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} + + Index volatile sync; + int volatile users; + + Index lhs_start; + Index lhs_length; +}; + +template +void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) +{ + // TODO when EIGEN_USE_BLAS is defined, + // we should still enable OMP for other scalar types +#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS) + // FIXME the transpose variable is only needed to properly split + // the matrix product when multithreading is enabled. This is a temporary + // fix to support row-major destination matrices. This whole + // parallelizer mechanism has to be redisigned anyway. + EIGEN_UNUSED_VARIABLE(depth); + EIGEN_UNUSED_VARIABLE(transpose); + func(0,rows, 0,cols); +#else + + // Dynamically check whether we should enable or disable OpenMP. + // The conditions are: + // - the max number of threads we can create is greater than 1 + // - we are not already in a parallel code + // - the sizes are large enough + + // compute the maximal number of threads from the size of the product: + // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once. + Index size = transpose ? rows : cols; + Index pb_max_threads = std::max(1,size / Functor::Traits::nr); + + // compute the maximal number of threads from the total amount of work: + double work = static_cast(rows) * static_cast(cols) * + static_cast(depth); + double kMinTaskSize = 50000; // FIXME improve this heuristic. + pb_max_threads = std::max(1, std::min(pb_max_threads, work / kMinTaskSize)); + + // compute the number of threads we are going to use + Index threads = std::min(nbThreads(), pb_max_threads); + + // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session, + // then abort multi-threading + // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? 
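+ // (Note: `threads` above is already capped by Eigen::nbThreads(), i.e. by the
+ // value the application last passed to Eigen::setNbThreads(); forcing it to 1
+ // routes everything through the sequential early return below.)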
+ if((!Condition) || (threads==1) || (omp_get_num_threads()>1)) + return func(0,rows, 0,cols); + + Eigen::initParallel(); + func.initParallelSession(threads); + + if(transpose) + std::swap(rows,cols); + + ei_declare_aligned_stack_constructed_variable(GemmParallelInfo,info,threads,0); + + #pragma omp parallel num_threads(threads) + { + Index i = omp_get_thread_num(); + // Note that the actual number of threads might be lower than the number of request ones. + Index actual_threads = omp_get_num_threads(); + + Index blockCols = (cols / actual_threads) & ~Index(0x3); + Index blockRows = (rows / actual_threads); + blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; + + Index r0 = i*blockRows; + Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; + + Index c0 = i*blockCols; + Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; + + info[i].lhs_start = r0; + info[i].lhs_length = actualBlockRows; + + if(transpose) func(c0, actualBlockCols, 0, rows, info); + else func(0, rows, c0, actualBlockCols, info); + } +#endif +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARALLELIZER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h new file mode 100644 index 000000000..da6f82abc --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -0,0 +1,521 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H +#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H + +namespace Eigen { + +namespace internal { + +// pack a selfadjoint block diagonal for use with the gebp_kernel +template +struct symm_pack_lhs +{ + template inline + void pack(Scalar* blockA, const const_blas_data_mapper& lhs, Index cols, Index i, Index& count) + { + // normal copy + for(Index k=0; k::size }; + const_blas_data_mapper lhs(_lhs,lhsStride); + Index count = 0; + //Index peeled_mc3 = (rows/Pack1)*Pack1; + + const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; + const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; + const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + if(Pack1>=3*PacketSize) + for(Index i=0; i(blockA, lhs, cols, i, count); + + if(Pack1>=2*PacketSize) + for(Index i=peeled_mc3; i(blockA, lhs, cols, i, count); + + if(Pack1>=1*PacketSize) + for(Index i=peeled_mc2; i(blockA, lhs, cols, i, count); + + // do the same with mr==1 + for(Index i=peeled_mc1; i +struct symm_pack_rhs +{ + enum { PacketSize = packet_traits::size }; + void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2) + { + Index end_k = k2 + rows; + Index count = 0; + const_blas_data_mapper rhs(_rhs,rhsStride); + Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; + Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0; + + // first part: normal case + for(Index j2=0; j2=4) + { + blockB[count+2] = rhs(k,j2+2); + blockB[count+3] = rhs(k,j2+3); + } + if (nr>=8) + { + blockB[count+4] = rhs(k,j2+4); + blockB[count+5] = rhs(k,j2+5); + blockB[count+6] = rhs(k,j2+6); + blockB[count+7] = rhs(k,j2+7); + } + count += nr; + } + } + + // second part: diagonal block + Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2; + if(nr>=8) + { + for(Index j2=k2; j2=4) + { + for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4) + { + // again we can split vertically in three different parts (transpose, symmetric, normal) + // transpose + for(Index k=k2; k=8) + { + for(Index j2=k2+rows; j2=4) + { + for(Index j2=(std::max)(packet_cols8,k2+rows); j2 the same with nr==1) + for(Index j2=packet_cols4; j2 +struct product_selfadjoint_matrix; + +template +struct product_selfadjoint_matrix +{ + + static EIGEN_STRONG_INLINE void run( + Index rows, Index cols, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + product_selfadjoint_matrix::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs), + EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor, + LhsSelfAdjoint, NumTraits::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs), + ColMajor> + ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking); + } +}; + +template +struct product_selfadjoint_matrix +{ + + static EIGEN_DONT_INLINE void run( + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking); +}; + +template +EIGEN_DONT_INLINE void product_selfadjoint_matrix::run( + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* _res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + Index size = rows; + + typedef gebp_traits Traits; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper LhsTransposeMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + LhsTransposeMapper lhs_transpose(_lhs,lhsStride); + RhsMapper rhs(_rhs,rhsStride); + ResMapper res(_res, resStride); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + // kc must be smaller than mc + kc = (std::min)(kc,mc); + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols; + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + gebp_kernel gebp_kernel; + symm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gemm_pack_lhs pack_lhs_transposed; + + for(Index k2=0; k2 transposed packed copy + // 2 - the diagonal block => special packed copy + // 3 - the panel below the diagonal block => generic packed copy + for(Index i2=0; i2() + (blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc); + + gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha); + } + } + } + +// matrix * selfadjoint product +template +struct product_selfadjoint_matrix +{ + + static EIGEN_DONT_INLINE void run( + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* 
_rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking); +}; + +template +EIGEN_DONT_INLINE void product_selfadjoint_matrix::run( + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* _res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + Index size = cols; + + typedef gebp_traits Traits; + + typedef const_blas_data_mapper LhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + ResMapper res(_res,resStride); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols; + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + symm_pack_rhs pack_rhs; + + for(Index k2=0; k2 GEPP + for(Index i2=0; i2 +struct selfadjoint_product_impl +{ + typedef typename Product::Scalar Scalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + + enum { + LhsIsUpper = (LhsMode&(Upper|Lower))==Upper, + LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint, + RhsIsUpper = (RhsMode&(Upper|Lower))==Upper, + RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint + }; + + template + static void run(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha) + { + eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); + + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(a_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(a_rhs); + + Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) + * RhsBlasTraits::extractScalarFactor(a_rhs); + + typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar, + Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,1> BlockingType; + + BlockingType blocking(lhs.rows(), rhs.cols(), lhs.cols(), 1, false); + + internal::product_selfadjoint_matrix::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, + NumTraits::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)), + EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint, + NumTraits::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)), + internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor> + ::run( + lhs.rows(), rhs.cols(), // sizes + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info + &dst.coeffRef(0,0), dst.outerStride(), // result info + actualAlpha, blocking // alpha + ); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h new file mode 100644 index 000000000..9a5318507 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h @@ -0,0 +1,287 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H +#define EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + + +/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */ + +#define EIGEN_BLAS_SYMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_selfadjoint_matrix \ +{\ +\ + static void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + char side='L', uplo='L'; \ + BlasIndex m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + EIGTYPE beta(1); \ + MatrixX##EIGPREFIX b_tmp; \ +\ +/* Set transpose options */ \ +/* Set m, n, k */ \ + m = convert_index(rows); \ + n = convert_index(cols); \ +\ +/* Set lda, ldb, ldc */ \ + lda = convert_index(lhsStride); \ + ldb = convert_index(rhsStride); \ + ldc = convert_index(resStride); \ +\ +/* Set a, b, c */ \ + if (LhsStorageOrder==RowMajor) uplo='U'; \ + a = _lhs; \ +\ + if (RhsStorageOrder==RowMajor) { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.adjoint(); \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ + } else b = _rhs; \ +\ + BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \ +\ + } \ +}; + + +#define EIGEN_BLAS_HEMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_selfadjoint_matrix \ +{\ + static void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + char side='L', uplo='L'; \ + BlasIndex m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + EIGTYPE beta(1); \ + MatrixX##EIGPREFIX b_tmp; \ + Matrix a_tmp; \ +\ +/* Set transpose options */ \ +/* Set m, n, k */ \ + m = convert_index(rows); \ + n = convert_index(cols); \ +\ +/* Set lda, ldb, ldc */ \ + lda = convert_index(lhsStride); \ + ldb = convert_index(rhsStride); \ + ldc = convert_index(resStride); \ +\ +/* Set a, b, c */ \ + if (((LhsStorageOrder==ColMajor) && ConjugateLhs) || ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \ + Map, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \ + a_tmp = lhs.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else a = _lhs; \ + if (LhsStorageOrder==RowMajor) uplo='U'; \ +\ + if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \ + b = _rhs; } \ + else { \ + if (RhsStorageOrder==ColMajor && ConjugateRhs) { \ + Map > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \ + b_tmp = rhs.conjugate(); \ + } else \ + if (ConjugateRhs) { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.adjoint(); \ + } else { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.transpose(); \ + } \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ + } \ +\ + BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \ +\ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_SYMM_L(double, double, d, dsymm) +EIGEN_BLAS_SYMM_L(float, float, f, ssymm) +EIGEN_BLAS_HEMM_L(dcomplex, MKL_Complex16, cd, zhemm) 
+EIGEN_BLAS_HEMM_L(scomplex, MKL_Complex8, cf, chemm) +#else +EIGEN_BLAS_SYMM_L(double, double, d, dsymm_) +EIGEN_BLAS_SYMM_L(float, float, f, ssymm_) +EIGEN_BLAS_HEMM_L(dcomplex, double, cd, zhemm_) +EIGEN_BLAS_HEMM_L(scomplex, float, cf, chemm_) +#endif + +/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */ + +#define EIGEN_BLAS_SYMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_selfadjoint_matrix \ +{\ +\ + static void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + char side='R', uplo='L'; \ + BlasIndex m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + EIGTYPE beta(1); \ + MatrixX##EIGPREFIX b_tmp; \ +\ +/* Set m, n, k */ \ + m = convert_index(rows); \ + n = convert_index(cols); \ +\ +/* Set lda, ldb, ldc */ \ + lda = convert_index(rhsStride); \ + ldb = convert_index(lhsStride); \ + ldc = convert_index(resStride); \ +\ +/* Set a, b, c */ \ + if (RhsStorageOrder==RowMajor) uplo='U'; \ + a = _rhs; \ +\ + if (LhsStorageOrder==RowMajor) { \ + Map > lhs(_lhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = lhs.adjoint(); \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ + } else b = _lhs; \ +\ + BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \ +\ + } \ +}; + + +#define EIGEN_BLAS_HEMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_selfadjoint_matrix \ +{\ + static void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& /*blocking*/) \ + { \ + char side='R', uplo='L'; \ + BlasIndex m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + EIGTYPE beta(1); \ + MatrixX##EIGPREFIX b_tmp; \ + Matrix a_tmp; \ +\ +/* Set m, n, k */ \ + m = convert_index(rows); \ + n = convert_index(cols); \ +\ +/* Set lda, ldb, ldc */ \ + lda = convert_index(rhsStride); \ + ldb = convert_index(lhsStride); \ + ldc = convert_index(resStride); \ +\ +/* Set a, b, c */ \ + if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \ + Map, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \ + a_tmp = rhs.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else a = _rhs; \ + if (RhsStorageOrder==RowMajor) uplo='U'; \ +\ + if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \ + b = _lhs; } \ + else { \ + if (LhsStorageOrder==ColMajor && ConjugateLhs) { \ + Map > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \ + b_tmp = lhs.conjugate(); \ + } else \ + if (ConjugateLhs) { \ + Map > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \ + b_tmp = lhs.adjoint(); \ + } else { \ + Map > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \ + b_tmp = lhs.transpose(); \ + } \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ + } \ +\ + BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_SYMM_R(double, double, d, dsymm) +EIGEN_BLAS_SYMM_R(float, float, f, ssymm) +EIGEN_BLAS_HEMM_R(dcomplex, MKL_Complex16, cd, zhemm) +EIGEN_BLAS_HEMM_R(scomplex, MKL_Complex8, cf, 
chemm) +#else +EIGEN_BLAS_SYMM_R(double, double, d, dsymm_) +EIGEN_BLAS_SYMM_R(float, float, f, ssymm_) +EIGEN_BLAS_HEMM_R(dcomplex, double, cd, zhemm_) +EIGEN_BLAS_HEMM_R(scomplex, float, cf, chemm_) +#endif +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h new file mode 100644 index 000000000..3fd180e6c --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -0,0 +1,260 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H +#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H + +namespace Eigen { + +namespace internal { + +/* Optimized selfadjoint matrix * vector product: + * This algorithm processes 2 columns at onces that allows to both reduce + * the number of load/stores of the result by a factor 2 and to reduce + * the instruction dependency. + */ + +template +struct selfadjoint_matrix_vector_product; + +template +struct selfadjoint_matrix_vector_product + +{ +static EIGEN_DONT_INLINE void run( + Index size, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, + Scalar* res, + Scalar alpha); +}; + +template +EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product::run( + Index size, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, + Scalar* res, + Scalar alpha) +{ + typedef typename packet_traits::type Packet; + typedef typename NumTraits::Real RealScalar; + const Index PacketSize = sizeof(Packet)/sizeof(Scalar); + + enum { + IsRowMajor = StorageOrder==RowMajor ? 1 : 0, + IsLower = UpLo == Lower ? 1 : 0, + FirstTriangular = IsRowMajor == IsLower + }; + + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1; + conj_helper cjd; + + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1; + + Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha; + + + Index bound = (std::max)(Index(0),size-8) & 0xfffffffe; + if (FirstTriangular) + bound = size - bound; + + for (Index j=FirstTriangular ? bound : 0; + j<(FirstTriangular ? size : bound);j+=2) + { + const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride; + const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride; + + Scalar t0 = cjAlpha * rhs[j]; + Packet ptmp0 = pset1(t0); + Scalar t1 = cjAlpha * rhs[j+1]; + Packet ptmp1 = pset1(t1); + + Scalar t2(0); + Packet ptmp2 = pset1(t2); + Scalar t3(0); + Packet ptmp3 = pset1(t3); + + Index starti = FirstTriangular ? 0 : j+2; + Index endi = FirstTriangular ? 
j : size; + Index alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti); + Index alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize); + + res[j] += cjd.pmul(numext::real(A0[j]), t0); + res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1); + if(FirstTriangular) + { + res[j] += cj0.pmul(A1[j], t1); + t3 += cj1.pmul(A1[j], rhs[j]); + } + else + { + res[j+1] += cj0.pmul(A0[j+1],t0); + t2 += cj1.pmul(A0[j+1], rhs[j+1]); + } + + for (Index i=starti; i huge speed up) + // gcc 4.2 does this optimization automatically. + const Scalar* EIGEN_RESTRICT a0It = A0 + alignedStart; + const Scalar* EIGEN_RESTRICT a1It = A1 + alignedStart; + const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart; + Scalar* EIGEN_RESTRICT resIt = res + alignedStart; + for (Index i=alignedStart; i(a0It); a0It += PacketSize; + Packet A1i = ploadu(a1It); a1It += PacketSize; + Packet Bi = ploadu(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases + Packet Xi = pload (resIt); + + Xi = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi)); + ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2); + ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3); + pstore(resIt,Xi); resIt += PacketSize; + } + for (Index i=alignedEnd; i +struct selfadjoint_product_impl +{ + typedef typename Product::Scalar Scalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef typename internal::remove_all::type ActualLhsTypeCleaned; + + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + enum { LhsUpLo = LhsMode&(Upper|Lower) }; + + template + static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha) + { + typedef typename Dest::Scalar ResScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef Map, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits::size)> MappedDest; + + eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols()); + + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(a_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(a_rhs); + + Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) + * RhsBlasTraits::extractScalarFactor(a_rhs); + + enum { + EvalToDest = (Dest::InnerStrideAtCompileTime==1), + UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1) + }; + + internal::gemv_static_vector_if static_dest; + internal::gemv_static_vector_if static_rhs; + + ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), + EvalToDest ? dest.data() : static_dest.data()); + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(), + UseRhs ? const_cast(rhs.data()) : static_rhs.data()); + + if(!EvalToDest) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + MappedDest(actualDestPtr, dest.size()) = dest; + } + + if(!UseRhs) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = rhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + Map(actualRhsPtr, rhs.size()) = rhs; + } + + + internal::selfadjoint_matrix_vector_product::Flags&RowMajorBit) ? 
RowMajor : ColMajor, + int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run + ( + lhs.rows(), // size + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + actualRhsPtr, // rhs info + actualDestPtr, // result info + actualAlpha // scale factor + ); + + if(!EvalToDest) + dest = MappedDest(actualDestPtr, dest.size()); + } +}; + +template +struct selfadjoint_product_impl +{ + typedef typename Product::Scalar Scalar; + enum { RhsUpLo = RhsMode&(Upper|Lower) }; + + template + static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha) + { + // let's simply transpose the product + Transpose destT(dest); + selfadjoint_product_impl, int(RhsUpLo)==Upper ? Lower : Upper, false, + Transpose, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h new file mode 100644 index 000000000..1238345e3 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h @@ -0,0 +1,118 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H +#define EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements selfadjoint matrix-vector multiplication using BLAS +**********************************************************************/ + +// symv/hemv specialization + +template +struct selfadjoint_matrix_vector_product_symv : + selfadjoint_matrix_vector_product {}; + +#define EIGEN_BLAS_SYMV_SPECIALIZE(Scalar) \ +template \ +struct selfadjoint_matrix_vector_product { \ +static void run( \ + Index size, const Scalar* lhs, Index lhsStride, \ + const Scalar* _rhs, Scalar* res, Scalar alpha) { \ + enum {\ + IsColMajor = StorageOrder==ColMajor \ + }; \ + if (IsColMajor == ConjugateLhs) {\ + selfadjoint_matrix_vector_product::run( \ + size, lhs, lhsStride, _rhs, res, alpha); \ + } else {\ + selfadjoint_matrix_vector_product_symv::run( \ + size, lhs, lhsStride, _rhs, res, alpha); \ + }\ + } \ +}; \ + +EIGEN_BLAS_SYMV_SPECIALIZE(double) +EIGEN_BLAS_SYMV_SPECIALIZE(float) +EIGEN_BLAS_SYMV_SPECIALIZE(dcomplex) +EIGEN_BLAS_SYMV_SPECIALIZE(scomplex) + +#define EIGEN_BLAS_SYMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \ +template \ +struct selfadjoint_matrix_vector_product_symv \ +{ \ +typedef Matrix SYMVVector;\ +\ +static void run( \ +Index size, const EIGTYPE* lhs, Index lhsStride, \ +const EIGTYPE* _rhs, EIGTYPE* res, EIGTYPE alpha) \ +{ \ + enum {\ + IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \ + IsLower = UpLo == Lower ? 1 : 0 \ + }; \ + BlasIndex n=convert_index(size), lda=convert_index(lhsStride), incx=1, incy=1; \ + EIGTYPE beta(1); \ + const EIGTYPE *x_ptr; \ + char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \ + SYMVVector x_tmp; \ + if (ConjugateRhs) { \ + Map map_x(_rhs,size,1); \ + x_tmp=map_x.conjugate(); \ + x_ptr=x_tmp.data(); \ + } else x_ptr=_rhs; \ + BLASFUNC(&uplo, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \ +}\ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv) +EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv) +EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv) +EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, MKL_Complex8, chemv) +#else +EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, double, zhemv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, float, chemv_) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointProduct.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointProduct.h new file mode 100644 index 000000000..f038d686f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointProduct.h @@ -0,0 +1,133 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
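// Illustrative sketch (not part of the patch above): the ?symv binding in
// SelfadjointMatrixVector_BLAS.h reads only one triangle of A; uplo selects
// which one, and a row-major Eigen matrix flips it, since row-major lower
// storage reads as column-major upper. Assumes an underscore-suffixed
// Fortran BLAS is linked.
#include <cstdio>

extern "C" void dsymv_(const char* uplo, const int* n, const double* alpha,
                       const double* a, const int* lda,
                       const double* x, const int* incx,
                       const double* beta, double* y, const int* incy);

int main()
{
    // Column-major 2x2; only {a00, a10, a11} is referenced with uplo='L',
    // so the strictly upper entry is deliberately left as garbage.
    const double A[4] = { 1, 2, /*ignored*/ -99, 3 };
    const double x[2] = { 1, 1 };
    double y[2]       = { 0, 0 };
    const int n = 2, inc = 1;
    const double alpha = 1.0, beta = 0.0;
    const char uplo = 'L';
    dsymv_(&uplo, &n, &alpha, A, &n, x, &inc, &beta, y, &inc);
    std::printf("y = [%g %g]\n", y[0], y[1]); // A = [[1,2],[2,3]], expect [3 5]
    return 0;
}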
+// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELFADJOINT_PRODUCT_H +#define EIGEN_SELFADJOINT_PRODUCT_H + +/********************************************************************** +* This file implements a self adjoint product: C += A A^T updating only +* half of the selfadjoint matrix C. +* It corresponds to the level 3 SYRK and level 2 SYR Blas routines. +**********************************************************************/ + +namespace Eigen { + + +template +struct selfadjoint_rank1_update +{ + static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha) + { + internal::conj_if cj; + typedef Map > OtherMap; + typedef typename internal::conditional::type ConjLhsType; + for (Index i=0; i >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1))) + += (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1))); + } + } +}; + +template +struct selfadjoint_rank1_update +{ + static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha) + { + selfadjoint_rank1_update::run(size,mat,stride,vecY,vecX,alpha); + } +}; + +template +struct selfadjoint_product_selector; + +template +struct selfadjoint_product_selector +{ + static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha) + { + typedef typename MatrixType::Scalar Scalar; + typedef internal::blas_traits OtherBlasTraits; + typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; + typedef typename internal::remove_all::type _ActualOtherType; + typename internal::add_const_on_value_type::type actualOther = OtherBlasTraits::extract(other.derived()); + + Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); + + enum { + StorageOrder = (internal::traits::Flags&RowMajorBit) ? RowMajor : ColMajor, + UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1 + }; + internal::gemv_static_vector_if static_other; + + ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(), + (UseOtherDirectly ? const_cast(actualOther.data()) : static_other.data())); + + if(!UseOtherDirectly) + Map(actualOtherPtr, actualOther.size()) = actualOther; + + selfadjoint_rank1_update::IsComplex, + (!OtherBlasTraits::NeedToConjugate) && NumTraits::IsComplex> + ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha); + } +}; + +template +struct selfadjoint_product_selector +{ + static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha) + { + typedef typename MatrixType::Scalar Scalar; + typedef internal::blas_traits OtherBlasTraits; + typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; + typedef typename internal::remove_all::type _ActualOtherType; + typename internal::add_const_on_value_type::type actualOther = OtherBlasTraits::extract(other.derived()); + + Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); + + enum { + IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0, + OtherIsRowMajor = _ActualOtherType::Flags&RowMajorBit ? 
1 : 0 + }; + + Index size = mat.cols(); + Index depth = actualOther.cols(); + + typedef internal::gemm_blocking_space BlockingType; + + BlockingType blocking(size, size, depth, 1, false); + + + internal::general_matrix_matrix_triangular_product::IsComplex, + Scalar, OtherIsRowMajor ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits::IsComplex, + IsRowMajor ? RowMajor : ColMajor, UpLo> + ::run(size, depth, + &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(), + mat.data(), mat.outerStride(), actualAlpha, blocking); + } +}; + +// high level API + +template +template +SelfAdjointView& SelfAdjointView +::rankUpdate(const MatrixBase& u, const Scalar& alpha) +{ + selfadjoint_product_selector::run(_expression().const_cast_derived(), u.derived(), alpha); + + return *this; +} + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_PRODUCT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h new file mode 100644 index 000000000..2ae364111 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h @@ -0,0 +1,93 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
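// Illustrative sketch (not part of the patch above): the selector in
// SelfadjointProduct.h backs SelfAdjointView::rankUpdate, which performs
// C += alpha * u * u^adjoint while writing only the chosen triangle (the
// SYR/SYRK pattern); the opposite triangle of the dense storage is untouched.
// Assumes the vendored Eigen headers are on the include path.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d C = Eigen::Matrix3d::Zero();
    Eigen::Vector3d u(1.0, 2.0, 3.0);

    // Lower triangle of C becomes 2*u*u^T; the strictly upper part stays 0.
    C.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);

    std::cout << C << "\n"; // row 0 prints as [2 0 0], row 2 as [6 12 18]
    return 0;
}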
+ +#ifndef EIGEN_SELFADJOINTRANK2UPTADE_H +#define EIGEN_SELFADJOINTRANK2UPTADE_H + +namespace Eigen { + +namespace internal { + +/* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu' + * It corresponds to the Level2 syr2 BLAS routine + */ + +template +struct selfadjoint_rank2_update_selector; + +template +struct selfadjoint_rank2_update_selector +{ + static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha) + { + const Index size = u.size(); + for (Index i=0; i >(mat+stride*i+i, size-i) += + (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i) + + (alpha * numext::conj(v.coeff(i))) * u.tail(size-i); + } + } +}; + +template +struct selfadjoint_rank2_update_selector +{ + static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha) + { + const Index size = u.size(); + for (Index i=0; i >(mat+stride*i, i+1) += + (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.head(i+1) + + (alpha * numext::conj(v.coeff(i))) * u.head(i+1); + } +}; + +template struct conj_expr_if + : conditional::Scalar>,T> > {}; + +} // end namespace internal + +template +template +SelfAdjointView& SelfAdjointView +::rankUpdate(const MatrixBase& u, const MatrixBase& v, const Scalar& alpha) +{ + typedef internal::blas_traits UBlasTraits; + typedef typename UBlasTraits::DirectLinearAccessType ActualUType; + typedef typename internal::remove_all::type _ActualUType; + typename internal::add_const_on_value_type::type actualU = UBlasTraits::extract(u.derived()); + + typedef internal::blas_traits VBlasTraits; + typedef typename VBlasTraits::DirectLinearAccessType ActualVType; + typedef typename internal::remove_all::type _ActualVType; + typename internal::add_const_on_value_type::type actualV = VBlasTraits::extract(v.derived()); + + // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and + // vice versa, and take the complex conjugate of all coefficients and vector entries. + + enum { IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0 }; + Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived()) + * numext::conj(VBlasTraits::extractScalarFactor(v.derived())); + if (IsRowMajor) + actualAlpha = numext::conj(actualAlpha); + + typedef typename internal::remove_all::type>::type UType; + typedef typename internal::remove_all::type>::type VType; + internal::selfadjoint_rank2_update_selector + ::run(_expression().const_cast_derived().data(),_expression().outerStride(),UType(actualU),VType(actualV),actualAlpha); + + return *this; +} + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINTRANK2UPTADE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h new file mode 100644 index 000000000..f784507e7 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -0,0 +1,466 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
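// Illustrative sketch (not part of the patch above): the two-vector overload
// implemented by SelfadjointRank2Update.h is the SYR2-style update
// C += alpha*u*v^adjoint + conj(alpha)*v*u^adjoint, again touching only the
// selected triangle. Assumes the vendored Eigen headers are on the include path.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix2d C = Eigen::Matrix2d::Zero();
    Eigen::Vector2d u(1.0, 0.0), v(0.0, 1.0);

    // Full selfadjoint result is [[0,1],[1,0]]; with a Lower view only the
    // C(1,0) entry is actually stored.
    C.selfadjointView<Eigen::Lower>().rankUpdate(u, v, 1.0);

    std::cout << C << "\n";
    return 0;
}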
+ +#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H +#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H + +namespace Eigen { + +namespace internal { + +// template +// struct gemm_pack_lhs_triangular +// { +// Matrix::IsComplex && Conjugate> cj; +// const_blas_data_mapper lhs(_lhs,lhsStride); +// int count = 0; +// const int peeled_mc = (rows/mr)*mr; +// for(int i=0; i +struct product_triangular_matrix_matrix; + +template +struct product_triangular_matrix_matrix +{ + static EIGEN_STRONG_INLINE void run( + Index rows, Index cols, Index depth, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + product_triangular_matrix_matrix + ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking); + } +}; + +// implements col-major += alpha * op(triangular) * op(general) +template +struct product_triangular_matrix_matrix +{ + + typedef gebp_traits Traits; + enum { + SmallPanelWidth = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), + IsLower = (Mode&Lower) == Lower, + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1 + }; + + static EIGEN_DONT_INLINE void run( + Index _rows, Index _cols, Index _depth, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking); +}; + +template +EIGEN_DONT_INLINE void product_triangular_matrix_matrix::run( + Index _rows, Index _cols, Index _depth, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* _res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + // strip zeros + Index diagSize = (std::min)(_rows,_depth); + Index rows = IsLower ? _rows : diagSize; + Index depth = IsLower ? diagSize : _depth; + Index cols = _cols; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + RhsMapper rhs(_rhs,rhsStride); + ResMapper res(_res, resStride); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + // The small panel size must not be larger than blocking size. + // Usually this should never be the case because SmallPanelWidth^2 is very small + // compared to L2 cache size, but let's be safe: + Index panelWidth = (std::min)(Index(SmallPanelWidth),(std::min)(kc,mc)); + + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols; + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + // To work around an "error: member reference base type 'Matrix<...> + // (Eigen::internal::constructor_without_unaligned_array_assert (*)())' is + // not a structure or union" compilation error in nvcc (tested V8.0.61), + // create a dummy internal::constructor_without_unaligned_array_assert + // object to pass to the Matrix constructor. + internal::constructor_without_unaligned_array_assert a; + Matrix triangularBuffer(a); + triangularBuffer.setZero(); + if((Mode&ZeroDiag)==ZeroDiag) + triangularBuffer.diagonal().setZero(); + else + triangularBuffer.diagonal().setOnes(); + + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + + for(Index k2=IsLower ? depth : 0; + IsLower ? 
k2>0 : k2rows)) + { + actual_kc = rows-k2; + k2 = k2+actual_kc-kc; + } + + pack_rhs(blockB, rhs.getSubMapper(actual_k2,0), actual_kc, cols); + + // the selected lhs's panel has to be split in three different parts: + // 1 - the part which is zero => skip it + // 2 - the diagonal block => special kernel + // 3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP + + // the block diagonal, if any: + if(IsLower || actual_k2(actual_kc-k1, panelWidth); + Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1; + Index startBlock = actual_k2+k1; + Index blockBOffset = k1; + + // => GEBP with the micro triangular block + // The trick is to pack this micro block while filling the opposite triangular part with zeros. + // To this end we do an extra triangular copy to a small temporary buffer + for (Index k=0;k0) + { + Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2; + + pack_lhs(blockA, lhs.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget); + + gebp_kernel(res.getSubMapper(startTarget, 0), blockA, blockB, + lengthTarget, actualPanelWidth, cols, alpha, + actualPanelWidth, actual_kc, 0, blockBOffset); + } + } + } + // the part below (lower case) or above (upper case) the diagonal => GEPP + { + Index start = IsLower ? k2 : 0; + Index end = IsLower ? rows : (std::min)(actual_k2,rows); + for(Index i2=start; i2() + (blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc); + + gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, + actual_kc, cols, alpha, -1, -1, 0, 0); + } + } + } + } + +// implements col-major += alpha * op(general) * op(triangular) +template +struct product_triangular_matrix_matrix +{ + typedef gebp_traits Traits; + enum { + SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), + IsLower = (Mode&Lower) == Lower, + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1 + }; + + static EIGEN_DONT_INLINE void run( + Index _rows, Index _cols, Index _depth, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, + const Scalar& alpha, level3_blocking& blocking); +}; + +template +EIGEN_DONT_INLINE void product_triangular_matrix_matrix::run( + Index _rows, Index _cols, Index _depth, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* _res, Index resStride, + const Scalar& alpha, level3_blocking& blocking) + { + const Index PacketBytes = packet_traits::size*sizeof(Scalar); + // strip zeros + Index diagSize = (std::min)(_cols,_depth); + Index rows = _rows; + Index depth = IsLower ? _depth : diagSize; + Index cols = IsLower ? 
diagSize : _cols; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(_lhs,lhsStride); + RhsMapper rhs(_rhs,rhsStride); + ResMapper res(_res, resStride); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols+EIGEN_MAX_ALIGN_BYTES/sizeof(Scalar); + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + internal::constructor_without_unaligned_array_assert a; + Matrix triangularBuffer(a); + triangularBuffer.setZero(); + if((Mode&ZeroDiag)==ZeroDiag) + triangularBuffer.diagonal().setZero(); + else + triangularBuffer.diagonal().setOnes(); + + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gemm_pack_rhs pack_rhs_panel; + + for(Index k2=IsLower ? 0 : depth; + IsLower ? k20; + IsLower ? k2+=kc : k2-=kc) + { + Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc); + Index actual_k2 = IsLower ? k2 : k2-actual_kc; + + // align blocks with the end of the triangular part for trapezoidal rhs + if(IsLower && (k2cols)) + { + actual_kc = cols-k2; + k2 = actual_k2 + actual_kc - kc; + } + + // remaining size + Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2; + // size of the triangular part + Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc; + + Scalar* geb = blockB+ts*ts; + geb = geb + internal::first_aligned(geb,PacketBytes/sizeof(Scalar)); + + pack_rhs(geb, rhs.getSubMapper(actual_k2,IsLower ? 0 : k2), actual_kc, rs); + + // pack the triangular part of the rhs padding the unrolled blocks with zeros + if(ts>0) + { + for (Index j2=0; j2(actual_kc-j2, SmallPanelWidth); + Index actual_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; + // general part + pack_rhs_panel(blockB+j2*actual_kc, + rhs.getSubMapper(actual_k2+panelOffset, actual_j2), + panelLength, actualPanelWidth, + actual_kc, panelOffset); + + // append the triangular part via a temporary buffer + for (Index j=0;j0) + { + for (Index j2=0; j2(actual_kc-j2, SmallPanelWidth); + Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth; + Index blockOffset = IsLower ? j2 : 0; + + gebp_kernel(res.getSubMapper(i2, actual_k2 + j2), + blockA, blockB+j2*actual_kc, + actual_mc, panelLength, actualPanelWidth, + alpha, + actual_kc, actual_kc, // strides + blockOffset, blockOffset);// offsets + } + } + gebp_kernel(res.getSubMapper(i2, IsLower ? 
0 : k2), + blockA, geb, actual_mc, actual_kc, rs, + alpha, + -1, -1, 0, 0); + } + } + } + +/*************************************************************************** +* Wrapper to product_triangular_matrix_matrix +***************************************************************************/ + +} // end namespace internal + +namespace internal { +template +struct triangular_product_impl +{ + template static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha) + { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar Scalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef typename internal::remove_all::type ActualLhsTypeCleaned; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(a_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(a_rhs); + + LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(a_lhs); + RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(a_rhs); + Scalar actualAlpha = alpha * lhs_alpha * rhs_alpha; + + typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar, + Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType; + + enum { IsLower = (Mode&Lower) == Lower }; + Index stripedRows = ((!LhsIsTriangular) || (IsLower)) ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols()); + Index stripedCols = ((LhsIsTriangular) || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows()); + Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows())) + : ((IsLower) ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols())); + + BlockingType blocking(stripedRows, stripedCols, stripedDepth, 1, false); + + internal::product_triangular_matrix_matrix::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, + (internal::traits::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, + (internal::traits::Flags&RowMajorBit) ? 
RowMajor : ColMajor> + ::run( + stripedRows, stripedCols, stripedDepth, // sizes + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info + &dst.coeffRef(0,0), dst.outerStride(), // result info + actualAlpha, blocking + ); + + // Apply correction if the diagonal is unit and a scalar factor was nested: + if ((Mode&UnitDiag)==UnitDiag) + { + if (LhsIsTriangular && lhs_alpha!=LhsScalar(1)) + { + Index diagSize = (std::min)(lhs.rows(),lhs.cols()); + dst.topRows(diagSize) -= ((lhs_alpha-LhsScalar(1))*a_rhs).topRows(diagSize); + } + else if ((!LhsIsTriangular) && rhs_alpha!=RhsScalar(1)) + { + Index diagSize = (std::min)(rhs.rows(),rhs.cols()); + dst.leftCols(diagSize) -= (rhs_alpha-RhsScalar(1))*a_lhs.leftCols(diagSize); + } + } + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h new file mode 100644 index 000000000..a25197ab0 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h @@ -0,0 +1,315 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Triangular matrix * matrix product functionality based on ?TRMM. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H +#define EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + + +template +struct product_triangular_matrix_matrix_trmm : + product_triangular_matrix_matrix {}; + + +// try to go to BLAS specialization +#define EIGEN_BLAS_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \ +template \ +struct product_triangular_matrix_matrix { \ + static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\ + const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking& blocking) { \ + product_triangular_matrix_matrix_trmm::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ + } \ +}; + +EIGEN_BLAS_TRMM_SPECIALIZE(double, true) +EIGEN_BLAS_TRMM_SPECIALIZE(double, false) +EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, true) +EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, false) +EIGEN_BLAS_TRMM_SPECIALIZE(float, true) +EIGEN_BLAS_TRMM_SPECIALIZE(float, false) +EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true) +EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false) + +// implements col-major += alpha * op(triangular) * op(general) +#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \ + }; \ +\ + static void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& blocking) \ + { \ + Index diagSize = (std::min)(_rows,_depth); \ + Index rows = IsLower ? _rows : diagSize; \ + Index depth = IsLower ? diagSize : _depth; \ + Index cols = _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ + if (rows != depth) { \ +\ + /* FIXME handle mkl_domain_get_max_threads */ \ + /*int nthr = mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS);*/ int nthr = 1;\ +\ + if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \ + /* Most likely no benefit to call TRMM or GEMM from BLAS */ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ + /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs aa_tmp=lhsMap.template triangularView(); \ + BlasIndex aStride = convert_index(aa_tmp.outerStride()); \ + gemm_blocking_space gemm_blocking(_rows,_cols,_depth, 1, true); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \ +\ + /*std::cout << "TRMM_L: A is not square! Go to BLAS GEMM implementation! 
" << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'L', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + BlasIndex m, n, lda, ldb; \ +\ +/* Set m, n */ \ + m = convert_index(diagSize); \ + n = convert_index(cols); \ +\ +/* Set trans */ \ + transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _lhs; \ + lda = convert_index(lhsStride); \ + } \ + /*std::cout << "TRMM_L: A is square! Go to BLAS TRMM implementation! \n";*/ \ +/* call ?trmm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMM_L(double, double, d, dtrmm) +EIGEN_BLAS_TRMM_L(dcomplex, MKL_Complex16, cd, ztrmm) +EIGEN_BLAS_TRMM_L(float, float, f, strmm) +EIGEN_BLAS_TRMM_L(scomplex, MKL_Complex8, cf, ctrmm) +#else +EIGEN_BLAS_TRMM_L(double, double, d, dtrmm_) +EIGEN_BLAS_TRMM_L(dcomplex, double, cd, ztrmm_) +EIGEN_BLAS_TRMM_L(float, float, f, strmm_) +EIGEN_BLAS_TRMM_L(scomplex, float, cf, ctrmm_) +#endif + +// implements col-major += alpha * op(general) * op(triangular) +#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \ + }; \ +\ + static void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& blocking) \ + { \ + Index diagSize = (std::min)(_cols,_depth); \ + Index rows = _rows; \ + Index depth = IsLower ? _depth : diagSize; \ + Index cols = IsLower ? diagSize : _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ + if (cols != depth) { \ +\ + int nthr = 1 /*mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS)*/; \ +\ + if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \ + /* Most likely no benefit to call TRMM or GEMM from BLAS*/ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ + /*std::cout << "TRMM_R: A is not square! 
Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs aa_tmp=rhsMap.template triangularView(); \ + BlasIndex aStride = convert_index(aa_tmp.outerStride()); \ + gemm_blocking_space gemm_blocking(_rows,_cols,_depth, 1, true); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \ +\ + /*std::cout << "TRMM_R: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'R', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + BlasIndex m, n, lda, ldb; \ +\ +/* Set m, n */ \ + m = convert_index(rows); \ + n = convert_index(diagSize); \ +\ +/* Set trans */ \ + transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _rhs; \ + lda = convert_index(rhsStride); \ + } \ + /*std::cout << "TRMM_R: A is square! Go to BLAS TRMM implementation! \n";*/ \ +/* call ?trmm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMM_R(double, double, d, dtrmm) +EIGEN_BLAS_TRMM_R(dcomplex, MKL_Complex16, cd, ztrmm) +EIGEN_BLAS_TRMM_R(float, float, f, strmm) +EIGEN_BLAS_TRMM_R(scomplex, MKL_Complex8, cf, ctrmm) +#else +EIGEN_BLAS_TRMM_R(double, double, d, dtrmm_) +EIGEN_BLAS_TRMM_R(dcomplex, double, cd, ztrmm_) +EIGEN_BLAS_TRMM_R(float, float, f, strmm_) +EIGEN_BLAS_TRMM_R(scomplex, float, cf, ctrmm_) +#endif +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h new file mode 100644 index 000000000..76bfa159c --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h @@ -0,0 +1,350 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
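The file added below implements Eigen's built-in triangular matrix-vector kernels. For orientation, a minimal sketch of the user-level expression that dispatches into them (plain Eigen API, illustrative only):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 2, 9, 9,   // entries above the diagonal are ignored below
       1, 3, 9,
       4, 5, 6;
  Eigen::Vector3d x(1.0, 2.0, 3.0);
  // y = L * x, with L the lower-triangular part of A.
  Eigen::Vector3d y = A.triangularView<Eigen::Lower>() * x;
  std::cout << y.transpose() << "\n";  // prints: 2 7 32
  return 0;
}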
+ +#ifndef EIGEN_TRIANGULARMATRIXVECTOR_H +#define EIGEN_TRIANGULARMATRIXVECTOR_H + +namespace Eigen { + +namespace internal { + +template +struct triangular_matrix_vector_product; + +template +struct triangular_matrix_vector_product +{ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + enum { + IsLower = ((Mode&Lower)==Lower), + HasUnitDiag = (Mode & UnitDiag)==UnitDiag, + HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag + }; + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha); +}; + +template +EIGEN_DONT_INLINE void triangular_matrix_vector_product + ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha) + { + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + Index size = (std::min)(_rows,_cols); + Index rows = IsLower ? _rows : (std::min)(_rows,_cols); + Index cols = IsLower ? (std::min)(_rows,_cols) : _cols; + + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); + typename conj_expr_if::type cjLhs(lhs); + + typedef Map, 0, InnerStride<> > RhsMap; + const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr)); + typename conj_expr_if::type cjRhs(rhs); + + typedef Map > ResMap; + ResMap res(_res,rows); + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + + for (Index pi=0; pi0) + res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r); + if (HasUnitDiag) + res.coeffRef(i) += alpha * cjRhs.coeff(i); + } + Index r = IsLower ? rows - pi - actualPanelWidth : pi; + if (r>0) + { + Index s = IsLower ? pi+actualPanelWidth : 0; + general_matrix_vector_product::run( + r, actualPanelWidth, + LhsMapper(&lhs.coeffRef(s,pi), lhsStride), + RhsMapper(&rhs.coeffRef(pi), rhsIncr), + &res.coeffRef(s), resIncr, alpha); + } + } + if((!IsLower) && cols>size) + { + general_matrix_vector_product::run( + rows, cols-size, + LhsMapper(&lhs.coeffRef(0,size), lhsStride), + RhsMapper(&rhs.coeffRef(size), rhsIncr), + _res, resIncr, alpha); + } + } + +template +struct triangular_matrix_vector_product +{ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + enum { + IsLower = ((Mode&Lower)==Lower), + HasUnitDiag = (Mode & UnitDiag)==UnitDiag, + HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag + }; + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha); +}; + +template +EIGEN_DONT_INLINE void triangular_matrix_vector_product + ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha) + { + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + Index diagSize = (std::min)(_rows,_cols); + Index rows = IsLower ? _rows : diagSize; + Index cols = IsLower ? 
diagSize : _cols; + + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); + typename conj_expr_if::type cjLhs(lhs); + + typedef Map > RhsMap; + const RhsMap rhs(_rhs,cols); + typename conj_expr_if::type cjRhs(rhs); + + typedef Map, 0, InnerStride<> > ResMap; + ResMap res(_res,rows,InnerStride<>(resIncr)); + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + + for (Index pi=0; pi0) + res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum(); + if (HasUnitDiag) + res.coeffRef(i) += alpha * cjRhs.coeff(i); + } + Index r = IsLower ? pi : cols - pi - actualPanelWidth; + if (r>0) + { + Index s = IsLower ? 0 : pi + actualPanelWidth; + general_matrix_vector_product::run( + actualPanelWidth, r, + LhsMapper(&lhs.coeffRef(pi,s), lhsStride), + RhsMapper(&rhs.coeffRef(s), rhsIncr), + &res.coeffRef(pi), resIncr, alpha); + } + } + if(IsLower && rows>diagSize) + { + general_matrix_vector_product::run( + rows-diagSize, cols, + LhsMapper(&lhs.coeffRef(diagSize,0), lhsStride), + RhsMapper(&rhs.coeffRef(0), rhsIncr), + &res.coeffRef(diagSize), resIncr, alpha); + } + } + +/*************************************************************************** +* Wrapper to product_triangular_vector +***************************************************************************/ + +template +struct trmv_selector; + +} // end namespace internal + +namespace internal { + +template +struct triangular_product_impl +{ + template static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha) + { + eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols()); + + internal::trmv_selector::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(lhs, rhs, dst, alpha); + } +}; + +template +struct triangular_product_impl +{ + template static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha) + { + eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols()); + + Transpose dstT(dst); + internal::trmv_selector<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? Upper : Lower), + (int(internal::traits::Flags)&RowMajorBit) ? ColMajor : RowMajor> + ::run(rhs.transpose(),lhs.transpose(), dstT, alpha); + } +}; + +} // end namespace internal + +namespace internal { + +// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same. 
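The right-sided wrapper above reduces vector-times-triangular to triangular-times-vector by transposing both operands and swapping Upper/Lower. A small self-check of the identity it relies on (an illustrative sketch, not Eigen internals):

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::RowVector3d x = Eigen::RowVector3d::Random();
  // x * U == (U^T * x^T)^T : the right-sided product is evaluated as a
  // left-sided one on transposed operands; U^T is lower triangular, so the
  // Upper/Lower flag flips.
  Eigen::RowVector3d y1 = x * A.triangularView<Eigen::Upper>();
  Eigen::RowVector3d y2 =
      (A.transpose().triangularView<Eigen::Lower>() * x.transpose()).transpose();
  assert(y1.isApprox(y2));
  return 0;
}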
+ +template struct trmv_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + typedef typename Dest::RealScalar RealScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + + typedef Map, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits::size)> MappedDest; + + typename internal::add_const_on_value_type::type actualLhs = LhsBlasTraits::extract(lhs); + typename internal::add_const_on_value_type::type actualRhs = RhsBlasTraits::extract(rhs); + + LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs); + RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs); + ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha; + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal + }; + + gemv_static_vector_if static_dest; + + bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); + bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), + evalToDest ? 
dest.data() : static_dest.data()); + + if(!evalToDest) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if(!alphaIsCompatible) + { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } + else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + internal::triangular_matrix_vector_product + + ::run(actualLhs.rows(),actualLhs.cols(), + actualLhs.data(),actualLhs.outerStride(), + actualRhs.data(),actualRhs.innerStride(), + actualDestPtr,1,compatibleAlpha); + + if (!evalToDest) + { + if(!alphaIsCompatible) + dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + } + + if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) ) + { + Index diagSize = (std::min)(lhs.rows(),lhs.cols()); + dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize); + } + } +}; + +template struct trmv_selector +{ + template + static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) + { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + typename add_const::type actualLhs = LhsBlasTraits::extract(lhs); + typename add_const::type actualRhs = RhsBlasTraits::extract(rhs); + + LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs); + RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs); + ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha; + + enum { + DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 + }; + + gemv_static_vector_if static_rhs; + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), + DirectlyUseRhs ? const_cast(actualRhs.data()) : static_rhs.data()); + + if(!DirectlyUseRhs) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + Index size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + internal::triangular_matrix_vector_product + + ::run(actualLhs.rows(),actualLhs.cols(), + actualLhs.data(),actualLhs.outerStride(), + actualRhsPtr,1, + dest.data(),dest.innerStride(), + actualAlpha); + + if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) ) + { + Index diagSize = (std::min)(lhs.rows(),lhs.cols()); + dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize); + } + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULARMATRIXVECTOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h new file mode 100644 index 000000000..3d47a2b94 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h @@ -0,0 +1,255 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Triangular matrix-vector product functionality based on ?TRMV. + ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H +#define EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements triangular matrix-vector multiplication using BLAS +**********************************************************************/ + +// trmv/hemv specialization + +template +struct triangular_matrix_vector_product_trmv : + triangular_matrix_vector_product {}; + +#define EIGEN_BLAS_TRMV_SPECIALIZE(Scalar) \ +template \ +struct triangular_matrix_vector_product { \ + static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \ + const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \ + triangular_matrix_vector_product_trmv::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + } \ +}; \ +template \ +struct triangular_matrix_vector_product { \ + static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \ + const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \ + triangular_matrix_vector_product_trmv::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + } \ +}; + +EIGEN_BLAS_TRMV_SPECIALIZE(double) +EIGEN_BLAS_TRMV_SPECIALIZE(float) +EIGEN_BLAS_TRMV_SPECIALIZE(dcomplex) +EIGEN_BLAS_TRMV_SPECIALIZE(scomplex) + +// implements col-major: res += alpha * op(triangular) * vector +#define EIGEN_BLAS_TRMV_CM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX, BLASPOSTFIX) \ +template \ +struct triangular_matrix_vector_product_trmv { \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 
1 : 0, \ + LowUp = IsLower ? Lower : Upper \ + }; \ + static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \ + { \ + if (ConjLhs || IsZeroDiag) { \ + triangular_matrix_vector_product::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + return; \ + }\ + Index size = (std::min)(_rows,_cols); \ + Index rows = IsLower ? _rows : size; \ + Index cols = IsLower ? size : _cols; \ +\ + typedef VectorX##EIGPREFIX VectorRhs; \ + EIGTYPE *x, *y;\ +\ +/* Set x*/ \ + Map > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \ + VectorRhs x_tmp; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ +\ +/* Square part handling */\ +\ + char trans, uplo, diag; \ + BlasIndex m, n, lda, incx, incy; \ + EIGTYPE const *a; \ + EIGTYPE beta(1); \ +\ +/* Set m, n */ \ + n = convert_index(size); \ + lda = convert_index(lhsStride); \ + incx = 1; \ + incy = convert_index(resIncr); \ +\ +/* Set uplo, trans and diag*/ \ + trans = 'N'; \ + uplo = IsLower ? 'L' : 'U'; \ + diag = IsUnitDiag ? 'U' : 'N'; \ +\ +/* call ?TRMV*/ \ + BLASPREFIX##trmv##BLASPOSTFIX(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \ +\ +/* Add op(a_tr)rhs into res*/ \ + BLASPREFIX##axpy##BLASPOSTFIX(&n, (const BLASTYPE*)&numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \ +/* Non-square case - doesn't fit to BLAS ?TRMV. Fall to default triangular product*/ \ + if (size<(std::max)(rows,cols)) { \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ + if (size(rows-size); \ + n = convert_index(size); \ + } \ + else { \ + x += size; \ + y = _res; \ + a = _lhs + size*lda; \ + m = convert_index(size); \ + n = convert_index(cols-size); \ + } \ + BLASPREFIX##gemv##BLASPOSTFIX(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)y, &incy); \ + } \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMV_CM(double, double, d, d,) +EIGEN_BLAS_TRMV_CM(dcomplex, MKL_Complex16, cd, z,) +EIGEN_BLAS_TRMV_CM(float, float, f, s,) +EIGEN_BLAS_TRMV_CM(scomplex, MKL_Complex8, cf, c,) +#else +EIGEN_BLAS_TRMV_CM(double, double, d, d, _) +EIGEN_BLAS_TRMV_CM(dcomplex, double, cd, z, _) +EIGEN_BLAS_TRMV_CM(float, float, f, s, _) +EIGEN_BLAS_TRMV_CM(scomplex, float, cf, c, _) +#endif + +// implements row-major: res += alpha * op(triangular) * vector +#define EIGEN_BLAS_TRMV_RM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX, BLASPOSTFIX) \ +template \ +struct triangular_matrix_vector_product_trmv { \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper \ + }; \ + static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \ + { \ + if (IsZeroDiag) { \ + triangular_matrix_vector_product::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + return; \ + }\ + Index size = (std::min)(_rows,_cols); \ + Index rows = IsLower ? _rows : size; \ + Index cols = IsLower ? 
size : _cols; \ +\ + typedef VectorX##EIGPREFIX VectorRhs; \ + EIGTYPE *x, *y;\ +\ +/* Set x*/ \ + Map > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \ + VectorRhs x_tmp; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ +\ +/* Square part handling */\ +\ + char trans, uplo, diag; \ + BlasIndex m, n, lda, incx, incy; \ + EIGTYPE const *a; \ + EIGTYPE beta(1); \ +\ +/* Set m, n */ \ + n = convert_index(size); \ + lda = convert_index(lhsStride); \ + incx = 1; \ + incy = convert_index(resIncr); \ +\ +/* Set uplo, trans and diag*/ \ + trans = ConjLhs ? 'C' : 'T'; \ + uplo = IsLower ? 'U' : 'L'; \ + diag = IsUnitDiag ? 'U' : 'N'; \ +\ +/* call ?TRMV*/ \ + BLASPREFIX##trmv##BLASPOSTFIX(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \ +\ +/* Add op(a_tr)rhs into res*/ \ + BLASPREFIX##axpy##BLASPOSTFIX(&n, (const BLASTYPE*)&numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \ +/* Non-square case - doesn't fit to BLAS ?TRMV. Fall to default triangular product*/ \ + if (size<(std::max)(rows,cols)) { \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ + if (size(rows-size); \ + n = convert_index(size); \ + } \ + else { \ + x += size; \ + y = _res; \ + a = _lhs + size; \ + m = convert_index(size); \ + n = convert_index(cols-size); \ + } \ + BLASPREFIX##gemv##BLASPOSTFIX(&trans, &n, &m, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)y, &incy); \ + } \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMV_RM(double, double, d, d,) +EIGEN_BLAS_TRMV_RM(dcomplex, MKL_Complex16, cd, z,) +EIGEN_BLAS_TRMV_RM(float, float, f, s,) +EIGEN_BLAS_TRMV_RM(scomplex, MKL_Complex8, cf, c,) +#else +EIGEN_BLAS_TRMV_RM(double, double, d, d,_) +EIGEN_BLAS_TRMV_RM(dcomplex, double, cd, z,_) +EIGEN_BLAS_TRMV_RM(float, float, f, s,_) +EIGEN_BLAS_TRMV_RM(scomplex, float, cf, c,_) +#endif + +} // end namespase internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h new file mode 100644 index 000000000..223c38b86 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h @@ -0,0 +1,335 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H +#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H + +namespace Eigen { + +namespace internal { + +// if the rhs is row major, let's transpose the product +template +struct triangular_solve_matrix +{ + static void run( + Index size, Index cols, + const Scalar* tri, Index triStride, + Scalar* _other, Index otherStride, + level3_blocking& blocking) + { + triangular_solve_matrix< + Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft, + (Mode&UnitDiag) | ((Mode&Upper) ? 
Lower : Upper), + NumTraits::IsComplex && Conjugate, + TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor> + ::run(size, cols, tri, triStride, _other, otherStride, blocking); + } +}; + +/* Optimized triangular solver with multiple right hand side and the triangular matrix on the left + */ +template +struct triangular_solve_matrix +{ + static EIGEN_DONT_INLINE void run( + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride, + level3_blocking& blocking); +}; +template +EIGEN_DONT_INLINE void triangular_solve_matrix::run( + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride, + level3_blocking& blocking) + { + Index cols = otherSize; + + typedef const_blas_data_mapper TriMapper; + typedef blas_data_mapper OtherMapper; + TriMapper tri(_tri, triStride); + OtherMapper other(_other, otherStride); + + typedef gebp_traits Traits; + + enum { + SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), + IsLower = (Mode&Lower) == Lower + }; + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(size,blocking.mc()); // cache block size along the M direction + + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols; + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + conj_if conj; + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + + // the goal here is to subdivise the Rhs panels such that we keep some cache + // coherence when accessing the rhs elements + std::ptrdiff_t l1, l2, l3; + manage_caching_sizes(GetAction, &l1, &l2, &l3); + Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max(otherStride,size)) : 0; + subcols = std::max((subcols/Traits::nr)*Traits::nr, Traits::nr); + + for(Index k2=IsLower ? 0 : size; + IsLower ? k20; + IsLower ? k2+=kc : k2-=kc) + { + const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc); + + // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel, + // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into + // A11 (the triangular part) and A21 the remaining rectangular part. + // Then the high level algorithm is: + // - B = R1 => general block copy (done during the next step) + // - R1 = A11^-1 B => tricky part + // - update B from the new R1 => actually this has to be performed continuously during the above step + // - R2 -= A21 * B => GEPP + + // The tricky part: compute R1 = A11^-1 B while updating B from R1 + // The idea is to split A11 into multiple small vertical panels. + // Each panel can be split into a small triangular part T1k which is processed without optimization, + // and the remaining small part T2k which is processed using gebp with appropriate block strides + for(Index j2=0; j2(actual_kc-k1, SmallPanelWidth); + // tr solve + for (Index k=0; k0) + { + Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc; + + pack_lhs(blockA, tri.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget); + + gebp_kernel(other.getSubMapper(startTarget,j2), blockA, blockB+actual_kc*j2, lengthTarget, actualPanelWidth, actual_cols, Scalar(-1), + actualPanelWidth, actual_kc, 0, blockBOffset); + } + } + } + + // R2 -= A21 * B => GEPP + { + Index start = IsLower ? k2+kc : 0; + Index end = IsLower ? 
size : k2-kc; + for(Index i2=start; i20) + { + pack_lhs(blockA, tri.getSubMapper(i2, IsLower ? k2 : k2-kc), actual_kc, actual_mc); + + gebp_kernel(other.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0); + } + } + } + } + } + +/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right + */ +template +struct triangular_solve_matrix +{ + static EIGEN_DONT_INLINE void run( + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride, + level3_blocking& blocking); +}; +template +EIGEN_DONT_INLINE void triangular_solve_matrix::run( + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride, + level3_blocking& blocking) + { + Index rows = otherSize; + typedef typename NumTraits::Real RealScalar; + + typedef blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + LhsMapper lhs(_other, otherStride); + RhsMapper rhs(_tri, triStride); + + typedef gebp_traits Traits; + enum { + RhsStorageOrder = TriStorageOrder, + SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), + IsLower = (Mode&Lower) == Lower + }; + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*size; + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + + conj_if conj; + gebp_kernel gebp_kernel; + gemm_pack_rhs pack_rhs; + gemm_pack_rhs pack_rhs_panel; + gemm_pack_lhs pack_lhs_panel; + + for(Index k2=IsLower ? size : 0; + IsLower ? k2>0 : k20) pack_rhs(geb, rhs.getSubMapper(actual_k2,startPanel), actual_kc, rs); + + // triangular packing (we only pack the panels off the diagonal, + // neglecting the blocks overlapping the diagonal + { + for (Index j2=0; j2(actual_kc-j2, SmallPanelWidth); + Index actual_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; + + if (panelLength>0) + pack_rhs_panel(blockB+j2*actual_kc, + rhs.getSubMapper(actual_k2+panelOffset, actual_j2), + panelLength, actualPanelWidth, + actual_kc, panelOffset); + } + } + + for(Index i2=0; i2 vertical panels of rhs) + for (Index j2 = IsLower + ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth) + : Index(SmallPanelWidth))) + : 0; + IsLower ? j2>=0 : j2(actual_kc-j2, SmallPanelWidth); + Index absolute_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? 
actual_kc - j2 - actualPanelWidth : j2; + + // GEBP + if(panelLength>0) + { + gebp_kernel(lhs.getSubMapper(i2,absolute_j2), + blockA, blockB+j2*actual_kc, + actual_mc, panelLength, actualPanelWidth, + Scalar(-1), + actual_kc, actual_kc, // strides + panelOffset, panelOffset); // offsets + } + + // unblocked triangular solve + for (Index k=0; k0) + gebp_kernel(lhs.getSubMapper(i2, startPanel), blockA, geb, + actual_mc, actual_kc, rs, Scalar(-1), + -1, -1, 0, 0); + } + } + } + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h new file mode 100644 index 000000000..f0775116a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h @@ -0,0 +1,163 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Triangular matrix * matrix product functionality based on ?TRMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H +#define EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + +// implements LeftSide op(triangular)^-1 * general +#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASFUNC) \ +template \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 
1 : 0 \ + }; \ + static void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + BlasIndex m = convert_index(size), n = convert_index(otherSize), lda, ldb; \ + char side = 'L', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + EIGTYPE alpha(1); \ + ldb = convert_index(otherStride);\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _tri; \ + lda = convert_index(triStride); \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRSM_L(double, double, dtrsm) +EIGEN_BLAS_TRSM_L(dcomplex, MKL_Complex16, ztrsm) +EIGEN_BLAS_TRSM_L(float, float, strsm) +EIGEN_BLAS_TRSM_L(scomplex, MKL_Complex8, ctrsm) +#else +EIGEN_BLAS_TRSM_L(double, double, dtrsm_) +EIGEN_BLAS_TRSM_L(dcomplex, double, ztrsm_) +EIGEN_BLAS_TRSM_L(float, float, strsm_) +EIGEN_BLAS_TRSM_L(scomplex, float, ctrsm_) +#endif + +// implements RightSide general * op(triangular)^-1 +#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASFUNC) \ +template \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \ + }; \ + static void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + BlasIndex m = convert_index(otherSize), n = convert_index(size), lda, ldb; \ + char side = 'R', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + EIGTYPE alpha(1); \ + ldb = convert_index(otherStride);\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 
'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _tri; \ + lda = convert_index(triStride); \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \ + /*std::cout << "TRMS_L specialization!\n";*/ \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRSM_R(double, double, dtrsm) +EIGEN_BLAS_TRSM_R(dcomplex, MKL_Complex16, ztrsm) +EIGEN_BLAS_TRSM_R(float, float, strsm) +EIGEN_BLAS_TRSM_R(scomplex, MKL_Complex8, ctrsm) +#else +EIGEN_BLAS_TRSM_R(double, double, dtrsm_) +EIGEN_BLAS_TRSM_R(dcomplex, double, ztrsm_) +EIGEN_BLAS_TRSM_R(float, float, strsm_) +EIGEN_BLAS_TRSM_R(scomplex, float, ctrsm_) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverVector.h new file mode 100644 index 000000000..b994759b2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/products/TriangularSolverVector.h @@ -0,0 +1,145 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H +#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H + +namespace Eigen { + +namespace internal { + +template +struct triangular_solve_vector +{ + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + triangular_solve_vector::run(size, _lhs, lhsStride, rhs); + } +}; + +// forward and backward substitution, row-major, rhs is a vector +template +struct triangular_solve_vector +{ + enum { + IsLower = ((Mode&Lower)==Lower) + }; + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + + typename internal::conditional< + Conjugate, + const CwiseUnaryOp,LhsMap>, + const LhsMap&> + ::type cjLhs(lhs); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + for(Index pi=IsLower ? 0 : size; + IsLower ? pi0; + IsLower ? pi+=PanelWidth : pi-=PanelWidth) + { + Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); + + Index r = IsLower ? pi : size - pi; // remaining size + if (r > 0) + { + // let's directly call the low level product function because: + // 1 - it is faster to compile + // 2 - it is slighlty faster at runtime + Index startRow = IsLower ? pi : pi-actualPanelWidth; + Index startCol = IsLower ? 
0 : pi; + + general_matrix_vector_product::run( + actualPanelWidth, r, + LhsMapper(&lhs.coeffRef(startRow,startCol), lhsStride), + RhsMapper(rhs + startCol, 1), + rhs + startRow, 1, + RhsScalar(-1)); + } + + for(Index k=0; k0) + rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map >(rhs+s,k))).sum(); + + if(!(Mode & UnitDiag)) + rhs[i] /= cjLhs(i,i); + } + } + } +}; + +// forward and backward substitution, column-major, rhs is a vector +template +struct triangular_solve_vector +{ + enum { + IsLower = ((Mode&Lower)==Lower) + }; + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typename internal::conditional,LhsMap>, + const LhsMap& + >::type cjLhs(lhs); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + + for(Index pi=IsLower ? 0 : size; + IsLower ? pi0; + IsLower ? pi+=PanelWidth : pi-=PanelWidth) + { + Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); + Index startBlock = IsLower ? pi : pi-actualPanelWidth; + Index endBlock = IsLower ? pi + actualPanelWidth : 0; + + for(Index k=0; k0) + Map >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r); + } + Index r = IsLower ? size - endBlock : startBlock; // remaining size + if (r > 0) + { + // let's directly call the low level product function because: + // 1 - it is faster to compile + // 2 - it is slighlty faster at runtime + general_matrix_vector_product::run( + r, actualPanelWidth, + LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride), + RhsMapper(rhs+startBlock, 1), + rhs+endBlock, 1, RhsScalar(-1)); + } + } + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/BlasUtil.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/BlasUtil.h new file mode 100644 index 000000000..6e6ee119b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/BlasUtil.h @@ -0,0 +1,398 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLASUTIL_H +#define EIGEN_BLASUTIL_H + +// This file contains many lightweight helper classes used to +// implement and control fast level 2 and level 3 BLAS-like routines. 
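Central among these helpers is blas_data_mapper, defined below, which reduces matrix access to stride arithmetic. A stripped-down sketch of the indexing rule it implements (toy names, not Eigen's actual class):

#include <cstddef>

// Toy stand-in for blas_data_mapper: maps (i, j) onto a flat buffer through
// an outer stride; row-major flips which index gets scaled by the stride.
template <typename Scalar, bool IsRowMajor>
struct toy_mapper {
  Scalar* data;
  std::ptrdiff_t stride;
  Scalar& operator()(std::ptrdiff_t i, std::ptrdiff_t j) const {
    return data[IsRowMajor ? j + i * stride : i + j * stride];
  }
};

int main() {
  double buf[6] = {0, 1, 2, 3, 4, 5};
  toy_mapper<double, false> m{buf, 3};  // 3x2 column-major view of buf
  return m(2, 1) == 5.0 ? 0 : 1;        // (2,1) -> buf[2 + 1*3] = buf[5]
}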
+ +namespace Eigen { + +namespace internal { + +// forward declarations +template +struct gebp_kernel; + +template +struct gemm_pack_rhs; + +template +struct gemm_pack_lhs; + +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, + int ResStorageOrder> +struct general_matrix_matrix_product; + +template +struct general_matrix_vector_product; + + +template struct conj_if; + +template<> struct conj_if { + template + inline T operator()(const T& x) const { return numext::conj(x); } + template + inline T pconj(const T& x) const { return internal::pconj(x); } +}; + +template<> struct conj_if { + template + inline const T& operator()(const T& x) const { return x; } + template + inline const T& pconj(const T& x) const { return x; } +}; + +// Generic implementation for custom complex types. +template +struct conj_helper +{ + typedef typename ScalarBinaryOpTraits::ReturnType Scalar; + + EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const + { return padd(c, pmul(x,y)); } + + EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const + { return conj_if()(x) * conj_if()(y); } +}; + +template struct conj_helper +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); } +}; + +template struct conj_helper, std::complex, false,true> +{ + typedef std::complex Scalar; + EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const + { return c + pmul(x,y); } + + EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const + { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); } +}; + +template struct conj_helper, std::complex, true,false> +{ + typedef std::complex Scalar; + EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const + { return c + pmul(x,y); } + + EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const + { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); } +}; + +template struct conj_helper, std::complex, true,true> +{ + typedef std::complex Scalar; + EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const + { return c + pmul(x,y); } + + EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const + { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); } +}; + +template struct conj_helper, RealScalar, Conj,false> +{ + typedef std::complex Scalar; + EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const + { return padd(c, pmul(x,y)); } + EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const + { return conj_if()(x)*y; } +}; + +template struct conj_helper, false,Conj> +{ + typedef std::complex Scalar; + EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const + { return padd(c, pmul(x,y)); } + EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const + { return x*conj_if()(y); } +}; + +template 
struct get_factor { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return To(x); } +}; + +template struct get_factor::Real> { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE typename NumTraits::Real run(const Scalar& x) { return numext::real(x); } +}; + + +template +class BlasVectorMapper { + public: + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + return m_data[i]; + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet load(Index i) const { + return ploadt(m_data + i); + } + + template + EIGEN_DEVICE_FUNC bool aligned(Index i) const { + return (UIntPtr(m_data+i)%sizeof(Packet))==0; + } + + protected: + Scalar* m_data; +}; + +template +class BlasLinearMapper { + public: + typedef typename packet_traits::type Packet; + typedef typename packet_traits::half HalfPacket; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const { + internal::prefetch(&operator()(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const { + return m_data[i]; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const { + return ploadt(m_data + i); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const { + return ploadt(m_data + i); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const { + pstoret(m_data + i, p); + } + + protected: + Scalar *m_data; +}; + +// Lightweight helper class to access matrix coefficients. +template +class blas_data_mapper { + public: + typedef typename packet_traits::type Packet; + typedef typename packet_traits::half HalfPacket; + + typedef BlasLinearMapper LinearMapper; + typedef BlasVectorMapper VectorMapper; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper + getSubMapper(Index i, Index j) const { + return blas_data_mapper(&operator()(i, j), m_stride); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + return LinearMapper(&operator()(i, j)); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const { + return VectorMapper(&operator()(i, j)); + } + + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const { + return m_data[StorageOrder==RowMajor ? 
j + i*m_stride : i + j*m_stride];
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
+    return ploadt<Packet, AlignmentType>(&operator()(i, j));
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
+    return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));
+  }
+
+  template<typename SubPacket>
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {
+    pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
+  }
+
+  template<typename SubPacket>
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const {
+    return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);
+  }
+
+  EIGEN_DEVICE_FUNC const Index stride() const { return m_stride; }
+  EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }
+
+  EIGEN_DEVICE_FUNC Index firstAligned(Index size) const {
+    if (UIntPtr(m_data)%sizeof(Scalar)) {
+      return -1;
+    }
+    return internal::first_default_aligned(m_data, size);
+  }
+
+  protected:
+  Scalar* EIGEN_RESTRICT m_data;
+  const Index m_stride;
+};
+
+// lightweight helper class to access matrix coefficients (const version)
+template<typename Scalar, typename Index, int StorageOrder>
+class const_blas_data_mapper : public blas_data_mapper<const Scalar, Index, StorageOrder> {
+  public:
+  EIGEN_ALWAYS_INLINE const_blas_data_mapper(const Scalar *data, Index stride) : blas_data_mapper<const Scalar, Index, StorageOrder>(data, stride) {}
+
+  EIGEN_ALWAYS_INLINE const_blas_data_mapper<Scalar, Index, StorageOrder> getSubMapper(Index i, Index j) const {
+    return const_blas_data_mapper<Scalar, Index, StorageOrder>(&(this->operator()(i, j)), this->m_stride);
+  }
+};
+
+
+/* Helper class to analyze the factors of a Product expression.
+ * In particular it allows to pop out operator-, scalar multiples,
+ * and conjugate */
+template<typename XprType> struct blas_traits
+{
+  typedef typename traits<XprType>::Scalar Scalar;
+  typedef const XprType& ExtractType;
+  typedef XprType _ExtractType;
+  enum {
+    IsComplex = NumTraits<Scalar>::IsComplex,
+    IsTransposed = false,
+    NeedToConjugate = false,
+    HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
+                            && (   bool(XprType::IsVectorAtCompileTime)
+                                || int(inner_stride_at_compile_time<XprType>::ret) == 1)
+                          ) ? 1 : 0
+  };
+  typedef typename conditional<bool(HasUsableDirectAccess),
+    ExtractType,
+    typename _ExtractType::PlainObject
+    >::type DirectLinearAccessType;
+  static inline ExtractType extract(const XprType& x) { return x; }
+  static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+};
+
+// pop conjugate
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+
+  enum {
+    IsComplex = NumTraits<Scalar>::IsComplex,
+    NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
+  };
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }
+};
+
+// pop scalar multiple
+template<typename Scalar, typename NestedXpr, typename Plain>
+struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
+  static inline Scalar extractScalarFactor(const XprType& x)
+  { return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); }
+};
+template<typename Scalar, typename NestedXpr, typename Plain>
+struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > XprType;
+  typedef typename Base::ExtractType ExtractType;
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.lhs()); }
+  static inline Scalar extractScalarFactor(const XprType& x)
+  { return Base::extractScalarFactor(x.lhs()) * x.rhs().functor().m_other; }
+};
+template<typename Scalar, typename Plain1, typename Plain2>
+struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1>,
+                                                            const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain2> > >
+ : blas_traits<CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1> >
+{};
+
+// pop opposite
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x)
+  { return - Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop/push transpose
+template<typename NestedXpr>
+struct blas_traits<Transpose<NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef typename NestedXpr::Scalar Scalar;
+  typedef blas_traits<NestedXpr> Base;
+  typedef Transpose<NestedXpr> XprType;
+  typedef Transpose<const typename Base::_ExtractType> ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
+  typedef Transpose<const typename Base::_ExtractType> _ExtractType;
+  typedef typename conditional<bool(Base::HasUsableDirectAccess),
+    ExtractType,
+    typename ExtractType::PlainObject
+    >::type DirectLinearAccessType;
+  enum {
+    IsTransposed = Base::IsTransposed ? 0 : 1
+  };
+  static inline ExtractType extract(const XprType& x) { return ExtractType(Base::extract(x.nestedExpression())); }
+  static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+template<typename T>
+struct blas_traits<const T>
+ : blas_traits<T>
+{};
+
+template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
+struct extract_data_selector {
+  static const typename T::Scalar* run(const T& m)
+  {
+    return blas_traits<T>::extract(m).data();
+  }
+};
+
+template<typename T>
+struct extract_data_selector<T,false> {
+  static typename T::Scalar* run(const T&) { return 0; }
+};
+
+template<typename T> const typename T::Scalar* extract_data(const T& m)
+{
+  return extract_data_selector<T>::run(m);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLASUTIL_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Constants.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Constants.h
new file mode 100644
index 000000000..7587d6842
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Constants.h
@@ -0,0 +1,547 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2015 Gael Guennebaud
+// Copyright (C) 2007-2009 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSTANTS_H
+#define EIGEN_CONSTANTS_H
+
+namespace Eigen {
+
+/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
+  * stored in some runtime variable.
+  *
+  * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
+  */
+const int Dynamic = -1;
+
+/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
+  * has to be specified at runtime.
+  */
+const int DynamicIndex = 0xffffff;
+
+/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
+  * The value Infinity there means the L-infinity norm.
+  */
+const int Infinity = -1;
+
+/** This value means that the cost to evaluate an expression coefficient is either very expensive or
+  * cannot be known at compile time.
+  *
+  * This value has to be positive to (1) simplify cost computation, and (2) allow one to distinguish between a very
+  * expensive and a very, very expensive expression.
+  * It thus must also be large enough to make sure unrolling won't happen and that sub expressions will be evaluated,
+  * but not too large to avoid overflow.
+  */
+const int HugeCost = 10000;
+
+/** \defgroup flags Flags
+  * \ingroup Core_Module
+  *
+  * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
+  * expression.
+  *
+  * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
+  * an expression type, implemented as enums. They are not stored in memory at runtime, and they do not incur any
+  * runtime overhead.
+  *
+  * \sa MatrixBase::Flags
+  */
+
+/** \ingroup flags
+  *
+  * for a matrix, this means that the storage order is row-major.
+  * If this bit is not set, the storage order is column-major.
+ * For an expression, this determines the storage order of + * the matrix created by evaluation of that expression. + * \sa \blank \ref TopicStorageOrders */ +const unsigned int RowMajorBit = 0x1; + +/** \ingroup flags + * means the expression should be evaluated by the calling expression */ +const unsigned int EvalBeforeNestingBit = 0x2; + +/** \ingroup flags + * \deprecated + * means the expression should be evaluated before any assignment */ +EIGEN_DEPRECATED +const unsigned int EvalBeforeAssigningBit = 0x4; // FIXME deprecated + +/** \ingroup flags + * + * Short version: means the expression might be vectorized + * + * Long version: means that the coefficients can be handled by packets + * and start at a memory location whose alignment meets the requirements + * of the present CPU architecture for optimized packet access. In the fixed-size + * case, there is the additional condition that it be possible to access all the + * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes, + * and that any nontrivial strides don't break the alignment). In the dynamic-size case, + * there is no such condition on the total size and strides, so it might not be possible to access + * all coeffs by packets. + * + * \note This bit can be set regardless of whether vectorization is actually enabled. + * To check for actual vectorizability, see \a ActualPacketAccessBit. + */ +const unsigned int PacketAccessBit = 0x8; + +#ifdef EIGEN_VECTORIZE +/** \ingroup flags + * + * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant + * is set to the value \a PacketAccessBit. + * + * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant + * is set to the value 0. + */ +const unsigned int ActualPacketAccessBit = PacketAccessBit; +#else +const unsigned int ActualPacketAccessBit = 0x0; +#endif + +/** \ingroup flags + * + * Short version: means the expression can be seen as 1D vector. + * + * Long version: means that one can access the coefficients + * of this expression by coeff(int), and coeffRef(int) in the case of a lvalue expression. These + * index-based access methods are guaranteed + * to not have to do any runtime computation of a (row, col)-pair from the index, so that it + * is guaranteed that whenever it is available, index-based access is at least as fast as + * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit. + * + * If both PacketAccessBit and LinearAccessBit are set, then the + * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of a + * lvalue expression. + * + * Typically, all vector expressions have the LinearAccessBit, but there is one exception: + * Product expressions don't have it, because it would be troublesome for vectorization, even when the + * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but + * not index-based packet access, so they don't have the LinearAccessBit. + */ +const unsigned int LinearAccessBit = 0x10; + +/** \ingroup flags + * + * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable. + * This rules out read-only expressions. 
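 *
 * Editorial aside, not part of the upstream header: the flag bits documented here can be
 * inspected at compile time through an expression's Flags enum. A minimal sketch, assuming
 * <Eigen/Core> is included and C++11 static_assert is available:
 * \code
 * static_assert(   Eigen::Matrix4f::Flags & Eigen::LvalueBit,       "plain matrices have writable coefficients" );
 * static_assert(   Eigen::Matrix4f::Flags & Eigen::DirectAccessBit, "plain matrices are backed by a strided array" );
 * static_assert( !(Eigen::Matrix4f::Flags & Eigen::RowMajorBit),    "default storage is column-major" );
 * \endcode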
+ *
+ * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
+ * the other:
+ * \li writable expressions that don't have a very simple memory layout as a strided array have LvalueBit but not DirectAccessBit
+ * \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
+ *
+ * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
+ */
+const unsigned int LvalueBit = 0x20;
+
+/** \ingroup flags
+  *
+  * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
+  * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
+  * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
+  * though referenceable, do not have such a regular memory layout.
+  *
+  * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
+  */
+const unsigned int DirectAccessBit = 0x40;
+
+/** \deprecated \ingroup flags
+  *
+  * means the first coefficient packet is guaranteed to be aligned.
+  * An expression cannot have the AlignedBit without the PacketAccessBit flag.
+  * In other words, this means we are allowed to perform an aligned packet access to the first element regardless
+  * of the expression kind:
+  * \code
+  * expression.packet<Aligned>(0);
+  * \endcode
+  */
+EIGEN_DEPRECATED const unsigned int AlignedBit = 0x80;
+
+const unsigned int NestByRefBit = 0x100;
+
+/** \ingroup flags
+  *
+  * for an expression, this means that the storage order
+  * can be either row-major or column-major.
+  * The precise choice will be decided at evaluation time or when
+  * combined with other expressions.
+  * \sa \blank \ref RowMajorBit, \ref TopicStorageOrders */
+const unsigned int NoPreferredStorageOrderBit = 0x200;
+
+/** \ingroup flags
+  *
+  * Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed storage format,
+  * that is, the expression provides:
+  * \code
+    inline const Scalar* valuePtr() const;
+    inline const Index* innerIndexPtr() const;
+    inline const Index* outerIndexPtr() const;
+    inline const Index* innerNonZeroPtr() const;
+    \endcode
+  */
+const unsigned int CompressedAccessBit = 0x400;
+
+
+// list of flags that are inherited by default
+const unsigned int HereditaryBits = RowMajorBit
+                                  | EvalBeforeNestingBit;
+
+/** \defgroup enums Enumerations
+  * \ingroup Core_Module
+  *
+  * Various enumerations used in %Eigen. Many of these are used as template parameters.
+  */
+
+/** \ingroup enums
+  * Enum containing possible values for the \c Mode or \c UpLo parameter of
+  * MatrixBase::selfadjointView() and MatrixBase::triangularView(), and selfadjoint solvers. */
+enum UpLoType {
+  /** View matrix as a lower triangular matrix. */
+  Lower=0x1,
+  /** View matrix as an upper triangular matrix. */
+  Upper=0x2,
+  /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
+  UnitDiag=0x4,
+  /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
+  ZeroDiag=0x8,
+  /** View matrix as a lower triangular matrix with ones on the diagonal. */
+  UnitLower=UnitDiag|Lower,
+  /** View matrix as an upper triangular matrix with ones on the diagonal. */
+  UnitUpper=UnitDiag|Upper,
+  /** View matrix as a lower triangular matrix with zeros on the diagonal.
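 *
 * Editorial aside, not part of the upstream header: these selectors are the values passed to
 * MatrixBase::triangularView(). A minimal sketch of their use, assuming <Eigen/Dense> is included:
 * \code
 * Eigen::Matrix3d A; A.setRandom();
 * Eigen::Vector3d b; b.setRandom();
 * Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);     // forward substitution
 * Eigen::Vector3d y = A.triangularView<Eigen::UnitUpper>().solve(b); // back substitution, ones assumed on the diagonal
 * \endcode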
*/ + StrictlyLower=ZeroDiag|Lower, + /** View matrix as an upper triangular matrix with zeros on the diagonal. */ + StrictlyUpper=ZeroDiag|Upper, + /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */ + SelfAdjoint=0x10, + /** Used to support symmetric, non-selfadjoint, complex matrices. */ + Symmetric=0x20 +}; + +/** \ingroup enums + * Enum for indicating whether a buffer is aligned or not. */ +enum AlignmentType { + Unaligned=0, /**< Data pointer has no specific alignment. */ + Aligned8=8, /**< Data pointer is aligned on a 8 bytes boundary. */ + Aligned16=16, /**< Data pointer is aligned on a 16 bytes boundary. */ + Aligned32=32, /**< Data pointer is aligned on a 32 bytes boundary. */ + Aligned64=64, /**< Data pointer is aligned on a 64 bytes boundary. */ + Aligned128=128, /**< Data pointer is aligned on a 128 bytes boundary. */ + AlignedMask=255, + Aligned=16, /**< \deprecated Synonym for Aligned16. */ +#if EIGEN_MAX_ALIGN_BYTES==128 + AlignedMax = Aligned128 +#elif EIGEN_MAX_ALIGN_BYTES==64 + AlignedMax = Aligned64 +#elif EIGEN_MAX_ALIGN_BYTES==32 + AlignedMax = Aligned32 +#elif EIGEN_MAX_ALIGN_BYTES==16 + AlignedMax = Aligned16 +#elif EIGEN_MAX_ALIGN_BYTES==8 + AlignedMax = Aligned8 +#elif EIGEN_MAX_ALIGN_BYTES==0 + AlignedMax = Unaligned +#else +#error Invalid value for EIGEN_MAX_ALIGN_BYTES +#endif +}; + +/** \ingroup enums + * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */ +// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox +// TODO: find out what to do with that. Adapt the AlignedBox API ? +enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight }; + +/** \ingroup enums + * Enum containing possible values for the \p Direction parameter of + * Reverse, PartialReduxExpr and VectorwiseOp. */ +enum DirectionType { + /** For Reverse, all columns are reversed; + * for PartialReduxExpr and VectorwiseOp, act on columns. */ + Vertical, + /** For Reverse, all rows are reversed; + * for PartialReduxExpr and VectorwiseOp, act on rows. */ + Horizontal, + /** For Reverse, both rows and columns are reversed; + * not used for PartialReduxExpr and VectorwiseOp. */ + BothDirections +}; + +/** \internal \ingroup enums + * Enum to specify how to traverse the entries of a matrix. */ +enum TraversalType { + /** \internal Default traversal, no vectorization, no index-based access */ + DefaultTraversal, + /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */ + LinearTraversal, + /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment + * and good size */ + InnerVectorizedTraversal, + /** \internal Vectorization path using a single loop plus scalar loops for the + * unaligned boundaries */ + LinearVectorizedTraversal, + /** \internal Generic vectorization path using one vectorized loop per row/column with some + * scalar loops to handle the unaligned boundaries */ + SliceVectorizedTraversal, + /** \internal Special case to properly handle incompatible scalar types or other defecting cases*/ + InvalidTraversal, + /** \internal Evaluate all entries at once */ + AllAtOnceTraversal +}; + +/** \internal \ingroup enums + * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */ +enum UnrollingType { + /** \internal Do not unroll loops. */ + NoUnrolling, + /** \internal Unroll only the inner loop, but not the outer loop. 
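 *
 * Editorial aside, not part of the upstream header: the AlignmentType values defined above are
 * mainly consumed by Map, which lets the caller promise the alignment of an external buffer.
 * A minimal sketch, assuming <Eigen/Dense> is included:
 * \code
 * EIGEN_ALIGN16 float data[4] = {1.f, 2.f, 3.f, 4.f};
 * Eigen::Map<Eigen::Vector4f, Eigen::Aligned16> v(data); // caller guarantees 16-byte alignment
 * \endcode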
*/ + InnerUnrolling, + /** \internal Unroll both the inner and the outer loop. If there is only one loop, + * because linear traversal is used, then unroll that loop. */ + CompleteUnrolling +}; + +/** \internal \ingroup enums + * Enum to specify whether to use the default (built-in) implementation or the specialization. */ +enum SpecializedType { + Specialized, + BuiltIn +}; + +/** \ingroup enums + * Enum containing possible values for the \p _Options template parameter of + * Matrix, Array and BandMatrix. */ +enum StorageOptions { + /** Storage order is column major (see \ref TopicStorageOrders). */ + ColMajor = 0, + /** Storage order is row major (see \ref TopicStorageOrders). */ + RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that + /** Align the matrix itself if it is vectorizable fixed-size */ + AutoAlign = 0, + /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation + DontAlign = 0x2 +}; + +/** \ingroup enums + * Enum for specifying whether to apply or solve on the left or right. */ +enum SideType { + /** Apply transformation on the left. */ + OnTheLeft = 1, + /** Apply transformation on the right. */ + OnTheRight = 2 +}; + +/* the following used to be written as: + * + * struct NoChange_t {}; + * namespace { + * EIGEN_UNUSED NoChange_t NoChange; + * } + * + * on the ground that it feels dangerous to disambiguate overloaded functions on enum/integer types. + * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE, + * and we do not know how to get rid of them (bug 450). + */ + +enum NoChange_t { NoChange }; +enum Sequential_t { Sequential }; +enum Default_t { Default }; + +/** \internal \ingroup enums + * Used in AmbiVector. */ +enum AmbiVectorMode { + IsDense = 0, + IsSparse +}; + +/** \ingroup enums + * Used as template parameter in DenseCoeffBase and MapBase to indicate + * which accessors should be provided. */ +enum AccessorLevels { + /** Read-only access via a member function. */ + ReadOnlyAccessors, + /** Read/write access via member functions. */ + WriteAccessors, + /** Direct read-only access to the coefficients. */ + DirectAccessors, + /** Direct read/write access to the coefficients. */ + DirectWriteAccessors +}; + +/** \ingroup enums + * Enum with options to give to various decompositions. */ +enum DecompositionOptions { + /** \internal Not used (meant for LDLT?). */ + Pivoting = 0x01, + /** \internal Not used (meant for LDLT?). */ + NoPivoting = 0x02, + /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */ + ComputeFullU = 0x04, + /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */ + ComputeThinU = 0x08, + /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */ + ComputeFullV = 0x10, + /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */ + ComputeThinV = 0x20, + /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify + * that only the eigenvalues are to be computed and not the eigenvectors. */ + EigenvaluesOnly = 0x40, + /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify + * that both the eigenvalues and the eigenvectors are to be computed. 
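 *
 * Editorial aside, not part of the upstream header: a minimal sketch of how these decomposition
 * options are OR'ed together, assuming <Eigen/Dense> is included and A (an Eigen::MatrixXd) and
 * S (a symmetric Eigen::Matrix3d) are already filled in:
 * \code
 * Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
 * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(S, Eigen::EigenvaluesOnly);
 * \endcode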
*/ + ComputeEigenvectors = 0x80, + /** \internal */ + EigVecMask = EigenvaluesOnly | ComputeEigenvectors, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */ + Ax_lBx = 0x100, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */ + ABx_lx = 0x200, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */ + BAx_lx = 0x400, + /** \internal */ + GenEigMask = Ax_lBx | ABx_lx | BAx_lx +}; + +/** \ingroup enums + * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */ +enum QRPreconditioners { + /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */ + NoQRPreconditioner, + /** Use a QR decomposition without pivoting as the first step. */ + HouseholderQRPreconditioner, + /** Use a QR decomposition with column pivoting as the first step. */ + ColPivHouseholderQRPreconditioner, + /** Use a QR decomposition with full pivoting as the first step. */ + FullPivHouseholderQRPreconditioner +}; + +#ifdef Success +#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h +#endif + +/** \ingroup enums + * Enum for reporting the status of a computation. */ +enum ComputationInfo { + /** Computation was successful. */ + Success = 0, + /** The provided data did not satisfy the prerequisites. */ + NumericalIssue = 1, + /** Iterative procedure did not converge. */ + NoConvergence = 2, + /** The inputs are invalid, or the algorithm has been improperly called. + * When assertions are enabled, such errors trigger an assert. */ + InvalidInput = 3 +}; + +/** \ingroup enums + * Enum used to specify how a particular transformation is stored in a matrix. + * \sa Transform, Hyperplane::transform(). */ +enum TransformTraits { + /** Transformation is an isometry. */ + Isometry = 0x1, + /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is + * assumed to be [0 ... 0 1]. */ + Affine = 0x2, + /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */ + AffineCompact = 0x10 | Affine, + /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */ + Projective = 0x20 +}; + +/** \internal \ingroup enums + * Enum used to choose between implementation depending on the computer architecture. */ +namespace Architecture +{ + enum Type { + Generic = 0x0, + SSE = 0x1, + AltiVec = 0x2, + VSX = 0x3, + NEON = 0x4, +#if defined EIGEN_VECTORIZE_SSE + Target = SSE +#elif defined EIGEN_VECTORIZE_ALTIVEC + Target = AltiVec +#elif defined EIGEN_VECTORIZE_VSX + Target = VSX +#elif defined EIGEN_VECTORIZE_NEON + Target = NEON +#else + Target = Generic +#endif + }; +} + +/** \internal \ingroup enums + * Enum used as template parameter in Product and product evaluators. */ +enum ProductImplType +{ DefaultProduct=0, LazyProduct, AliasFreeProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct }; + +/** \internal \ingroup enums + * Enum used in experimental parallel implementation. */ +enum Action {GetAction, SetAction}; + +/** The type used to identify a dense storage. */ +struct Dense {}; + +/** The type used to identify a general sparse storage. */ +struct Sparse {}; + +/** The type used to identify a general solver (factored) storage. 
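 *
 * Editorial aside, not part of the upstream header: the ComputationInfo codes defined earlier are
 * what factorizations report through their info() member. A minimal sketch, assuming <Eigen/Dense>
 * is included and A is a square Eigen::MatrixXd:
 * \code
 * Eigen::LLT<Eigen::MatrixXd> llt(A);
 * if (llt.info() != Eigen::Success) {
 *   // A was not positive definite (NumericalIssue)
 * }
 * \endcode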
*/ +struct SolverStorage {}; + +/** The type used to identify a permutation storage. */ +struct PermutationStorage {}; + +/** The type used to identify a permutation storage. */ +struct TranspositionsStorage {}; + +/** The type used to identify a matrix expression */ +struct MatrixXpr {}; + +/** The type used to identify an array expression */ +struct ArrayXpr {}; + +// An evaluator must define its shape. By default, it can be one of the following: +struct DenseShape { static std::string debugName() { return "DenseShape"; } }; +struct SolverShape { static std::string debugName() { return "SolverShape"; } }; +struct HomogeneousShape { static std::string debugName() { return "HomogeneousShape"; } }; +struct DiagonalShape { static std::string debugName() { return "DiagonalShape"; } }; +struct BandShape { static std::string debugName() { return "BandShape"; } }; +struct TriangularShape { static std::string debugName() { return "TriangularShape"; } }; +struct SelfAdjointShape { static std::string debugName() { return "SelfAdjointShape"; } }; +struct PermutationShape { static std::string debugName() { return "PermutationShape"; } }; +struct TranspositionsShape { static std::string debugName() { return "TranspositionsShape"; } }; +struct SparseShape { static std::string debugName() { return "SparseShape"; } }; + +namespace internal { + + // random access iterators based on coeff*() accessors. +struct IndexBased {}; + +// evaluator based on iterators to access coefficients. +struct IteratorBased {}; + +/** \internal + * Constants for comparison functors + */ +enum ComparisonName { + cmp_EQ = 0, + cmp_LT = 1, + cmp_LE = 2, + cmp_UNORD = 3, + cmp_NEQ = 4, + cmp_GT = 5, + cmp_GE = 6 +}; +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONSTANTS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h new file mode 100644 index 000000000..351bd6c60 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h @@ -0,0 +1,83 @@ +#ifndef EIGEN_WARNINGS_DISABLED +#define EIGEN_WARNINGS_DISABLED + +#ifdef _MSC_VER + // 4100 - unreferenced formal parameter (occurred e.g. 
in aligned_allocator::destroy(pointer p)) + // 4101 - unreferenced local variable + // 4127 - conditional expression is constant + // 4181 - qualifier applied to reference type ignored + // 4211 - nonstandard extension used : redefined extern to static + // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data + // 4273 - QtAlignedMalloc, inconsistent DLL linkage + // 4324 - structure was padded due to declspec(align()) + // 4503 - decorated name length exceeded, name was truncated + // 4512 - assignment operator could not be generated + // 4522 - 'class' : multiple assignment operators specified + // 4700 - uninitialized local variable 'xyz' used + // 4714 - function marked as __forceinline not inlined + // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow + // 4800 - 'type' : forcing value to bool 'true' or 'false' (performance warning) + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning( push ) + #endif + #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800) + +#elif defined __INTEL_COMPILER + // 2196 - routine is both "inline" and "noinline" ("noinline" assumed) + // ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body + // typedef that may be a reference type. + // 279 - controlling expression is constant + // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case. + // 1684 - conversion from pointer to same-sized integral type (potential portability problem) + // 2259 - non-pointer conversion from "Eigen::Index={ptrdiff_t={long}}" to "int" may lose significant bits + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning push + #endif + #pragma warning disable 2196 279 1684 2259 + +#elif defined __clang__ + // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant + // this is really a stupid warning as it warns on compile-time expressions involving enums + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma clang diagnostic push + #endif + #pragma clang diagnostic ignored "-Wconstant-logical-operand" + +#elif defined __GNUC__ + + #if (!defined(EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS)) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) + #pragma GCC diagnostic push + #endif + // g++ warns about local variables shadowing member functions, which is too strict + #pragma GCC diagnostic ignored "-Wshadow" + #if __GNUC__ == 4 && __GNUC_MINOR__ < 8 + // Until g++-4.7 there are warnings when comparing unsigned int vs 0, even in templated functions: + #pragma GCC diagnostic ignored "-Wtype-limits" + #endif + #if __GNUC__>=6 + #pragma GCC diagnostic ignored "-Wignored-attributes" + #endif + +#endif + +#if defined __NVCC__ + // Disable the "statement is unreachable" message + #pragma diag_suppress code_is_unreachable + // Disable the "dynamic initialization in unreachable code" message + #pragma diag_suppress initialization_not_reachable + // Disable the "invalid error number" message that we get with older versions of nvcc + #pragma diag_suppress 1222 + // Disable the "calling a __host__ function from a __host__ __device__ function is not allowed" messages (yes, there are many of them and they seem to change with every version of the compiler) + #pragma diag_suppress 2527 + #pragma diag_suppress 2529 + #pragma 
diag_suppress 2651 + #pragma diag_suppress 2653 + #pragma diag_suppress 2668 + #pragma diag_suppress 2669 + #pragma diag_suppress 2670 + #pragma diag_suppress 2671 + #pragma diag_suppress 2735 + #pragma diag_suppress 2737 +#endif + +#endif // not EIGEN_WARNINGS_DISABLED diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ForwardDeclarations.h new file mode 100644 index 000000000..ea107393a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ForwardDeclarations.h @@ -0,0 +1,302 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FORWARDDECLARATIONS_H +#define EIGEN_FORWARDDECLARATIONS_H + +namespace Eigen { +namespace internal { + +template struct traits; + +// here we say once and for all that traits == traits +// When constness must affect traits, it has to be constness on template parameters on which T itself depends. +// For example, traits > != traits >, but +// traits > == traits > +template struct traits : traits {}; + +template struct has_direct_access +{ + enum { ret = (traits::Flags & DirectAccessBit) ? 1 : 0 }; +}; + +template struct accessors_level +{ + enum { has_direct_access = (traits::Flags & DirectAccessBit) ? 1 : 0, + has_write_access = (traits::Flags & LvalueBit) ? 1 : 0, + value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors) + : (has_write_access ? WriteAccessors : ReadOnlyAccessors) + }; +}; + +template struct evaluator_traits; + +template< typename T> struct evaluator; + +} // end namespace internal + +template struct NumTraits; + +template struct EigenBase; +template class DenseBase; +template class PlainObjectBase; + + +template::value > +class DenseCoeffsBase; + +template class Matrix; + +template class MatrixBase; +template class ArrayBase; + +template class Flagged; +template class StorageBase > class NoAlias; +template class NestByValue; +template class ForceAlignedAccess; +template class SwapWrapper; + +template class Block; + +template class VectorBlock; +template class Transpose; +template class Conjugate; +template class CwiseNullaryOp; +template class CwiseUnaryOp; +template class CwiseUnaryView; +template class CwiseBinaryOp; +template class CwiseTernaryOp; +template class Solve; +template class Inverse; + +template class Product; + +template class DiagonalBase; +template class DiagonalWrapper; +template class DiagonalMatrix; +template class DiagonalProduct; +template class Diagonal; +template class PermutationMatrix; +template class Transpositions; +template class PermutationBase; +template class TranspositionsBase; +template class PermutationWrapper; +template class TranspositionsWrapper; + +template::has_write_access ? 
WriteAccessors : ReadOnlyAccessors +> class MapBase; +template class Stride; +template class InnerStride; +template class OuterStride; +template > class Map; +template class RefBase; +template,OuterStride<> >::type > class Ref; + +template class TriangularBase; +template class TriangularView; +template class SelfAdjointView; +template class SparseView; +template class WithFormat; +template struct CommaInitializer; +template class ReturnByValue; +template class ArrayWrapper; +template class MatrixWrapper; +template class SolverBase; +template class InnerIterator; + +namespace internal { +template struct kernel_retval_base; +template struct kernel_retval; +template struct image_retval_base; +template struct image_retval; +} // end namespace internal + +namespace internal { +template class BandMatrix; +} + +namespace internal { +template struct product_type; + +template struct EnableIf; + +/** \internal + * \class product_evaluator + * Products need their own evaluator with more template arguments allowing for + * easier partial template specializations. + */ +template< typename T, + int ProductTag = internal::product_type::ret, + typename LhsShape = typename evaluator_traits::Shape, + typename RhsShape = typename evaluator_traits::Shape, + typename LhsScalar = typename traits::Scalar, + typename RhsScalar = typename traits::Scalar + > struct product_evaluator; +} + +template::value> +struct ProductReturnType; + +// this is a workaround for sun CC +template struct LazyProductReturnType; + +namespace internal { + +// Provides scalar/packet-wise product and product with accumulation +// with optional conjugation of the arguments. +template struct conj_helper; + +template struct scalar_sum_op; +template struct scalar_difference_op; +template struct scalar_conj_product_op; +template struct scalar_min_op; +template struct scalar_max_op; +template struct scalar_opposite_op; +template struct scalar_conjugate_op; +template struct scalar_real_op; +template struct scalar_imag_op; +template struct scalar_abs_op; +template struct scalar_abs2_op; +template struct scalar_sqrt_op; +template struct scalar_rsqrt_op; +template struct scalar_exp_op; +template struct scalar_log_op; +template struct scalar_cos_op; +template struct scalar_sin_op; +template struct scalar_acos_op; +template struct scalar_asin_op; +template struct scalar_tan_op; +template struct scalar_inverse_op; +template struct scalar_square_op; +template struct scalar_cube_op; +template struct scalar_cast_op; +template struct scalar_random_op; +template struct scalar_constant_op; +template struct scalar_identity_op; +template struct scalar_sign_op; +template struct scalar_pow_op; +template struct scalar_hypot_op; +template struct scalar_product_op; +template struct scalar_quotient_op; + +// SpecialFunctions module +template struct scalar_lgamma_op; +template struct scalar_digamma_op; +template struct scalar_erf_op; +template struct scalar_erfc_op; +template struct scalar_igamma_op; +template struct scalar_igammac_op; +template struct scalar_zeta_op; +template struct scalar_betainc_op; + +} // end namespace internal + +struct IOFormat; + +// Array module +template class Array; +template class Select; +template class PartialReduxExpr; +template class VectorwiseOp; +template class Replicate; +template class Reverse; + +template class FullPivLU; +template class PartialPivLU; +namespace internal { +template struct inverse_impl; +} +template class HouseholderQR; +template class ColPivHouseholderQR; +template class FullPivHouseholderQR; +template class 
CompleteOrthogonalDecomposition; +template class JacobiSVD; +template class BDCSVD; +template class LLT; +template class LDLT; +template class HouseholderSequence; +template class JacobiRotation; + +// Geometry module: +template class RotationBase; +template class Cross; +template class QuaternionBase; +template class Rotation2D; +template class AngleAxis; +template class Translation; +template class AlignedBox; +template class Quaternion; +template class Transform; +template class ParametrizedLine; +template class Hyperplane; +template class UniformScaling; +template class Homogeneous; + +// Sparse module: +template class SparseMatrixBase; + +// MatrixFunctions module +template struct MatrixExponentialReturnValue; +template class MatrixFunctionReturnValue; +template class MatrixSquareRootReturnValue; +template class MatrixLogarithmReturnValue; +template class MatrixPowerReturnValue; +template class MatrixComplexPowerReturnValue; + +namespace internal { +template +struct stem_function +{ + typedef std::complex::Real> ComplexScalar; + typedef ComplexScalar type(ComplexScalar, int); +}; +} + +} // end namespace Eigen + +#endif // EIGEN_FORWARDDECLARATIONS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/MKL_support.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/MKL_support.h new file mode 100644 index 000000000..b7d6ecc76 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/MKL_support.h @@ -0,0 +1,130 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Include file with common MKL declarations
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_MKL_SUPPORT_H
+#define EIGEN_MKL_SUPPORT_H
+
+#ifdef EIGEN_USE_MKL_ALL
+  #ifndef EIGEN_USE_BLAS
+    #define EIGEN_USE_BLAS
+  #endif
+  #ifndef EIGEN_USE_LAPACKE
+    #define EIGEN_USE_LAPACKE
+  #endif
+  #ifndef EIGEN_USE_MKL_VML
+    #define EIGEN_USE_MKL_VML
+  #endif
+#endif
+
+#ifdef EIGEN_USE_LAPACKE_STRICT
+  #define EIGEN_USE_LAPACKE
+#endif
+
+#if defined(EIGEN_USE_MKL_VML) && !defined(EIGEN_USE_MKL)
+  #define EIGEN_USE_MKL
+#endif
+
+
+#if defined EIGEN_USE_MKL
+# include <mkl.h>
+/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
+# ifndef INTEL_MKL_VERSION
+#  undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
+# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
+#  undef EIGEN_USE_MKL
+# endif
+# ifndef EIGEN_USE_MKL
+    /*If the MKL version is too old, undef everything*/
+#  undef EIGEN_USE_MKL_ALL
+#  undef EIGEN_USE_LAPACKE
+#  undef EIGEN_USE_MKL_VML
+#  undef EIGEN_USE_LAPACKE_STRICT
+#  undef EIGEN_USE_LAPACKE
+# endif
+#endif
+
+#if defined EIGEN_USE_MKL
+
+#define EIGEN_MKL_VML_THRESHOLD 128
+
+/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
+/* MKL_BLAS, etc are not defined in 11.2 */
+#ifdef MKL_DOMAIN_ALL
+#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
+#else
+#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
+#endif
+
+#ifdef MKL_DOMAIN_BLAS
+#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
+#else
+#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
+#endif
+
+#ifdef MKL_DOMAIN_FFT
+#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
+#else
+#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
+#endif
+
+#ifdef MKL_DOMAIN_VML
+#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
+#else
+#define EIGEN_MKL_DOMAIN_VML MKL_VML
+#endif
+
+#ifdef MKL_DOMAIN_PARDISO
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
+#else
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
+#endif
+#endif
+
+#if defined(EIGEN_USE_BLAS) && !defined(EIGEN_USE_MKL)
+#include "../../misc/blas.h"
+#endif
+
+namespace Eigen {
+
+typedef std::complex<double> dcomplex;
+typedef std::complex<float>  scomplex;
+
+#if defined(EIGEN_USE_MKL)
+typedef MKL_INT BlasIndex;
+#else
+typedef int BlasIndex;
+#endif
+
+} // end namespace Eigen
+
+
+#endif // EIGEN_MKL_SUPPORT_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Macros.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Macros.h
new file mode 100644
index 000000000..8a39b27a4
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Macros.h
@@ -0,0 +1,1038 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2015 Gael Guennebaud
+// Copyright (C) 2006-2008 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
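// Editorial aside, not part of this diff: MKL_support.h above is driven entirely by preprocessor
// switches. A minimal sketch of a translation unit opting in to the MKL bindings; the link line
// is install-specific, and "-lmkl_rt" is just one plausible choice:
//
//   #define EIGEN_USE_MKL_ALL   // implies EIGEN_USE_BLAS, EIGEN_USE_LAPACKE and EIGEN_USE_MKL_VML
//   #include <Eigen/Dense>
//
// compiled with, e.g.:  g++ -O2 -I/path/to/mkl/include main.cpp -lmkl_rt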
+ +#ifndef EIGEN_MACROS_H +#define EIGEN_MACROS_H + +#define EIGEN_WORLD_VERSION 3 +#define EIGEN_MAJOR_VERSION 3 +#define EIGEN_MINOR_VERSION 7 + +#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ + (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ + EIGEN_MINOR_VERSION>=z)))) + +// Compiler identification, EIGEN_COMP_* + +/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC +#ifdef __GNUC__ + #define EIGEN_COMP_GNUC 1 +#else + #define EIGEN_COMP_GNUC 0 +#endif + +/// \internal EIGEN_COMP_CLANG set to major+minor version (e.g., 307 for clang 3.7) if the compiler is clang +#if defined(__clang__) + #define EIGEN_COMP_CLANG (__clang_major__*100+__clang_minor__) +#else + #define EIGEN_COMP_CLANG 0 +#endif + + +/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm +#if defined(__llvm__) + #define EIGEN_COMP_LLVM 1 +#else + #define EIGEN_COMP_LLVM 0 +#endif + +/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise +#if defined(__INTEL_COMPILER) + #define EIGEN_COMP_ICC __INTEL_COMPILER +#else + #define EIGEN_COMP_ICC 0 +#endif + +/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw +#if defined(__MINGW32__) + #define EIGEN_COMP_MINGW 1 +#else + #define EIGEN_COMP_MINGW 0 +#endif + +/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio +#if defined(__SUNPRO_CC) + #define EIGEN_COMP_SUNCC 1 +#else + #define EIGEN_COMP_SUNCC 0 +#endif + +/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise. +#if defined(_MSC_VER) + #define EIGEN_COMP_MSVC _MSC_VER +#else + #define EIGEN_COMP_MSVC 0 +#endif + +// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC: +// name ver MSC_VER +// 2008 9 1500 +// 2010 10 1600 +// 2012 11 1700 +// 2013 12 1800 +// 2015 14 1900 +// "15" 15 1900 + +/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC or clang-cl +#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG) + #define EIGEN_COMP_MSVC_STRICT _MSC_VER +#else + #define EIGEN_COMP_MSVC_STRICT 0 +#endif + +/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++ +#if defined(__IBMCPP__) || defined(__xlc__) + #define EIGEN_COMP_IBM 1 +#else + #define EIGEN_COMP_IBM 0 +#endif + +/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler +#if defined(__PGI) + #define EIGEN_COMP_PGI 1 +#else + #define EIGEN_COMP_PGI 0 +#endif + +/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler +#if defined(__CC_ARM) || defined(__ARMCC_VERSION) + #define EIGEN_COMP_ARM 1 +#else + #define EIGEN_COMP_ARM 0 +#endif + +/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler +#if defined(__EMSCRIPTEN__) + #define EIGEN_COMP_EMSCRIPTEN 1 +#else + #define EIGEN_COMP_EMSCRIPTEN 0 +#endif + + +/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.) 
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN)
+  #define EIGEN_COMP_GNUC_STRICT 1
+#else
+  #define EIGEN_COMP_GNUC_STRICT 0
+#endif
+
+
+#if EIGEN_COMP_GNUC
+  #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+  #define EIGEN_GNUC_AT_MOST(x,y)  ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+#else
+  #define EIGEN_GNUC_AT_LEAST(x,y) 0
+  #define EIGEN_GNUC_AT_MOST(x,y)  0
+#endif
+
+#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
+#define EIGEN_HAS_CXX11 1
+#else
+#define EIGEN_HAS_CXX11 0
+#endif
+
+
+// Do we support r-value references?
+#ifndef EIGEN_HAS_RVALUE_REFERENCES
+#if EIGEN_MAX_CPP_VER>=11 && \
+    (__has_feature(cxx_rvalue_references) || \
+    (defined(__cplusplus) && __cplusplus >= 201103L) || \
+    (EIGEN_COMP_MSVC >= 1600))
+  #define EIGEN_HAS_RVALUE_REFERENCES 1
+#else
+  #define EIGEN_HAS_RVALUE_REFERENCES 0
+#endif
+#endif
+
+// Does the compiler support C99?
+#ifndef EIGEN_HAS_C99_MATH
+#if EIGEN_MAX_CPP_VER>=11 && \
+    ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \
+  || (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \
+  || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)))
+  #define EIGEN_HAS_C99_MATH 1
+#else
+  #define EIGEN_HAS_C99_MATH 0
+#endif
+#endif
+
+// Does the compiler support result_of?
+#ifndef EIGEN_HAS_STD_RESULT_OF
+#if EIGEN_MAX_CPP_VER>=11 && ((__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L)))
+#define EIGEN_HAS_STD_RESULT_OF 1
+#else
+#define EIGEN_HAS_STD_RESULT_OF 0
+#endif
+#endif
+
+// Does the compiler support variadic templates?
+#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
+#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \
+  && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_CUDACC_VER >= 80000) )
+    // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
+    //    this prevents nvcc from crashing when compiling Eigen on Tegra X1
+#define EIGEN_HAS_VARIADIC_TEMPLATES 1
+#else
+#define EIGEN_HAS_VARIADIC_TEMPLATES 0
+#endif
+#endif
+
+// Does the compiler fully support const expressions? (as in c++14)
+#ifndef EIGEN_HAS_CONSTEXPR
+
+#ifdef __CUDACC__
+// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
+#if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && (EIGEN_COMP_CLANG || EIGEN_CUDACC_VER >= 70500))
+  #define EIGEN_HAS_CONSTEXPR 1
+#endif
+#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
+  (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)))
+#define EIGEN_HAS_CONSTEXPR 1
+#endif
+
+#ifndef EIGEN_HAS_CONSTEXPR
+#define EIGEN_HAS_CONSTEXPR 0
+#endif
+
+#endif
+
+// Does the compiler support C++11 math?
+// Let's be conservative and enable the default C++11 implementation only if we are sure it exists
+#ifndef EIGEN_HAS_CXX11_MATH
+  #if EIGEN_MAX_CPP_VER>=11 && ((__cplusplus > 201103L) || (__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \
+      && (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
+    #define EIGEN_HAS_CXX11_MATH 1
+  #else
+    #define EIGEN_HAS_CXX11_MATH 0
+  #endif
+#endif
+
+// Does the compiler support proper C++11 containers?
+#ifndef EIGEN_HAS_CXX11_CONTAINERS
+  #if EIGEN_MAX_CPP_VER>=11 && \
+    ((__cplusplus > 201103L) \
+  || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
+  || EIGEN_COMP_MSVC >= 1900)
+    #define EIGEN_HAS_CXX11_CONTAINERS 1
+  #else
+    #define EIGEN_HAS_CXX11_CONTAINERS 0
+  #endif
+#endif
+
+// Does the compiler support C++11 noexcept?
+#ifndef EIGEN_HAS_CXX11_NOEXCEPT
+  #if EIGEN_MAX_CPP_VER>=11 && \
+    (__has_feature(cxx_noexcept) \
+  || (__cplusplus > 201103L) \
+  || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
+  || EIGEN_COMP_MSVC >= 1900)
+    #define EIGEN_HAS_CXX11_NOEXCEPT 1
+  #else
+    #define EIGEN_HAS_CXX11_NOEXCEPT 0
+  #endif
+#endif
+
+/** Allows disabling some optimizations which might affect the accuracy of the result.
+  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+  * They currently include:
+  *   - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
+  */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
+
+// concatenate two tokens
+#define EIGEN_CAT2(a,b) a ## b
+#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
+
+#define EIGEN_COMMA ,
+
+// convert a token to a string
+#define EIGEN_MAKESTRING2(a) #a
+#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
+
+// EIGEN_STRONG_INLINE is a stronger version of inline, using __forceinline on MSVC,
+// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
+// but GCC is still doing fine with just inline.
+#ifndef EIGEN_STRONG_INLINE
+#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC
+#define EIGEN_STRONG_INLINE __forceinline
+#else
+#define EIGEN_STRONG_INLINE inline
+#endif
+#endif
+
+// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
+// attribute to maximize inlining. This should only be used when really necessary: in particular,
+// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x and 4.1 report the following compilation error:
+//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Derived>::eval() const'
+//   : function body not available
+// See also bug 1367
+#if EIGEN_GNUC_AT_LEAST(4,2)
+#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_DONT_INLINE __attribute__((noinline))
+#elif EIGEN_COMP_MSVC
+#define EIGEN_DONT_INLINE __declspec(noinline)
+#else
+#define EIGEN_DONT_INLINE
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_PERMISSIVE_EXPR __extension__
+#else
+#define EIGEN_PERMISSIVE_EXPR
+#endif
+
+// this macro allows getting rid of linking errors about multiply-defined functions.
+// - static is not very good because it prevents definitions from different object files from being merged.
+//   So static causes the resulting linked executable to be bloated with multiple copies of the same function.
+// - inline is not perfect either as it needlessly hints the compiler toward inlining the function.
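// Editorial aside, not part of this diff: a minimal sketch of the inlining macros defined above
// in user code; both function names are hypothetical.
//
//   EIGEN_STRONG_INLINE float squared(float x) { return x * x; } // __forceinline on MSVC/ICC, plain inline elsewhere
//   EIGEN_DONT_INLINE   void  debug_hook();                      // kept out-of-line, e.g. for breakpoints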
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline + +#ifdef NDEBUG +# ifndef EIGEN_NO_DEBUG +# define EIGEN_NO_DEBUG +# endif +#endif + +// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89 +#ifdef EIGEN_NO_DEBUG + #define eigen_plain_assert(x) +#else + #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO + namespace Eigen { + namespace internal { + inline bool copy_bool(bool b) { return b; } + } + } + #define eigen_plain_assert(x) assert(x) + #else + // work around bug 89 + #include // for abort + #include // for std::cerr + + namespace Eigen { + namespace internal { + // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers. + // see bug 89. + namespace { + EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; } + } + inline void assert_fail(const char *condition, const char *function, const char *file, int line) + { + std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl; + abort(); + } + } + } + #define eigen_plain_assert(x) \ + do { \ + if(!Eigen::internal::copy_bool(x)) \ + Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \ + } while(false) + #endif +#endif + +// eigen_assert can be overridden +#ifndef eigen_assert +#define eigen_assert(x) eigen_plain_assert(x) +#endif + +#ifdef EIGEN_INTERNAL_DEBUGGING +#define eigen_internal_assert(x) eigen_assert(x) +#else +#define eigen_internal_assert(x) +#endif + +#ifdef EIGEN_NO_DEBUG +#define EIGEN_ONLY_USED_FOR_DEBUG(x) EIGEN_UNUSED_VARIABLE(x) +#else +#define EIGEN_ONLY_USED_FOR_DEBUG(x) +#endif + +#ifndef EIGEN_NO_DEPRECATED_WARNING + #if EIGEN_COMP_GNUC + #define EIGEN_DEPRECATED __attribute__((deprecated)) + #elif EIGEN_COMP_MSVC + #define EIGEN_DEPRECATED __declspec(deprecated) + #else + #define EIGEN_DEPRECATED + #endif +#else + #define EIGEN_DEPRECATED +#endif + +#if EIGEN_COMP_GNUC +#define EIGEN_UNUSED __attribute__((unused)) +#else +#define EIGEN_UNUSED +#endif + +// Suppresses 'unused variable' warnings. +namespace Eigen { + namespace internal { + template EIGEN_DEVICE_FUNC void ignore_unused_variable(const T&) {} + } +} +#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var); + +#if !defined(EIGEN_ASM_COMMENT) + #if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64) + #define EIGEN_ASM_COMMENT(X) __asm__("#" X) + #else + #define EIGEN_ASM_COMMENT(X) + #endif +#endif + + +//------------------------------------------------------------------------------------------ +// Static and dynamic alignment control +// +// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES +// as the maximal boundary in bytes on which dynamically and statically allocated data may be alignment respectively. +// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not, +// a default value is automatically computed based on architecture, compiler, and OS. +// +// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX} +// to be used to declare statically aligned buffers. +//------------------------------------------------------------------------------------------ + + +/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements. 
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled, + * so that vectorization doesn't affect binary compatibility. + * + * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link + * vectorized and non-vectorized code. + */ +#if (defined __CUDACC__) + #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n) +#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM + #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) +#elif EIGEN_COMP_MSVC + #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n)) +#elif EIGEN_COMP_SUNCC + // FIXME not sure about this one: + #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) +#else + #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler +#endif + +// If the user explicitly disable vectorization, then we also disable alignment +#if defined(EIGEN_DONT_VECTORIZE) + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0 +#elif defined(EIGEN_VECTORIZE_AVX512) + // 64 bytes static alignmeent is preferred only if really required + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64 +#elif defined(__AVX__) + // 32 bytes static alignmeent is preferred only if really required + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32 +#else + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16 +#endif + + +// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense +#define EIGEN_MIN_ALIGN_BYTES 16 + +// Defined the boundary (in bytes) on which the data needs to be aligned. Note +// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be +// aligned at all regardless of the value of this #define. + +#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0 +#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY. +#endif + +// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated +// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0 +#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN) + #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES + #undef EIGEN_MAX_STATIC_ALIGN_BYTES + #endif + #define EIGEN_MAX_STATIC_ALIGN_BYTES 0 +#endif + +#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES + + // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES + + // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable + // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always + // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in + // certain common platform (compiler+architecture combinations) to avoid these problems. + // Only static alignment is really problematic (relies on nonstandard compiler extensions), + // try to keep heap alignment even when we have to disable static alignment. + #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_MIPS) + #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1 + #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6) + // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support. 
+  // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
+  // 4.8 and newer seem definitely unaffected.
+  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+  #else
+  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+  #endif
+
+  // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+  #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+    && !EIGEN_GCC3_OR_OLDER \
+    && !EIGEN_COMP_SUNCC \
+    && !EIGEN_OS_QNX
+    #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+  #else
+    #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+  #endif
+
+  #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
+    #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+  #else
+    #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+  #endif
+
+#endif
+
+// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_STATIC_ALIGN_BYTES
+#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
+#undef EIGEN_MAX_STATIC_ALIGN_BYTES
+#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
+  #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+#endif
+
+// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
+// It takes into account both the user's choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
+// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
+// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
+
+
+// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
+#define EIGEN_ALIGN8  EIGEN_ALIGN_TO_BOUNDARY(8)
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
+#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
+#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
+#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
+#else
+#define EIGEN_ALIGN_MAX
+#endif
+
+
+// Dynamic alignment control
+
+#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
+#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+  #ifdef EIGEN_MAX_ALIGN_BYTES
+    #undef EIGEN_MAX_ALIGN_BYTES
+  #endif
+  #define EIGEN_MAX_ALIGN_BYTES 0
+#elif !defined(EIGEN_MAX_ALIGN_BYTES)
+  #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#else
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+
+#ifndef EIGEN_UNALIGNED_VECTORIZE
+#define EIGEN_UNALIGNED_VECTORIZE 1
+#endif
+
+//----------------------------------------------------------------------
+
+
+#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
+  #define EIGEN_RESTRICT
+#endif
+#ifndef EIGEN_RESTRICT
+  #define EIGEN_RESTRICT __restrict
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+// 131072 == 128 KB
+#define EIGEN_STACK_ALLOCATION_LIMIT 131072
+#endif
+
+#ifndef EIGEN_DEFAULT_IO_FORMAT
+#ifdef EIGEN_MAKING_DOCS
+// format used in Eigen's documentation
+// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic.
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
+#else
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
+#endif
+#endif
+
+// just an empty macro !
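+// [Editorial note, not part of upstream Eigen] To recap the net effect of the
+// alignment section above: EIGEN_MAX_STATIC_ALIGN_BYTES drives the attribute
+// emitted by EIGEN_ALIGN_MAX for static/stack buffers, while
+// EIGEN_MAX_ALIGN_BYTES bounds the alignment of heap allocations. A minimal,
+// hypothetical usage sketch (PacketBuffer is an illustrative name):
+//
+//   struct PacketBuffer {
+//     EIGEN_ALIGN_MAX float data[8];  // aligned to EIGEN_MAX_STATIC_ALIGN_BYTES, if > 0
+//   };
+//   #if EIGEN_MAX_STATIC_ALIGN_BYTES > 0
+//   static_assert(alignof(PacketBuffer) >= EIGEN_MAX_STATIC_ALIGN_BYTES,
+//                 "EIGEN_ALIGN_MAX should raise the struct's alignment");  // needs C++11
+//   #endif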
+#define EIGEN_EMPTY + +#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || EIGEN_CUDACC_VER>0) + // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324) + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; +#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653) + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other) { Base::operator=(other.derived()); return *this; } +#else + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \ + { \ + Base::operator=(other); \ + return *this; \ + } +#endif + + +/** + * \internal + * \brief Macro to explicitly define the default copy constructor. + * This is necessary, because the implicit definition is deprecated if the copy-assignment is overridden. + */ +#if EIGEN_HAS_CXX11 +#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) EIGEN_DEVICE_FUNC CLASS(const CLASS&) = default; +#else +#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) +#endif + + + +/** \internal + * \brief Macro to manually inherit assignment operators. + * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined. + * With C++11 or later this also default-implements the copy-constructor + */ +#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived) + +/** \internal + * \brief Macro to manually define default constructors and destructors. + * This is necessary when the copy constructor is re-defined. + * For empty helper classes this should usually be protected, to avoid accidentally creating empty objects. + * + * Hiding the default destructor lead to problems in C++03 mode together with boost::multiprecision + */ +#if EIGEN_HAS_CXX11 +#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \ + EIGEN_DEVICE_FUNC Derived() = default; \ + EIGEN_DEVICE_FUNC ~Derived() = default; +#else +#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \ + EIGEN_DEVICE_FUNC Derived() {}; \ + /* EIGEN_DEVICE_FUNC ~Derived() {}; */ +#endif + + + + + +/** +* Just a side note. Commenting within defines works only by documenting +* behind the object (via '!<'). Comments cannot be multi-line and thus +* we have these extra long lines. What is confusing doxygen over here is +* that we use '\' and basically have a bunch of typedefs with their +* documentation in a single line. +**/ + +#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ + typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ + typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ + typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. 
for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ + typedef typename Eigen::internal::ref_selector::type Nested; \ + typedef typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; \ + enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ + Flags = Eigen::internal::traits::Flags, \ + SizeAtCompileTime = Base::SizeAtCompileTime, \ + MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ + IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ + using Base::derived; \ + using Base::const_cast_derived; + + +// FIXME Maybe the EIGEN_DENSE_PUBLIC_INTERFACE could be removed as importing PacketScalar is rarely needed +#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ + EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ + typedef typename Base::PacketScalar PacketScalar; + + +#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b) +#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b) + +// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1, +// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over +// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3. +#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \ + : ((int)a == 1 || (int)b == 1) ? 1 \ + : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \ + : ((int)a <= (int)b) ? (int)a : (int)b) + +// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values +// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is +// (between 0 and 3), it is not more than 3. +#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b) (((int)a == 0 || (int)b == 0) ? 0 \ + : ((int)a == 1 || (int)b == 1) ? 1 \ + : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \ + : ((int)a == Dynamic) ? (int)b \ + : ((int)b == Dynamic) ? (int)a \ + : ((int)a <= (int)b) ? (int)a : (int)b) + +// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here. +#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \ + : ((int)a >= (int)b) ? 
(int)a : (int)b) + +#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b))) + +#define EIGEN_IMPLIES(a,b) (!(a) || (b)) + +// the expression type of a standard coefficient wise binary operation +#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \ + CwiseBinaryOp< \ + EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \ + typename internal::traits::Scalar, \ + typename internal::traits::Scalar \ + >, \ + const LHS, \ + const RHS \ + > + +#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,OPNAME) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME) \ + (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ + { \ + return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME)(derived(), other.derived()); \ + } + +#define EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,TYPEA,TYPEB) \ + (Eigen::internal::has_ReturnType > >::value) + +#define EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(EXPR,SCALAR,OPNAME) \ + CwiseBinaryOp::Scalar,SCALAR>, const EXPR, \ + const typename internal::plain_constant_type::type> + +#define EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(SCALAR,EXPR,OPNAME) \ + CwiseBinaryOp::Scalar>, \ + const typename internal::plain_constant_type::type, const EXPR> + +// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010") +#if EIGEN_COMP_MSVC_STRICT<=1600 +#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if::type +#else +#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X +#endif + +#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \ + template EIGEN_DEVICE_FUNC inline \ + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg::type,OPNAME))\ + (METHOD)(const T& scalar) const { \ + typedef typename internal::promote_scalar_arg::type PromotedT; \ + return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \ + typename internal::plain_constant_type::type(derived().rows(), derived().cols(), internal::scalar_constant_op(scalar))); \ + } + +#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \ + template EIGEN_DEVICE_FUNC inline friend \ + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg::type,Derived,OPNAME)) \ + (METHOD)(const T& scalar, const StorageBaseType& matrix) { \ + typedef typename internal::promote_scalar_arg::type PromotedT; \ + return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \ + typename internal::plain_constant_type::type(matrix.derived().rows(), matrix.derived().cols(), internal::scalar_constant_op(scalar)), matrix.derived()); \ + } + +#define EIGEN_MAKE_SCALAR_BINARY_OP(METHOD,OPNAME) \ + EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \ + EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) + + +#ifdef EIGEN_EXCEPTIONS +# define EIGEN_THROW_X(X) throw X +# define EIGEN_THROW throw +# define EIGEN_TRY try +# define EIGEN_CATCH(X) catch (X) +#else +# ifdef __CUDA_ARCH__ +# define EIGEN_THROW_X(X) asm("trap;") +# define EIGEN_THROW asm("trap;") +# else +# define EIGEN_THROW_X(X) std::abort() +# define EIGEN_THROW std::abort() +# endif +# define EIGEN_TRY if (true) +# define EIGEN_CATCH(X) else +#endif + + +#if EIGEN_HAS_CXX11_NOEXCEPT +# define EIGEN_INCLUDE_TYPE_TRAITS +# define EIGEN_NOEXCEPT noexcept +# define EIGEN_NOEXCEPT_IF(x) noexcept(x) +# define EIGEN_NO_THROW noexcept(true) +# define 
EIGEN_EXCEPTION_SPEC(X) noexcept(false) +#else +# define EIGEN_NOEXCEPT +# define EIGEN_NOEXCEPT_IF(x) +# define EIGEN_NO_THROW throw() +# if EIGEN_COMP_MSVC + // MSVC does not support exception specifications (warning C4290), + // and they are deprecated in c++11 anyway. +# define EIGEN_EXCEPTION_SPEC(X) throw() +# else +# define EIGEN_EXCEPTION_SPEC(X) throw(X) +# endif +#endif + +#endif // EIGEN_MACROS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Memory.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Memory.h new file mode 100644 index 000000000..291383c58 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Memory.h @@ -0,0 +1,993 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2008-2009 Benoit Jacob +// Copyright (C) 2009 Kenneth Riddile +// Copyright (C) 2010 Hauke Heibel +// Copyright (C) 2010 Thomas Capricelli +// Copyright (C) 2013 Pavel Holoborodko +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +/***************************************************************************** +*** Platform checks for aligned malloc functions *** +*****************************************************************************/ + +#ifndef EIGEN_MEMORY_H +#define EIGEN_MEMORY_H + +#ifndef EIGEN_MALLOC_ALREADY_ALIGNED + +// Try to determine automatically if malloc is already aligned. + +// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see: +// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html +// This is true at least since glibc 2.8. +// This leaves the question how to detect 64-bit. According to this document, +// http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf +// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed +// quite safe, at least within the context of glibc, to equate 64-bit with LP64. +#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \ + && defined(__LP64__) && ! 
defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16) + #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0 +#endif + +// FreeBSD 6 seems to have 16-byte aligned malloc +// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup +// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures +// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup +#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16) + #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0 +#endif + +#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \ + || (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \ + || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \ + || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED + #define EIGEN_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_MALLOC_ALREADY_ALIGNED 0 +#endif + +#endif + +namespace Eigen { + +namespace internal { + +EIGEN_DEVICE_FUNC +inline void throw_std_bad_alloc() +{ + #ifdef EIGEN_EXCEPTIONS + throw std::bad_alloc(); + #else + std::size_t huge = static_cast(-1); + ::operator new(huge); + #endif +} + +/***************************************************************************** +*** Implementation of handmade aligned functions *** +*****************************************************************************/ + +/* ----- Hand made implementations of aligned malloc/free and realloc ----- */ + +/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned. + * Fast, but wastes 16 additional bytes of memory. Does not throw any exception. + */ +inline void* handmade_aligned_malloc(std::size_t size) +{ + void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES); + if (original == 0) return 0; + void *aligned = reinterpret_cast((reinterpret_cast(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES); + *(reinterpret_cast(aligned) - 1) = original; + return aligned; +} + +/** \internal Frees memory allocated with handmade_aligned_malloc */ +inline void handmade_aligned_free(void *ptr) +{ + if (ptr) std::free(*(reinterpret_cast(ptr) - 1)); +} + +/** \internal + * \brief Reallocates aligned memory. + * Since we know that our handmade version is based on std::malloc + * we can use std::realloc to implement efficient reallocation. 
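+ *
+ * [Editorial note, not part of upstream Eigen] The pointer layout shared by
+ * these handmade routines, shown for EIGEN_DEFAULT_ALIGN_BYTES == 16 and a
+ * hypothetical malloc result of 0x1008:
+ *
+ *   original = std::malloc(size + 16);   // returns 0x1008
+ *   aligned  = (original & ~15) + 16;    // 0x1010, always 16-byte aligned
+ *   ((void**)aligned)[-1] = original;    // stashed for handmade_aligned_free()
+ *
+ * The slack in front of \a aligned is 16 - (original % 16) bytes; since malloc
+ * already returns storage aligned for any fundamental type, that slack always
+ * has room for the stashed pointer.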
+ */ +inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0) +{ + if (ptr == 0) return handmade_aligned_malloc(size); + void *original = *(reinterpret_cast(ptr) - 1); + std::ptrdiff_t previous_offset = static_cast(ptr)-static_cast(original); + original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES); + if (original == 0) return 0; + void *aligned = reinterpret_cast((reinterpret_cast(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES); + void *previous_aligned = static_cast(original)+previous_offset; + if(aligned!=previous_aligned) + std::memmove(aligned, previous_aligned, size); + + *(reinterpret_cast(aligned) - 1) = original; + return aligned; +} + +/***************************************************************************** +*** Implementation of portable aligned versions of malloc/free/realloc *** +*****************************************************************************/ + +#ifdef EIGEN_NO_MALLOC +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{ + eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); +} +#elif defined EIGEN_RUNTIME_NO_MALLOC +EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false) +{ + static bool value = true; + if (update == 1) + value = new_value; + return value; +} +EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); } +EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); } +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{ + eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)"); +} +#else +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{} +#endif + +/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements. + * On allocation error, the returned pointer is null, and std::bad_alloc is thrown. + */ +EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size) +{ + check_that_malloc_is_allowed(); + + void *result; + #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + result = std::malloc(size); + #if EIGEN_DEFAULT_ALIGN_BYTES==16 + eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator."); + #endif + #else + result = handmade_aligned_malloc(size); + #endif + + if(!result && size) + throw_std_bad_alloc(); + + return result; +} + +/** \internal Frees memory allocated with aligned_malloc. */ +EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr) +{ + #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + std::free(ptr); + #else + handmade_aligned_free(ptr); + #endif +} + +/** + * \internal + * \brief Reallocates an aligned block of memory. 
+ * \throws std::bad_alloc on allocation failure + */ +inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size) +{ + EIGEN_UNUSED_VARIABLE(old_size); + + void *result; +#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + result = std::realloc(ptr,new_size); +#else + result = handmade_aligned_realloc(ptr,new_size,old_size); +#endif + + if (!result && new_size) + throw_std_bad_alloc(); + + return result; +} + +/***************************************************************************** +*** Implementation of conditionally aligned functions *** +*****************************************************************************/ + +/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned. + * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown. + */ +template EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size) +{ + return aligned_malloc(size); +} + +template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size) +{ + check_that_malloc_is_allowed(); + + void *result = std::malloc(size); + if(!result && size) + throw_std_bad_alloc(); + return result; +} + +/** \internal Frees memory allocated with conditional_aligned_malloc */ +template EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr) +{ + aligned_free(ptr); +} + +template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr) +{ + std::free(ptr); +} + +template inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size) +{ + return aligned_realloc(ptr, new_size, old_size); +} + +template<> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t) +{ + return std::realloc(ptr, new_size); +} + +/***************************************************************************** +*** Construction/destruction of array elements *** +*****************************************************************************/ + +/** \internal Destructs the elements of an array. + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, std::size_t size) +{ + // always destruct an array starting from the end. + if(ptr) + while(size) ptr[--size].~T(); +} + +/** \internal Constructs the elements of an array. + * The \a size parameter tells on how many objects to call the constructor of T. + */ +template EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, std::size_t size) +{ + std::size_t i; + EIGEN_TRY + { + for (i = 0; i < size; ++i) ::new (ptr + i) T; + return ptr; + } + EIGEN_CATCH(...) + { + destruct_elements_of_array(ptr, i); + EIGEN_THROW; + } + return NULL; +} + +/***************************************************************************** +*** Implementation of aligned new/delete-like functions *** +*****************************************************************************/ + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size) +{ + if(size > std::size_t(-1) / sizeof(T)) + throw_std_bad_alloc(); +} + +/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment. + * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown. + * The default constructor of T is called. 
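+ *
+ * [Editorial note, not part of upstream Eigen] A minimal usage sketch; the
+ * buffer must be released with aligned_delete() so that destructors run:
+ * \code
+ * float* buf = Eigen::internal::aligned_new<float>(256);  // 256 default-constructed floats
+ * // ... use buf[0] .. buf[255] ...
+ * Eigen::internal::aligned_delete(buf, 256);
+ * \endcode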
+ */ +template EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size) +{ + check_size_for_overflow(size); + T *result = reinterpret_cast(aligned_malloc(sizeof(T)*size)); + EIGEN_TRY + { + return construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + aligned_free(result); + EIGEN_THROW; + } + return result; +} + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size) +{ + check_size_for_overflow(size); + T *result = reinterpret_cast(conditional_aligned_malloc(sizeof(T)*size)); + EIGEN_TRY + { + return construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + conditional_aligned_free(result); + EIGEN_THROW; + } + return result; +} + +/** \internal Deletes objects constructed with aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size) +{ + destruct_elements_of_array(ptr, size); + aligned_free(ptr); +} + +/** \internal Deletes objects constructed with conditional_aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size) +{ + destruct_elements_of_array(ptr, size); + conditional_aligned_free(ptr); +} + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size) +{ + check_size_for_overflow(new_size); + check_size_for_overflow(old_size); + if(new_size < old_size) + destruct_elements_of_array(pts+new_size, old_size-new_size); + T *result = reinterpret_cast(conditional_aligned_realloc(reinterpret_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); + if(new_size > old_size) + { + EIGEN_TRY + { + construct_elements_of_array(result+old_size, new_size-old_size); + } + EIGEN_CATCH(...) + { + conditional_aligned_free(result); + EIGEN_THROW; + } + } + return result; +} + + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size) +{ + if(size==0) + return 0; // short-cut. Also fixes Bug 884 + check_size_for_overflow(size); + T *result = reinterpret_cast(conditional_aligned_malloc(sizeof(T)*size)); + if(NumTraits::RequireInitialization) + { + EIGEN_TRY + { + construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + conditional_aligned_free(result); + EIGEN_THROW; + } + } + return result; +} + +template inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size) +{ + check_size_for_overflow(new_size); + check_size_for_overflow(old_size); + if(NumTraits::RequireInitialization && (new_size < old_size)) + destruct_elements_of_array(pts+new_size, old_size-new_size); + T *result = reinterpret_cast(conditional_aligned_realloc(reinterpret_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); + if(NumTraits::RequireInitialization && (new_size > old_size)) + { + EIGEN_TRY + { + construct_elements_of_array(result+old_size, new_size-old_size); + } + EIGEN_CATCH(...) 
+    {
+      conditional_aligned_free<Align>(result);
+      EIGEN_THROW;
+    }
+  }
+  return result;
+}
+
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size)
+{
+  if(NumTraits<T>::RequireInitialization)
+    destruct_elements_of_array<T>(ptr, size);
+  conditional_aligned_free<Align>(ptr);
+}
+
+/****************************************************************************/
+
+/** \internal Returns the index of the first element of the array that is well aligned with respect to the requested \a Alignment.
+  *
+  * \tparam Alignment requested alignment in Bytes.
+  * \param array the address of the start of the array
+  * \param size the size of the array
+  *
+  * \note If no element of the array is well aligned or the requested alignment is not a multiple of a scalar,
+  * the size of the array is returned. For example with SSE, the requested alignment is typically 16 bytes. If
+  * the packet size for the given scalar type is 1, then everything is considered well-aligned.
+  *
+  * \note Otherwise, if the Alignment is larger than the scalar size, we rely on the assumption that sizeof(Scalar) is a
+  * power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
+  * example with Scalar=double on certain 32-bit platforms, see bug #79.
+  *
+  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
+  * \sa first_default_aligned()
+  */
+template<int Alignment, typename Scalar, typename Index>
+EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
+{
+  const Index ScalarSize = sizeof(Scalar);
+  const Index AlignmentSize = Alignment / ScalarSize;
+  const Index AlignmentMask = AlignmentSize-1;
+
+  if(AlignmentSize<=1)
+  {
+    // Either the requested alignment is smaller than a scalar, or it exactly matches one scalar,
+    // so that all elements of the array have the same alignment.
+    return 0;
+  }
+  else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
+  {
+    // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
+    // Consequently, no element of the array is well aligned.
+    return size;
+  }
+  else
+  {
+    Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
+    return (first < size) ? first : size;
+  }
+}
+
+/** \internal Returns the index of the first element of the array that is well aligned with respect to the largest packet requirement.
+  * \sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase) */
+template<typename Scalar, typename Index>
+EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size)
+{
+  typedef typename packet_traits<Scalar>::type DefaultPacketType;
+  return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
+}
+
+/** \internal Returns the smallest integer multiple of \a base and greater or equal to \a size
+  */
+template<typename Index>
+inline Index first_multiple(Index size, Index base)
+{
+  return ((size+base-1)/base)*base;
+}
+
+// std::copy is much slower than memcpy, so let's introduce a smart_copy which
+// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
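+// [Editorial note, not part of upstream Eigen] smart_copy copies the range
+// [start,end) into an already-constructed destination range; the helpers below
+// dispatch to std::memcpy for trivial types and to std::copy (which runs
+// operator=) otherwise. Hypothetical call site:
+//   smart_copy(src, src + n, dst);  // collapses to a single memcpy for POD T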
+template struct smart_copy_helper; + +template EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target) +{ + smart_copy_helper::RequireInitialization>::run(start, end, target); +} + +template struct smart_copy_helper { + EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target) + { + IntPtr size = IntPtr(end)-IntPtr(start); + if(size==0) return; + eigen_internal_assert(start!=0 && end!=0 && target!=0); + std::memcpy(target, start, size); + } +}; + +template struct smart_copy_helper { + EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target) + { std::copy(start, end, target); } +}; + +// intelligent memmove. falls back to std::memmove for POD types, uses std::copy otherwise. +template struct smart_memmove_helper; + +template void smart_memmove(const T* start, const T* end, T* target) +{ + smart_memmove_helper::RequireInitialization>::run(start, end, target); +} + +template struct smart_memmove_helper { + static inline void run(const T* start, const T* end, T* target) + { + IntPtr size = IntPtr(end)-IntPtr(start); + if(size==0) return; + eigen_internal_assert(start!=0 && end!=0 && target!=0); + std::memmove(target, start, size); + } +}; + +template struct smart_memmove_helper { + static inline void run(const T* start, const T* end, T* target) + { + if (UIntPtr(target) < UIntPtr(start)) + { + std::copy(start, end, target); + } + else + { + std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T); + std::copy_backward(start, end, target + count); + } + } +}; + + +/***************************************************************************** +*** Implementation of runtime stack allocation (falling back to malloc) *** +*****************************************************************************/ + +// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA +// to the appropriate stack allocation function +#ifndef EIGEN_ALLOCA + #if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca) + #define EIGEN_ALLOCA alloca + #elif EIGEN_COMP_MSVC + #define EIGEN_ALLOCA _alloca + #endif +#endif + +// This helper class construct the allocated memory, and takes care of destructing and freeing the handled data +// at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions. +template class aligned_stack_memory_handler : noncopyable +{ + public: + /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size. + * Note that \a ptr can be 0 regardless of the other parameters. + * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits::RequireInitialization). + * In this case, the buffer elements will also be destructed when this handler will be destructed. + * Finally, if \a dealloc is true, then the pointer \a ptr is freed. 
+ **/ + aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc) + : m_ptr(ptr), m_size(size), m_deallocate(dealloc) + { + if(NumTraits::RequireInitialization && m_ptr) + Eigen::internal::construct_elements_of_array(m_ptr, size); + } + ~aligned_stack_memory_handler() + { + if(NumTraits::RequireInitialization && m_ptr) + Eigen::internal::destruct_elements_of_array(m_ptr, m_size); + if(m_deallocate) + Eigen::internal::aligned_free(m_ptr); + } + protected: + T* m_ptr; + std::size_t m_size; + bool m_deallocate; +}; + +template class scoped_array : noncopyable +{ + T* m_ptr; +public: + explicit scoped_array(std::ptrdiff_t size) + { + m_ptr = new T[size]; + } + ~scoped_array() + { + delete[] m_ptr; + } + T& operator[](std::ptrdiff_t i) { return m_ptr[i]; } + const T& operator[](std::ptrdiff_t i) const { return m_ptr[i]; } + T* &ptr() { return m_ptr; } + const T* ptr() const { return m_ptr; } + operator const T*() const { return m_ptr; } +}; + +template void swap(scoped_array &a,scoped_array &b) +{ + std::swap(a.ptr(),b.ptr()); +} + +} // end namespace internal + +/** \internal + * Declares, allocates and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack + * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform + * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap. + * The allocated buffer is automatically deleted when exiting the scope of this declaration. + * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. + * Here is an example: + * \code + * { + * ei_declare_aligned_stack_constructed_variable(float,data,size,0); + * // use data[0] to data[size-1] + * } + * \endcode + * The underlying stack allocation function can controlled with the EIGEN_ALLOCA preprocessor token. + */ +#ifdef EIGEN_ALLOCA + + #if EIGEN_DEFAULT_ALIGN_BYTES>0 + // We always manually re-align the result of EIGEN_ALLOCA. + // If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment. + #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + #else + #define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE) + #endif + + #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ + Eigen::internal::check_size_for_overflow(SIZE); \ + TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \ + : reinterpret_cast( \ + (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \ + : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \ + Eigen::internal::aligned_stack_memory_handler EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) + +#else + + #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ + Eigen::internal::check_size_for_overflow(SIZE); \ + TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \ + Eigen::internal::aligned_stack_memory_handler EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? 
NAME : 0,SIZE,true) + +#endif + + +/***************************************************************************** +*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] *** +*****************************************************************************/ + +#if EIGEN_MAX_ALIGN_BYTES!=0 + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \ + EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc(size); } \ + EIGEN_CATCH (...) { return 0; } \ + } + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ + void *operator new(std::size_t size) { \ + return Eigen::internal::conditional_aligned_malloc(size); \ + } \ + void *operator new[](std::size_t size) { \ + return Eigen::internal::conditional_aligned_malloc(size); \ + } \ + void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + /* in-place new and delete. since (at least afaik) there is no actual */ \ + /* memory allocated we can safely let the default implementation handle */ \ + /* this particular case. */ \ + static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \ + static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \ + void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \ + void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \ + /* nothrow-new (returns zero instead of std::bad_alloc) */ \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \ + Eigen::internal::conditional_aligned_free(ptr); \ + } \ + typedef void eigen_aligned_operator_new_marker_type; +#else + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) +#endif + +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true) +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0))) + +/****************************************************************************/ + +/** \class aligned_allocator +* \ingroup Core_Module +* +* \brief STL compatible allocator to use with types requiring a non standrad alignment. +* +* The memory is aligned as for dynamically aligned matrix/array types such as MatrixXd. +* By default, it will thus provide at least 16 bytes alignment and more in following cases: +* - 32 bytes alignment if AVX is enabled. +* - 64 bytes alignment if AVX512 is enabled. +* +* This can be controled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented +* \link TopicPreprocessorDirectivesPerformance there \endlink. 
+* +* Example: +* \code +* // Matrix4f requires 16 bytes alignment: +* std::map< int, Matrix4f, std::less, +* aligned_allocator > > my_map_mat4; +* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: +* std::map< int, Vector3f > my_map_vec3; +* \endcode +* +* \sa \blank \ref TopicStlContainers. +*/ +template +class aligned_allocator : public std::allocator +{ +public: + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template + struct rebind + { + typedef aligned_allocator other; + }; + + aligned_allocator() : std::allocator() {} + + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} + + template + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} + + ~aligned_allocator() {} + + pointer allocate(size_type num, const void* /*hint*/ = 0) + { + internal::check_size_for_overflow(num); + size_type size = num * sizeof(T); +#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(7,0) + // workaround gcc bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87544 + // It triggered eigen/Eigen/src/Core/util/Memory.h:189:12: warning: argument 1 value '18446744073709551612' exceeds maximum object size 9223372036854775807 + if(size>=std::size_t((std::numeric_limits::max)())) + return 0; + else +#endif + return static_cast( internal::aligned_malloc(size) ); + } + + void deallocate(pointer p, size_type /*num*/) + { + internal::aligned_free(p); + } +}; + +//---------- Cache sizes ---------- + +#if !defined(EIGEN_NO_CPUID) +# if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64 +# if defined(__PIC__) && EIGEN_ARCH_i386 + // Case for x86 with PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); +# elif defined(__PIC__) && EIGEN_ARCH_x86_64 + // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model. + // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway. 
+# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id)); +# else + // Case for x86_64 or x86 w/o PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) ); +# endif +# elif EIGEN_COMP_MSVC +# if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64 +# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) +# endif +# endif +#endif + +namespace internal { + +#ifdef EIGEN_CPUID + +inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) +{ + return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2]; +} + +inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) +{ + int abcd[4]; + l1 = l2 = l3 = 0; + int cache_id = 0; + int cache_type = 0; + do { + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + EIGEN_CPUID(abcd,0x4,cache_id); + cache_type = (abcd[0] & 0x0F) >> 0; + if(cache_type==1||cache_type==3) // data or unified cache + { + int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5] + int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22] + int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12] + int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0] + int sets = (abcd[2]); // C[31:0] + + int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1); + + switch(cache_level) + { + case 1: l1 = cache_size; break; + case 2: l2 = cache_size; break; + case 3: l3 = cache_size; break; + default: break; + } + } + cache_id++; + } while(cache_type>0 && cache_id<16); +} + +inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) +{ + int abcd[4]; + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + l1 = l2 = l3 = 0; + EIGEN_CPUID(abcd,0x00000002,0); + unsigned char * bytes = reinterpret_cast(abcd)+2; + bool check_for_p2_core2 = false; + for(int i=0; i<14; ++i) + { + switch(bytes[i]) + { + case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines + case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines + case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines + case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) + case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) + case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines + case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines + case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored + case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored + case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored + case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored + case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64) + case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored + case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored + case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored + case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored + case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored + case 0x3A: 
l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored + case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored + case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored + case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored + case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored + case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core) + case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines + case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines + case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines + case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines + case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines + case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines + case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines + case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines + case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2 + case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines + case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines + case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines + case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines + case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines + case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines + case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines + case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. 
(IA-64) + case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines + case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines + case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines + case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines + case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines + case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines + case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines + case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines + case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines + case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64) + case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64) + case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64) + case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64) + + default: break; + } + } + if(check_for_p2_core2 && l2 == l3) + l3 = 0; + l1 *= 1024; + l2 *= 1024; + l3 *= 1024; +} + +inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) +{ + if(max_std_funcs>=4) + queryCacheSizes_intel_direct(l1,l2,l3); + else + queryCacheSizes_intel_codes(l1,l2,l3); +} + +inline void queryCacheSizes_amd(int& l1, int& l2, int& l3) +{ + int abcd[4]; + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + EIGEN_CPUID(abcd,0x80000005,0); + l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + EIGEN_CPUID(abcd,0x80000006,0); + l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB + l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB +} +#endif + +/** \internal + * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */ +inline void queryCacheSizes(int& l1, int& l2, int& l3) +{ + #ifdef EIGEN_CPUID + int abcd[4]; + const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e}; + const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163}; + const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!" 
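+  // [Editorial note, not part of upstream Eigen] The constants above are the
+  // three little-endian dwords of the 12-byte CPUID vendor string. For
+  // "GenuineIntel": "Genu" -> 0x756e6547 (EBX), "ineI" -> 0x49656e69 (EDX),
+  // "ntel" -> 0x6c65746e (ECX), which is exactly the b/d/c order compared by
+  // cpuid_is_vendor().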
+ + // identify the CPU vendor + EIGEN_CPUID(abcd,0x0,0); + int max_std_funcs = abcd[1]; + if(cpuid_is_vendor(abcd,GenuineIntel)) + queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_)) + queryCacheSizes_amd(l1,l2,l3); + else + // by default let's use Intel's API + queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + + // here is the list of other vendors: +// ||cpuid_is_vendor(abcd,"VIA VIA VIA ") +// ||cpuid_is_vendor(abcd,"CyrixInstead") +// ||cpuid_is_vendor(abcd,"CentaurHauls") +// ||cpuid_is_vendor(abcd,"GenuineTMx86") +// ||cpuid_is_vendor(abcd,"TransmetaCPU") +// ||cpuid_is_vendor(abcd,"RiseRiseRise") +// ||cpuid_is_vendor(abcd,"Geode by NSC") +// ||cpuid_is_vendor(abcd,"SiS SiS SiS ") +// ||cpuid_is_vendor(abcd,"UMC UMC UMC ") +// ||cpuid_is_vendor(abcd,"NexGenDriven") + #else + l1 = l2 = l3 = -1; + #endif +} + +/** \internal + * \returns the size in Bytes of the L1 data cache */ +inline int queryL1CacheSize() +{ + int l1(-1), l2, l3; + queryCacheSizes(l1,l2,l3); + return l1; +} + +/** \internal + * \returns the size in Bytes of the L2 or L3 cache if this later is present */ +inline int queryTopLevelCacheSize() +{ + int l1, l2(-1), l3(-1); + queryCacheSizes(l1,l2,l3); + return (std::max)(l2,l3); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MEMORY_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Meta.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Meta.h new file mode 100644 index 000000000..d31e95411 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/Meta.h @@ -0,0 +1,534 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_META_H +#define EIGEN_META_H + +#if defined(__CUDA_ARCH__) +#include +#include +#endif + +#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L +#include +#endif + +namespace Eigen { + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; + +/** + * \brief The Index type as used for the API. + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa \blank \ref TopicPreprocessorDirectives, StorageIndex. + */ + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index; + +namespace internal { + +/** \internal + * \file Meta.h + * This file contains generic metaprogramming classes which are not specifically related to Eigen. + * \note In case you wonder, yes we're aware that Boost already provides all these features, + * we however don't want to add a dependency to Boost. + */ + +// Only recent versions of ICC complain about using ptrdiff_t to hold pointers, +// and older versions do not provide *intptr_t types. 
+#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L +typedef std::intptr_t IntPtr; +typedef std::uintptr_t UIntPtr; +#else +typedef std::ptrdiff_t IntPtr; +typedef std::size_t UIntPtr; +#endif + +struct true_type { enum { value = 1 }; }; +struct false_type { enum { value = 0 }; }; + +template +struct conditional { typedef Then type; }; + +template +struct conditional { typedef Else type; }; + +template struct is_same { enum { value = 0 }; }; +template struct is_same { enum { value = 1 }; }; + +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; + +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type[]; }; +template struct remove_const { typedef T type[Size]; }; + +template struct remove_all { typedef T type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; + +template struct is_arithmetic { enum { value = false }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic{ enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template struct is_integral { enum { value = false }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; + +#if EIGEN_HAS_CXX11 +using std::make_unsigned; +#else +// TODO: Possibly improve this implementation of make_unsigned. +// It is currently used only by +// template struct random_default_impl. 
+template struct make_unsigned; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned short type; }; +template<> struct make_unsigned { typedef unsigned short type; }; +template<> struct make_unsigned { typedef unsigned int type; }; +template<> struct make_unsigned { typedef unsigned int type; }; +template<> struct make_unsigned { typedef unsigned long type; }; +template<> struct make_unsigned { typedef unsigned long type; }; +#if EIGEN_COMP_MSVC +template<> struct make_unsigned { typedef unsigned __int64 type; }; +template<> struct make_unsigned { typedef unsigned __int64 type; }; +#endif +#endif + +template struct add_const { typedef const T type; }; +template struct add_const { typedef T& type; }; + +template struct is_const { enum { value = 0 }; }; +template struct is_const { enum { value = 1 }; }; + +template struct add_const_on_value_type { typedef const T type; }; +template struct add_const_on_value_type { typedef T const& type; }; +template struct add_const_on_value_type { typedef T const* type; }; +template struct add_const_on_value_type { typedef T const* const type; }; +template struct add_const_on_value_type { typedef T const* const type; }; + + +template +struct is_convertible_impl +{ +private: + struct any_conversion + { + template any_conversion(const volatile T&); + template any_conversion(T&); + }; + struct yes {int a[1];}; + struct no {int a[2];}; + + static yes test(const To&, int); + static no test(any_conversion, ...); + +public: + static From ms_from; +#ifdef __INTEL_COMPILER + #pragma warning push + #pragma warning ( disable : 2259 ) +#endif + enum { value = sizeof(test(ms_from, 0))==sizeof(yes) }; +#ifdef __INTEL_COMPILER + #pragma warning pop +#endif +}; + +template +struct is_convertible +{ + enum { value = is_convertible_impl::type, + typename remove_all::type>::value }; +}; + +/** \internal Allows to enable/disable an overload + * according to a compile time condition. 
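+ *
+ * [Editorial note, not part of upstream Eigen] A hypothetical sketch of the
+ * usual SFINAE pattern built on it:
+ * \code
+ * template<typename T>
+ * typename internal::enable_if<internal::is_integral<T>::value,T>::type
+ * half(T x) { return x / 2; }  // viable only when T is an integral type
+ * \endcode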
+ */
+template<bool Condition, typename T=void> struct enable_if;
+
+template<typename T> struct enable_if<true,T>
+{ typedef T type; };
+
+#if defined(__CUDA_ARCH__)
+#if !defined(__FLT_EPSILON__)
+#define __FLT_EPSILON__ FLT_EPSILON
+#define __DBL_EPSILON__ DBL_EPSILON
+#endif
+
+namespace device {
+
+template<typename T> struct numeric_limits
+{
+  EIGEN_DEVICE_FUNC
+  static T epsilon() { return 0; }
+  static T (max)() { assert(false && "Highest not supported for this type"); }
+  static T (min)() { assert(false && "Lowest not supported for this type"); }
+  static T infinity() { assert(false && "Infinity not supported for this type"); }
+  static T quiet_NaN() { assert(false && "quiet_NaN not supported for this type"); }
+};
+template<> struct numeric_limits<float>
+{
+  EIGEN_DEVICE_FUNC
+  static float epsilon() { return __FLT_EPSILON__; }
+  EIGEN_DEVICE_FUNC
+  static float (max)() { return CUDART_MAX_NORMAL_F; }
+  EIGEN_DEVICE_FUNC
+  static float (min)() { return FLT_MIN; }
+  EIGEN_DEVICE_FUNC
+  static float infinity() { return CUDART_INF_F; }
+  EIGEN_DEVICE_FUNC
+  static float quiet_NaN() { return CUDART_NAN_F; }
+};
+template<> struct numeric_limits<double>
+{
+  EIGEN_DEVICE_FUNC
+  static double epsilon() { return __DBL_EPSILON__; }
+  EIGEN_DEVICE_FUNC
+  static double (max)() { return DBL_MAX; }
+  EIGEN_DEVICE_FUNC
+  static double (min)() { return DBL_MIN; }
+  EIGEN_DEVICE_FUNC
+  static double infinity() { return CUDART_INF; }
+  EIGEN_DEVICE_FUNC
+  static double quiet_NaN() { return CUDART_NAN; }
+};
+template<> struct numeric_limits<int>
+{
+  EIGEN_DEVICE_FUNC
+  static int epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static int (max)() { return INT_MAX; }
+  EIGEN_DEVICE_FUNC
+  static int (min)() { return INT_MIN; }
+};
+template<> struct numeric_limits<unsigned int>
+{
+  EIGEN_DEVICE_FUNC
+  static unsigned int epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static unsigned int (max)() { return UINT_MAX; }
+  EIGEN_DEVICE_FUNC
+  static unsigned int (min)() { return 0; }
+};
+template<> struct numeric_limits<long>
+{
+  EIGEN_DEVICE_FUNC
+  static long epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static long (max)() { return LONG_MAX; }
+  EIGEN_DEVICE_FUNC
+  static long (min)() { return LONG_MIN; }
+};
+template<> struct numeric_limits<unsigned long>
+{
+  EIGEN_DEVICE_FUNC
+  static unsigned long epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static unsigned long (max)() { return ULONG_MAX; }
+  EIGEN_DEVICE_FUNC
+  static unsigned long (min)() { return 0; }
+};
+template<> struct numeric_limits<long long>
+{
+  EIGEN_DEVICE_FUNC
+  static long long epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static long long (max)() { return LLONG_MAX; }
+  EIGEN_DEVICE_FUNC
+  static long long (min)() { return LLONG_MIN; }
+};
+template<> struct numeric_limits<unsigned long long>
+{
+  EIGEN_DEVICE_FUNC
+  static unsigned long long epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static unsigned long long (max)() { return ULLONG_MAX; }
+  EIGEN_DEVICE_FUNC
+  static unsigned long long (min)() { return 0; }
+};
+
+}
+
+#endif
+
+/** \internal
+  * A base class to disable the default copy ctor and copy assignment operator.
+  */
+class noncopyable
+{
+  EIGEN_DEVICE_FUNC noncopyable(const noncopyable&);
+  EIGEN_DEVICE_FUNC const noncopyable& operator=(const noncopyable&);
+protected:
+  EIGEN_DEVICE_FUNC noncopyable() {}
+  EIGEN_DEVICE_FUNC ~noncopyable() {}
+};
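enable_if is the classic C++03 SFINAE switch: when the condition is false there is no nested `type`, so the overload silently drops out of the candidate set instead of producing an error. A sketch, assuming only the traits defined above:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

using namespace Eigen::internal;

// Selected only for arithmetic T; substitution fails harmlessly otherwise.
template<typename T>
typename enable_if<is_arithmetic<T>::value, T>::type
twice(const T& x) { return x + x; }

int main() { return twice(21) == 42 ? 0 : 1; }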
+ * If none of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack.
+ */
+#if EIGEN_HAS_STD_RESULT_OF
+template<typename T> struct result_of {
+  typedef typename std::result_of<T>::type type1;
+  typedef typename remove_all<type1>::type type;
+};
+#else
+template<typename T> struct result_of { };
+
+struct has_none {int a[1];};
+struct has_std_result_type {int a[2];};
+struct has_tr1_result {int a[3];};
+
+template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
+struct unary_result_of_select {typedef typename internal::remove_all<ArgType>::type type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
+
+template<typename Func, typename ArgType>
+struct result_of<Func(ArgType)> {
+    template<typename T>
+    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);
+    template<typename T>
+    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
+    static has_none               testFunctor(...);
+
+    // note that the following indirection is needed for gcc-3.3
+    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+    typedef typename unary_result_of_select<Func, ArgType, FunctorType>::type type;
+};
+
+template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>
+struct binary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
+{typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
+{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct result_of<Func(ArgType0,ArgType1)> {
+    template<typename T>
+    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);
+    template<typename T>
+    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
+    static has_none               testFunctor(...);
+
+    // note that the following indirection is needed for gcc-3.3
+    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+    typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
+};
+
+template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2, int SizeOf=sizeof(has_none)>
+struct ternary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
+struct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>
+{typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
+struct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>
+{typedef typename Func::template result<Func(ArgType0,ArgType1,ArgType2)>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
+struct result_of<Func(ArgType0,ArgType1,ArgType2)> {
+    template<typename T>
+    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);
+    template<typename T>
+    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1,ArgType2)>::type const * = 0);
+    static has_none               testFunctor(...);
+
+    // note that the following indirection is needed for gcc-3.3
+    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+    typedef typename ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, FunctorType>::type type;
+};
+#endif
+
+struct meta_yes { char a[1]; };
+struct meta_no  { char a[2]; };
+
+// Check whether T::ReturnType does exist
+template <typename T>
+struct has_ReturnType
+{
+  template <typename C> static meta_yes testFunctor(typename C::ReturnType const *);
+  template <typename C> static meta_no  testFunctor(...);
+
+  enum { value = sizeof(testFunctor<T>(0)) == sizeof(meta_yes) };
+};
+
+template<typename T> const T* return_ptr();
+
+template <typename T, typename IndexType=Index>
+struct has_nullary_operator
+{
+  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()())>0)>::type * = 0);
+  static meta_no testFunctor(...);
+
+  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
+};
+
+template <typename T, typename IndexType=Index>
+struct has_unary_operator
+{
+  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0)))>0)>::type * = 0);
+  static meta_no testFunctor(...);
+
+  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
+};
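has_ReturnType is the member-detection idiom: overload resolution prefers the pointer overload when C::ReturnType names a type, and the differing sizeof of meta_yes/meta_no exposes the outcome as a constant. A sketch under the definitions above:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

struct WithRT    { typedef double ReturnType; };
struct WithoutRT { };

int main()
{
  using Eigen::internal::has_ReturnType;
  return (has_ReturnType<WithRT>::value == 1 &&
          has_ReturnType<WithoutRT>::value == 0) ? 0 : 1;
}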
+template <typename T, typename IndexType=Index>
+struct has_binary_operator
+{
+  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0),IndexType(0)))>0)>::type * = 0);
+  static meta_no testFunctor(...);
+
+  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
+};
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+  * Usage example: \code meta_sqrt<1023>::ret \endcode
+  */
+template<int Y,
+         int InfX = 0,
+         int SupX = ((Y==1) ? 1 : Y/2),
+         bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+                                // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class meta_sqrt
+{
+    enum {
+      MidX = (InfX+SupX)/2,
+      TakeInf = MidX*MidX > Y ? 1 : 0,
+      NewInf = int(TakeInf) ? InfX : int(MidX),
+      NewSup = int(TakeInf) ? int(MidX) : SupX
+    };
+  public:
+    enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class meta_sqrt<Y, InfX, SupX, true> { public:  enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
+
+
+/** \internal Computes the least common multiple of two positive integer A and B
+  * at compile-time. It implements a naive algorithm testing all multiples of A.
+  * It thus works better if A>=B.
+  */
+template<int A, int B, int K=1, bool Done = ((A*K)%B)==0>
+struct meta_least_common_multiple
+{
+  enum { ret = meta_least_common_multiple<A,B,K+1>::ret };
+};
+template<int A, int B, int K>
+struct meta_least_common_multiple<A,B,K,true>
+{
+  enum { ret = A*K };
+};
+
+/** \internal determines whether the product of two numeric types is allowed and what the return type is */
+template<typename T, typename U> struct scalar_product_traits
+{
+  enum { Defined = 0 };
+};
+
+// FIXME quick workaround around current limitation of result_of
+// template<typename Scalar, typename ArgType0, typename ArgType1>
+// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
+// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
+// };
+
+} // end namespace internal
+
+namespace numext {
+
+#if defined(__CUDA_ARCH__)
+template<typename T> EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }
+#else
+template<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }
+#endif
+
+#if defined(__CUDA_ARCH__)
+using internal::device::numeric_limits;
+#else
+using std::numeric_limits;
+#endif
+
+// Integer division with rounding up.
+// T is assumed to be an integer type with a>=0, and b>0
+template<typename T>
+T div_ceil(const T &a, const T &b)
+{
+  return (a+b-1) / b;
+}
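meta_sqrt runs a binary search entirely in template instantiations, and meta_least_common_multiple simply tries successive multiples of A; both resolve to plain enum constants. A quick check, assuming only the headers above:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

int main()
{
  using namespace Eigen::internal;
  int r1 = meta_sqrt<1023>::ret;                   // 31, since 31*31 = 961 <= 1023 < 32*32
  int r2 = meta_least_common_multiple<6, 4>::ret;  // 12
  int r3 = Eigen::numext::div_ceil(7, 3);          // 3: integer division rounded up
  return (r1 == 31 && r2 == 12 && r3 == 3) ? 0 : 1;
}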
+// The aim of the following functions is to bypass -Wfloat-equal warnings
+// when we really want a strict equality comparison on floating points.
+template<typename X, typename Y> EIGEN_STRONG_INLINE
+bool equal_strict(const X& x,const Y& y) { return x == y; }
+
+template<> EIGEN_STRONG_INLINE
+bool equal_strict(const float& x,const float& y) { return std::equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE
+bool equal_strict(const double& x,const double& y) { return std::equal_to<double>()(x,y); }
+
+template<typename X, typename Y> EIGEN_STRONG_INLINE
+bool not_equal_strict(const X& x,const Y& y) { return x != y; }
+
+template<> EIGEN_STRONG_INLINE
+bool not_equal_strict(const float& x,const float& y) { return std::not_equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE
+bool not_equal_strict(const double& x,const double& y) { return std::not_equal_to<double>()(x,y); }
+
+} // end namespace numext
+
+} // end namespace Eigen
+
+#endif // EIGEN_META_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/NonMPL2.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/NonMPL2.h
new file mode 100644
index 000000000..1af67cf18
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/NonMPL2.h
@@ -0,0 +1,3 @@
+#ifdef EIGEN_MPL2_ONLY
+#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode
+#endif
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
new file mode 100644
index 000000000..ecc82b7c8
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -0,0 +1,27 @@
+#ifdef EIGEN_WARNINGS_DISABLED
+#undef EIGEN_WARNINGS_DISABLED
+
+#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+  #ifdef _MSC_VER
+    #pragma warning( pop )
+  #elif defined __INTEL_COMPILER
+    #pragma warning pop
+  #elif defined __clang__
+    #pragma clang diagnostic pop
+  #elif defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+    #pragma GCC diagnostic pop
+  #endif
+
+  #if defined __NVCC__
+// Don't reenable the diagnostic messages, as it turns out these messages need
+// to be disabled at the point of the template instantiation (i.e the user code)
+// otherwise they'll be triggered by nvcc.
+//    #pragma diag_default code_is_unreachable
+//    #pragma diag_default initialization_not_reachable
+//    #pragma diag_default 2651
+//    #pragma diag_default 2653
+  #endif
+
+#endif
+
+#endif // EIGEN_WARNINGS_DISABLED
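Routing the comparison through std::equal_to sidesteps -Wfloat-equal because the warning only fires on a literal `==`; the semantics are unchanged. A sketch of the intended use of the Meta.h helpers above:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

int main()
{
  float a = 0.1f;
  float b = a;
  // Strict value equality, unlike isApprox(); no -Wfloat-equal noise.
  return Eigen::numext::equal_strict(a, b) ? 0 : 1;
}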
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/StaticAssert.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/StaticAssert.h
new file mode 100644
index 000000000..500e47792
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -0,0 +1,218 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+// Copyright (C) 2008 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STATIC_ASSERT_H
+#define EIGEN_STATIC_ASSERT_H
+
+/* Some notes on Eigen's static assertion mechanism:
+ *
+ *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
+ *    expression, and MSG an enum listed in struct internal::static_assertion<true>
+ *
+ *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
+ *    in that case, the static assertion is converted to the following runtime assert:
+ *      eigen_assert(CONDITION && "MSG")
+ *
+ *  - currently EIGEN_STATIC_ASSERT can only be used in function scope
+ *
+ */
+
+#ifndef EIGEN_STATIC_ASSERT
+#ifndef EIGEN_NO_STATIC_ASSERT
+
+  #if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600))
+
+    // if native static_assert is enabled, let's use it
+    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
+
+  #else // not CXX0X
+
+    namespace Eigen {
+
+    namespace internal {
+
+    template<bool condition>
+    struct static_assertion {};
+
+    template<>
+    struct static_assertion<true>
+    {
+      enum {
+        YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX=1,
+        YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES=1,
+        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES=1,
+        THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE=1,
+        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE=1,
+        THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE=1,
+        OUT_OF_RANGE_ACCESS=1,
+        YOU_MADE_A_PROGRAMMING_MISTAKE=1,
+        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT=1,
+        EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE=1,
+        YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR=1,
+        YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR=1,
+        UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC=1,
+        THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES=1,
+        FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED=1,
+        NUMERIC_TYPE_MUST_BE_REAL=1,
+        COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED=1,
+        WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED=1,
+        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE=1,
+        INVALID_MATRIX_PRODUCT=1,
+        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS=1,
+        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION=1,
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY=1,
+        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES=1,
+        THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES=1,
+        INVALID_MATRIX_TEMPLATE_PARAMETERS=1,
+        INVALID_MATRIXBASE_TEMPLATE_PARAMETERS=1,
+        BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER=1,
+        THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX=1,
+        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE=1,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES=1,
+        YOU_ALREADY_SPECIFIED_THIS_STRIDE=1,
+        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION=1,
+        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD=1,
+        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1=1,
+        THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS=1,
+        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES=1,
+        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION=1,
+        THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY=1,
+        YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT=1,
+        THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS=1,
+        THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS=1,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL=1,
+        THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES=1,
+        YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED=1,
+        YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED=1,
+        THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE=1,
+        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH=1,
+        OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG=1,
+        IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY=1,
+        STORAGE_LAYOUT_DOES_NOT_MATCH=1,
+        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE=1,
+        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS=1,
+        MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY=1,
+        THIS_TYPE_IS_NOT_SUPPORTED=1,
+        STORAGE_KIND_MUST_MATCH=1,
+        STORAGE_INDEX_MUST_MATCH=1,
+        CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1,
+        SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY=1
+      };
+    };
+
+    } // end namespace internal
+
+    } // end namespace Eigen
+
+    // Specialized implementation for MSVC to avoid "conditional
+    // expression is constant" warnings.  This implementation doesn't
+    // appear to work under GCC, hence the multiple implementations.
+    #if EIGEN_COMP_MSVC
+
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
+
+    #else
+      // In some cases clang interprets bool(CONDITION) as function declaration
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        if (Eigen::internal::static_assertion<static_cast<bool>(CONDITION)>::MSG) {}
+
+    #endif
+
+  #endif // not CXX0X
+
+#else // EIGEN_NO_STATIC_ASSERT
+
+  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
+
+#endif // EIGEN_NO_STATIC_ASSERT
+#endif // EIGEN_STATIC_ASSERT
+
+// static assertion failing if the type \a TYPE is not a vector type
+#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
+                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
+
+// static assertion failing if the type \a TYPE is not fixed-size
+#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
+                      YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not dynamic-size
+#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
+                      YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not a vector type of the given size
+#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
+                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the type \a TYPE is not a matrix type of the given size
+#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
+  EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
+                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
+#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
+    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
+#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+     ( \
+        (int(Eigen::internal::size_of_xpr_at_compile_time<TYPE0>::ret)==0 && int(Eigen::internal::size_of_xpr_at_compile_time<TYPE1>::ret)==0) \
+    || (\
+          (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
+      &&  (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
+       ) \
+     )
+
+#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+
+
+// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
+#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+     EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
+    YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
+
+#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
+      EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
+                          (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+                          THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
+
+#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
+      EIGEN_STATIC_ASSERT(Eigen::internal::is_lvalue<Derived>::value, \
+                          THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
+
+#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
+      EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived>::XprKind, ArrayXpr>::value), \
+                          THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
+
+#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
+      EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived1>::XprKind, \
+                                                    typename Eigen::internal::traits<Derived2>::XprKind \
+                                                   >::value), \
+                          YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
+
+// Check that a cost value is positive, and that it stays within a reasonable range
+// TODO this check could be enabled for internal debugging only
+#define EIGEN_INTERNAL_CHECK_COST_VALUE(C) \
+      EIGEN_STATIC_ASSERT((C)>=0 && (C)<=HugeCost*HugeCost, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE);
+
+#endif // EIGEN_STATIC_ASSERT_H
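Before native static_assert, the trick above turns a failed condition into a reference to an enumerator of the undefined static_assertion<false>, so the enumerator's name (the MSG) shows up verbatim in the compiler error. A usage sketch:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

template<typename Derived>
void vector_only(const Eigen::MatrixBase<Derived>& v)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);  // readable error if Derived is 2-D
  (void)v;
}

int main()
{
  Eigen::Vector3f v(1.f, 2.f, 3.f);
  vector_only(v);                        // fine
  // vector_only(Eigen::Matrix3f());     // would fail to compile, mentioning
  //   YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX
  return 0;
}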
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/XprHelper.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/XprHelper.h
new file mode 100644
index 000000000..bf424a00f
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Core/util/XprHelper.h
@@ -0,0 +1,824 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+// Copyright (C) 2006-2008 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_XPRHELPER_H
+#define EIGEN_XPRHELPER_H
+
+// just a workaround because GCC seems to not really like empty structs
+// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
+// so currently we simply disable this optimization for gcc 4.3
+#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
+  #define EIGEN_EMPTY_STRUCT_CTOR(X) \
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
+#else
+  #define EIGEN_EMPTY_STRUCT_CTOR(X)
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename IndexDest, typename IndexSrc>
+EIGEN_DEVICE_FUNC
+inline IndexDest convert_index(const IndexSrc& idx) {
+  // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
+  eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value too big for target type");
+  return IndexDest(idx);
+}
+
+
+// promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
+//    expression * scalar
+// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
+// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
+// Then the logic is as follows:
+//  - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
+//  - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND that this does not imply a float to integer conversion.
+//  - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion.
+//  - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
+template<typename ExprScalar,typename T, bool IsSupported>
+struct promote_scalar_arg;
+
+template<typename S,typename T>
+struct promote_scalar_arg<S,T,true>
+{
+  typedef T type;
+};
+
+// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
+template<typename ExprScalar,typename T,typename PromotedType,
+  bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
+  bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
+struct promote_scalar_arg_unsupported;
+
+// Start recursion with NumTraits<ExprScalar>::Literal
+template<typename S,typename T>
+struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};
+
+// We found a match!
+template<typename S,typename T, typename PromotedType>
+struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
+{
+  typedef PromotedType type;
+};
+
+// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
+// so let's try to promote to ExprScalar
+template<typename ExprScalar,typename T, typename PromotedType>
+struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
+   : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
+{};
+
+// Unsafe real-to-integer, let's stop.
+template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
+struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};
+
+// T is not even convertible to ExprScalar, let's stop.
+template<typename S,typename T>
+struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
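promote_scalar_arg is what lets `m * 2` compile on a float matrix: the integer literal is promoted to the expression's scalar type only when the conversion is lossless in the float-vs-integer sense. A behavioral sketch from the user's side:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

int main()
{
  Eigen::Matrix2f m = Eigen::Matrix2f::Identity();
  Eigen::Matrix2f a = m * 2;    // int literal promoted to float
  Eigen::Matrix2f b = 0.5 * m;  // double literal likewise accepted
  return (a(0,0) == 2.f && b(0,0) == 0.5f) ? 0 : 1;
}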
+//classes inheriting no_assignment_operator don't generate a default operator=.
+class no_assignment_operator
+{
+  private:
+    no_assignment_operator& operator=(const no_assignment_operator&);
+  protected:
+    EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
+    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
+};
+
+/** \internal return the index type with the largest number of bits */
+template<typename I1, typename I2>
+struct promote_index_type
+{
+  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
+};
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
+  * can be accessed using value() and setValue().
+  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
+  */
+template<typename T, int Value> class variable_if_dynamic
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
+    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamic<T, Dynamic>
+{
+    T m_value;
+    EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
+  public:
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
+};
+
+/** \internal like variable_if_dynamic but for DynamicIndex
+  */
+template<typename T, int Value> class variable_if_dynamicindex
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
+    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
+{
+    T m_value;
+    EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
+  public:
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
+    EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
+};
+
+template<typename T> struct functor_traits
+{
+  enum
+  {
+    Cost = 10,
+    PacketAccess = false,
+    IsRepeatable = false
+  };
+};
+
+template<typename T> struct packet_traits;
+
+template<typename T> struct unpacket_traits
+{
+  typedef T type;
+  typedef T half;
+  enum
+  {
+    size = 1,
+    alignment = 1
+  };
+};
+
+template<int Size, typename PacketType,
+         bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
+struct find_best_packet_helper;
+
+template< int Size, typename PacketType>
+struct find_best_packet_helper<Size,PacketType,true>
+{
+  typedef PacketType type;
+};
+
+template<int Size, typename PacketType>
+struct find_best_packet_helper<Size,PacketType,false>
+{
+  typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
+};
+
+template<typename T, int Size>
+struct find_best_packet
+{
+  typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
+};
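variable_if_dynamic is the zero-size trick behind Eigen's fixed-size objects: when the extent is a compile-time constant the member stores nothing and value() folds to that constant; only Dynamic pays for storage. A behavioral sketch:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>
#include <cstdio>

int main()
{
  // Four floats and nothing else: the extents live in the type.
  std::printf("%zu\n", sizeof(Eigen::Matrix2f));  // typically 16
  // Dynamic matrices additionally store a data pointer and two Index extents.
  std::printf("%zu\n", sizeof(Eigen::MatrixXf));  // typically 24 on LP64
  return 0;
}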
+#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
+template<int ArrayBytes, int AlignmentBytes,
+         bool Match     =  bool((ArrayBytes%AlignmentBytes)==0),
+         bool TryHalf   =  bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
+struct compute_default_alignment_helper
+{
+  enum { value = 0 };
+};
+
+template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
+struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
+{
+  enum { value = AlignmentBytes };
+};
+
+template<int ArrayBytes, int AlignmentBytes>
+struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
+{
+  // current packet too large, try with an half-packet
+  enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
+};
+#else
+// If static alignment is disabled, no need to bother.
+// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
+template<int ArrayBytes, int AlignmentBytes>
+struct compute_default_alignment_helper
+{
+  enum { value = 0 };
+};
+#endif
+
+template<typename T, int Size> struct compute_default_alignment {
+  enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
+};
+
+template<typename T> struct compute_default_alignment<T,Dynamic> {
+  enum { value = EIGEN_MAX_ALIGN_BYTES };
+};
+
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+         int _MaxRows = _Rows,
+         int _MaxCols = _Cols
+> class make_proper_matrix_type
+{
+    enum {
+      IsColVector = _Cols==1 && _Rows!=1,
+      IsRowVector = _Rows==1 && _Cols!=1,
+      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
+              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
+              : _Options
+    };
+  public:
+    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+class compute_matrix_flags
+{
+    enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
+  public:
+    // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
+    // and then propagate this information to the evaluator's flags.
+    // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
+    enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
+};
+
+template<int _Rows, int _Cols> struct size_at_compile_time
+{
+  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
+};
+
+template<typename XprType> struct size_of_xpr_at_compile_time
+{
+  enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
+};
+
+/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
+ * whereas eval is a const reference in the case of a matrix
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
+template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
+template<typename T> struct plain_matrix_type<T,Dense>
+{
+  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
+};
+template<typename T> struct plain_matrix_type<T,DiagonalShape>
+{
+  typedef typename T::PlainObject type;
+};
+
+template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
+{
+  typedef Matrix<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
+{
+  typedef Array<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+/* eval : the return type of eval(). For matrices, this is just a const reference
+ * in order to avoid a useless copy
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
+
+template<typename T> struct eval<T,Dense>
+{
+  typedef typename plain_matrix_type<T>::type type;
+//   typedef typename T::PlainObject type;
+//   typedef T::Matrix<typename traits<T>::Scalar,
+//                 traits<T>::RowsAtCompileTime,
+//                 traits<T>::ColsAtCompileTime,
+//                 AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+//                 traits<T>::MaxRowsAtCompileTime,
+//                 traits<T>::MaxColsAtCompileTime
+//           > type;
+};
+template<typename T> struct eval<T,DiagonalShape>
+{
+  typedef typename plain_matrix_type<T>::type type;
+};
+
+// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+
+/* similar to plain_matrix_type, but using the evaluator's Flags */
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;
+
+template<typename T>
+struct plain_object_eval<T,Dense>
+{
+  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
+};
+
+
+/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
+ */
+template<typename T> struct plain_matrix_type_column_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
+ */
+template<typename T> struct plain_matrix_type_row_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+/** \internal The reference selector for template expressions. The idea is that we don't
+  * need to use references for expressions since they are light weight proxy
+  * objects which should generate no copying overhead. */
+template <typename T>
+struct ref_selector
+{
+  typedef typename conditional<
+    bool(traits<T>::Flags & NestByRefBit),
+    T const&,
+    const T
+  >::type type;
+
+  typedef typename conditional<
+    bool(traits<T>::Flags & NestByRefBit),
+    T &,
+    T
+  >::type non_const_type;
+};
+
+/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
+template<typename T1, typename T2>
+struct transfer_constness
+{
+  typedef typename conditional<
+    bool(internal::is_const<T1>::value),
+    typename internal::add_const_on_value_type<T2>::type,
+    T2
+  >::type type;
+};
+
+
+// However, we still need a mechanism to detect whether an expression which is evaluated multiple time
+// has to be evaluated into a temporary.
+// That's the purpose of this new nested_eval helper:
+/** \internal Determines how a given expression should be nested when evaluated multiple times.
+  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
+  * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
+  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
+  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
+  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
+  *
+  * \tparam T the type of the expression being nested.
+  * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
+  * \tparam PlainObject the type of the temporary if needed.
+  */
+template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
+{
+  enum {
+    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
+    CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluate itself into a tempory?
+                                                  //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
+                                                  //      This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON
+                                                  //      for all evaluator creating a temporary. This flag is then propagated by the parent evaluators.
+                                                  //      Another solution could be to count the number of temps?
+    NAsInteger = n == Dynamic ? HugeCost : n,
+    CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
+    CostNoEval = NAsInteger * CoeffReadCost,
+    Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
+  };
+
+  typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
+};
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+inline T* const_cast_ptr(const T* ptr)
+{
+  return const_cast<T*>(ptr);
+}
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
+struct dense_xpr_base
+{
+  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, MatrixXpr>
+{
+  typedef MatrixBase<Derived> type;
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, ArrayXpr>
+{
+  typedef ArrayBase<Derived> type;
+};
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
+struct generic_xpr_base;
+
+template<typename Derived, typename XprKind>
+struct generic_xpr_base<Derived, XprKind, Dense>
+{
+  typedef typename dense_xpr_base<Derived,XprKind>::type type;
+};
+
+template<typename XprType, typename CastType> struct cast_return_type
+{
+  typedef typename XprType::Scalar CurrentScalarType;
+  typedef typename remove_all<CastType>::type _CastType;
+  typedef typename _CastType::Scalar NewScalarType;
+  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
+                              const XprType&,CastType>::type type;
+};
+
+template <typename A, typename B> struct promote_storage_type;
+
+template <typename A> struct promote_storage_type<A,A>
+{
+  typedef A ret;
+};
+template <typename A> struct promote_storage_type<A, const A>
+{
+  typedef A ret;
+};
+template <typename A> struct promote_storage_type<const A, A>
+{
+  typedef A ret;
+};
+
+/** \internal Specify the "storage kind" of applying a coefficient-wise
+  * binary operations between two expressions of kinds A and B respectively.
+  * The template parameter Functor permits to specialize the resulting storage kind wrt to
+  * the functor.
+  * The default rules are as follows:
+  * \code
+  * A      op A      -> A
+  * A      op dense  -> dense
+  * dense  op B      -> dense
+  * sparse op dense  -> sparse
+  * dense  op sparse -> sparse
+  * \endcode
+  */
+template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;
+
+template <typename A, typename Functor>  struct cwise_promote_storage_type<A,A,Functor>          { typedef A      ret; };
+template <typename Functor>              struct cwise_promote_storage_type<Dense,Dense,Functor>  { typedef Dense  ret; };
+template <typename A, typename Functor>  struct cwise_promote_storage_type<A,Dense,Functor>      { typedef Dense  ret; };
+template <typename B, typename Functor>  struct cwise_promote_storage_type<Dense,B,Functor>      { typedef Dense  ret; };
+template <typename Functor>              struct cwise_promote_storage_type<Sparse,Dense,Functor> { typedef Sparse ret; };
+template <typename Functor>              struct cwise_promote_storage_type<Dense,Sparse,Functor> { typedef Sparse ret; };
+
+template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
+  enum { value = LhsOrder };
+};
+
+template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
+template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
+template <int Order>                                    struct cwise_promote_storage_order<Sparse,Sparse,Order,Order>        { enum { value = Order }; };
+/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
+  * The template parameter ProductTag permits to specialize the resulting storage kind wrt to
+  * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
+  * The default rules are as follows:
+  * \code
+  *  K * K            -> K
+  *  dense * K        -> dense
+  *  K * dense        -> dense
+  *  diag * K         -> K
+  *  K * diag         -> K
+  *  Perm * K         -> K
+  *  K * Perm         -> K
+  * \endcode
+  */
+template <typename A, typename B, int ProductTag> struct product_promote_storage_type;
+
+template <typename A, int ProductTag> struct product_promote_storage_type<A,                  A,                  ProductTag> { typedef A     ret; };
+template <int ProductTag>             struct product_promote_storage_type<Dense,              Dense,              ProductTag> { typedef Dense ret; };
+template <typename A, int ProductTag> struct product_promote_storage_type<A,                  Dense,              ProductTag> { typedef Dense ret; };
+template <typename B, int ProductTag> struct product_promote_storage_type<Dense,              B,                  ProductTag> { typedef Dense ret; };
+
+template <typename A, int ProductTag> struct product_promote_storage_type<A,                  DiagonalShape,      ProductTag> { typedef A ret; };
+template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape,      B,                  ProductTag> { typedef B ret; };
+template <int ProductTag>             struct product_promote_storage_type<Dense,              DiagonalShape,      ProductTag> { typedef Dense ret; };
+template <int ProductTag>             struct product_promote_storage_type<DiagonalShape,      Dense,              ProductTag> { typedef Dense ret; };
+
+template <typename A, int ProductTag> struct product_promote_storage_type<A,                  PermutationStorage, ProductTag> { typedef A ret; };
+template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B,                  ProductTag> { typedef B ret; };
+template <int ProductTag>             struct product_promote_storage_type<Dense,              PermutationStorage, ProductTag> { typedef Dense ret; };
+template <int ProductTag>             struct product_promote_storage_type<PermutationStorage, Dense,              ProductTag> { typedef Dense ret; };
+
+/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
+  * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
+  */
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_row_type
+{
+  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixRowType,
+    ArrayRowType
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_col_type
+{
+  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
+  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixColType,
+    ArrayColType
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_diag_type
+{
+  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+  };
+  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
+  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixDiagType,
+    ArrayDiagType
+  >::type type;
+};
+
+template<typename Expr, typename Scalar = typename Expr::Scalar>
+struct plain_constant_type
+{
+  enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };
+
+  typedef Array<Scalar,  traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
+                Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;
+
+  typedef Matrix<Scalar,  traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
+                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;
+
+  typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
+};
+
+template<typename ExpressionType>
+struct is_lvalue
+{
+  enum { value = (!bool(is_const<ExpressionType>::value)) &&
+                 bool(traits<ExpressionType>::Flags & LvalueBit) };
+};
+
+template<typename T> struct is_diagonal
+{ enum { ret = false }; };
+
+template<typename T> struct is_diagonal<DiagonalBase<T> >
+{ enum { ret = true }; };
+
+template<typename T> struct is_diagonal<DiagonalWrapper<T> >
+{ enum { ret = true }; };
+
+template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
+{ enum { ret = true }; };
+
+template<typename S1, typename S2> struct glue_shapes;
+template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
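is_lvalue is what EIGEN_STATIC_ASSERT_LVALUE consults before letting you assign through an expression: a const-qualified or otherwise read-only expression has no usable LvalueBit. A sketch under the definitions above:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>

int main()
{
  using Eigen::internal::is_lvalue;
  typedef Eigen::MatrixXf Mat;
  int writable = is_lvalue<Mat>::value;        // 1: plain matrices are lvalues
  int readonly = is_lvalue<const Mat>::value;  // 0: const blocks assignment
  return (writable == 1 && readonly == 0) ? 0 : 1;
}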
+template<typename T1, typename T2>
+bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
+{
+  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
+}
+
+template<typename T1, typename T2>
+bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
+{
+  return false;
+}
+
+// Internal helper defining the cost of a scalar division for the type T.
+// The default heuristic can be specialized for each scalar type and architecture.
+template<typename T, bool Vectorized=false, typename EnableIf = void>
+struct scalar_div_cost {
+  enum { value = 8*NumTraits<T>::MulCost };
+};
+
+template<typename T, bool Vectorized>
+struct scalar_div_cost<std::complex<T>, Vectorized> {
+  enum { value = 2*scalar_div_cost<T>::value
+               + 6*NumTraits<T>::MulCost
+               + 3*NumTraits<T>::AddCost
+  };
+};
+
+
+template<bool Vectorized>
+struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
+template<bool Vectorized>
+struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
+
+
+#ifdef EIGEN_DEBUG_ASSIGN
+std::string demangle_traversal(int t)
+{
+  if(t==DefaultTraversal) return "DefaultTraversal";
+  if(t==LinearTraversal) return "LinearTraversal";
+  if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
+  if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
+  if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
+  return "?";
+}
+std::string demangle_unrolling(int t)
+{
+  if(t==NoUnrolling) return "NoUnrolling";
+  if(t==InnerUnrolling) return "InnerUnrolling";
+  if(t==CompleteUnrolling) return "CompleteUnrolling";
+  return "?";
+}
+std::string demangle_flags(int f)
+{
+  std::string res;
+  if(f&RowMajorBit)                 res += " | RowMajor";
+  if(f&PacketAccessBit)             res += " | Packet";
+  if(f&LinearAccessBit)             res += " | Linear";
+  if(f&LvalueBit)                   res += " | Lvalue";
+  if(f&DirectAccessBit)             res += " | Direct";
+  if(f&NestByRefBit)                res += " | NestByRef";
+  if(f&NoPreferredStorageOrderBit)  res += " | NoPreferredStorageOrderBit";
+
+  return res;
+}
+#endif
+
+} // end namespace internal
+
+
+/** \class ScalarBinaryOpTraits
+  * \ingroup Core_Module
+  *
+  * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
+  *
+  * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
+  *
+  * For instance, let \c U1, \c U2 and \c U3 be three user defined scalar types for which most operations between instances of \c U1 and \c U2 returns an \c U3.
+  * You can let %Eigen knows that by defining:
+    \code
+    template<typename BinaryOp>
+    struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType; };
+    template<typename BinaryOp>
+    struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType; };
+    \endcode
+  * You can then explicitly disable some particular operations to get more explicit error messages:
+    \code
+    template<>
+    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
+    \endcode
+  * Or customize the return type for individual operation:
+    \code
+    template<>
+    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
+    \endcode
+  *
+  * By default, the following generic combinations are supported:
+  <table class="manual">
+  <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
+  <tr            ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
+  <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
+  <tr            ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
+  </table>
+ *
+ * \sa CwiseBinaryOp
+ */
+template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
+struct ScalarBinaryOpTraits
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
+  : internal::scalar_product_traits<ScalarA,ScalarB>
+#endif // EIGEN_PARSED_BY_DOXYGEN
+{};
+
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<T,T,BinaryOp>
+{
+  typedef T ReturnType;
+};
+
+template <typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
+{
+  typedef T ReturnType;
+};
+template <typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
+{
+  typedef T ReturnType;
+};
+
+// For Matrix * Permutation
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<T,void,BinaryOp>
+{
+  typedef T ReturnType;
+};
+
+// For Permutation * Matrix
+template<typename T, typename BinaryOp>
+struct ScalarBinaryOpTraits<void,T,BinaryOp>
+{
+  typedef T ReturnType;
+};
+
+// for Permutation*Permutation
+template<typename BinaryOp>
+struct ScalarBinaryOpTraits<void,void,BinaryOp>
+{
+  typedef void ReturnType;
+};
+
+// We require Lhs and Rhs to have "compatible" scalar types.
+// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
+// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
+// add together a float matrix and a double matrix.
+#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
+  EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+} // end namespace Eigen
+
+#endif // EIGEN_XPRHELPER_H
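EIGEN_CHECK_BINARY_COMPATIBILIY is why adding a float matrix to a double matrix fails with a readable message, while real-and-complex mixes over the same real type are allowed by the specializations above. A sketch:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Core>
#include <complex>

int main()
{
  Eigen::MatrixXcd c = Eigen::MatrixXcd::Identity(2,2);
  Eigen::MatrixXcd ok = c * 2.0;  // complex<double> * double: allowed
  // Eigen::MatrixXd r = Eigen::MatrixXd::Ones(2,2);
  // auto bad = c + r.cast<float>().cast<double>();  // fine only after cast;
  // mixing scalar types directly triggers
  //   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__..._CAST_NUMERIC_TYPES_EXPLICITLY
  return ok(0,0) == std::complex<double>(2.0, 0.0) ? 0 : 1;
}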
+ *
+ * The main function in this class is compute(), which computes the
+ * eigenvalues and eigenvectors of a given matrix. The
+ * documentation for that function contains an example showing the
+ * main features of the class.
+ *
+ * \sa class EigenSolver, class SelfAdjointEigenSolver
+ */
+template<typename _MatrixType> class ComplexEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+
+    /** \brief Complex scalar type for #MatrixType.
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+      *
+      * This is a square matrix with entries of type #ComplexScalar.
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().
+      */
+    ComplexEigenSolver()
+            : m_eivec(),
+              m_eivalues(),
+              m_schur(),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX()
+    {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa ComplexEigenSolver()
+      */
+    explicit ComplexEigenSolver(Index size)
+            : m_eivec(size, size),
+              m_eivalues(size),
+              m_schur(size),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(size, size)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      *
+      * This constructor calls compute() to compute the eigendecomposition.
+      */
+    template<typename InputType>
+    explicit ComplexEigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)
+            : m_eivec(matrix.rows(),matrix.cols()),
+              m_eivalues(matrix.cols()),
+              m_schur(matrix.rows()),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(matrix.rows(),matrix.cols())
+    {
+      compute(matrix.derived(), computeEigenvectors);
+    }
+
+    /** \brief Returns the eigenvectors of given matrix.
+      *
+      * \returns  A const reference to the matrix whose columns are the eigenvectors.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * This function returns a matrix whose columns are the eigenvectors. Column
+      * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
+      * \f$ as returned by eigenvalues(). The eigenvectors are normalized to
+      * have (Euclidean) norm equal to one. The matrix returned by this
+      * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
+      * V^{-1} \f$, if it exists.
+      *
+      * Example: \include ComplexEigenSolver_eigenvectors.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvectors.out
+      */
+    const EigenvectorType& eigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the eigenvalues of given matrix.
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix.
+      *
+      * This function returns a column vector containing the
+      * eigenvalues. Eigenvalues are repeated according to their
+      * algebraic multiplicity, so there are as many eigenvalues as
+      * rows in the matrix. The eigenvalues are not sorted in any particular
+      * order.
+      *
+      * Example: \include ComplexEigenSolver_eigenvalues.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvalues.out
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the complex matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them. If
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to Schur form using the
+      * ComplexSchur class. The Schur decomposition is then used to
+      * compute the eigenvalues and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
+      * is the size of the matrix.
+      *
+      * Example: \include ComplexEigenSolver_compute.cpp
+      * Output: \verbinclude ComplexEigenSolver_compute.out
+      */
+    template<typename InputType>
+    ComplexEigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_schur.info();
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. */
+    ComplexEigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_schur.setMaxIterations(maxIters);
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations.
+      */
+    Index getMaxIterations()
+    {
+      return m_schur.getMaxIterations();
+    }
+
+  protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
+    EigenvectorType m_eivec;
+    EigenvalueType m_eivalues;
+    ComplexSchur<MatrixType> m_schur;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    EigenvectorType m_matX;
+
+  private:
+    void doComputeEigenvectors(RealScalar matrixnorm);
+    void sortEigenvalues(bool computeEigenvectors);
+};
+
+
+template<typename MatrixType>
+template<typename InputType>
+ComplexEigenSolver<MatrixType>&
+ComplexEigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
+{
+  check_template_parameters();
+
+  // this code is inspired from Jampack
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  // Do a complex Schur decomposition, A = U T U^*
+  // The eigenvalues are on the diagonal of T.
+  m_schur.compute(matrix.derived(), computeEigenvectors);
+
+  if(m_schur.info() == Success)
+  {
+    m_eivalues = m_schur.matrixT().diagonal();
+    if(computeEigenvectors)
+      doComputeEigenvectors(m_schur.matrixT().norm());
+    sortEigenvalues(computeEigenvectors);
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
+{
+  const Index n = m_eivalues.size();
+
+  matrixnorm = numext::maxi(matrixnorm,(std::numeric_limits<RealScalar>::min)());
+
+  // Compute X such that T = X D X^(-1), where D is the diagonal of T.
+  // The matrix X is unit triangular.
+  m_matX = EigenvectorType::Zero(n, n);
+  for(Index k=n-1 ; k>=0 ; k--)
+  {
+    m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
+    // Compute X(i,k) using the (i,k) entry of the equation X T = D X
+    for(Index i=k-1 ; i>=0 ; i--)
+    {
+      m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
+      if(k-i-1>0)
+        m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
+      ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
+      if(z==ComplexScalar(0))
+      {
+        // If the i-th and k-th eigenvalue are equal, then z equals 0.
+        // Use a small value instead, to prevent division by zero.
+        numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
+      }
+      m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
+    }
+  }
+
+  // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
+  m_eivec.noalias() = m_schur.matrixU() * m_matX;
+  // .. and normalize the eigenvectors
+  for(Index k=0 ; k<n ; k++)
+  {
+    m_eivec.col(k).normalize();
+  }
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
+{
+  const Index n =  m_eivalues.size();
+  for (Index i=0; i<n; i++)
+  {
+    Index k;
+    m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
+    if (k != 0)
+    {
+      k += i;
+      std::swap(m_eivalues[k],m_eivalues[i]);
+      if(computeEigenvectors)
+        m_eivec.col(i).swap(m_eivec.col(k));
+    }
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
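A quick end-to-end check of the solver file above; the input is a small Hermitian (hence diagonalizable) matrix, so the residual of A = V D V^{-1} should vanish to rounding:

// Illustrative only, not part of the vendored sources.
#include <Eigen/Eigenvalues>
#include <complex>
#include <iostream>

int main()
{
  Eigen::Matrix2cd A;
  A << std::complex<double>(1,0), std::complex<double>(0,1),
       std::complex<double>(0,-1), std::complex<double>(2,0);
  Eigen::ComplexEigenSolver<Eigen::Matrix2cd> es(A);
  if (es.info() != Eigen::Success) return 1;
  double err = (A * es.eigenvectors()
                - es.eigenvectors() * es.eigenvalues().asDiagonal()).norm();
  std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
  return err < 1e-10 ? 0 : 1;
}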
+  *
+  * Given a real or complex square matrix A, this class computes the
+  * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
+  * complex matrix, and T is a complex upper triangular matrix. The
+  * diagonal of the matrix T corresponds to the eigenvalues of the
+  * matrix A.
+  *
+  * Call the function compute() to compute the Schur decomposition of
+  * a given matrix. Alternatively, you can use the
+  * ComplexSchur(const MatrixType&, bool) constructor which computes
+  * the Schur decomposition at construction time. Once the
+  * decomposition is computed, you can use the matrixU() and matrixT()
+  * functions to retrieve the matrices U and T in the decomposition.
+  *
+  * \note This code is inspired from Jampack
+  *
+  * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType> class ComplexSchur
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type \p _MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+
+    /** \brief Complex scalar type for \p _MatrixType.
+      *
+      * This is \c std::complex<RealScalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the matrices in the Schur decomposition.
+      *
+      * This is a square matrix with entries of type #ComplexScalar.
+      * The size is the same as the size of \p _MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user
+      * intends to perform decompositions via compute(). The \p size
+      * parameter is only used as a hint. It is not an error to give a
+      * wrong \p size, but it may impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    explicit ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+      : m_matT(size,size),
+        m_matU(size,size),
+        m_hess(size),
+        m_isInitialized(false),
+        m_matUisUptodate(false),
+        m_maxIters(-1)
+    {}
+
+    /** \brief Constructor; computes Schur decomposition of given matrix.
+      *
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * This constructor calls compute() to compute the Schur decomposition.
+      *
+      * \sa matrixT() and matrixU() for examples.
+      */
+    template<typename InputType>
+    explicit ComplexSchur(const EigenBase<InputType>& matrix, bool computeU = true)
+      : m_matT(matrix.rows(),matrix.cols()),
+        m_matU(matrix.rows(),matrix.cols()),
+        m_hess(matrix.rows()),
+        m_isInitialized(false),
+        m_matUisUptodate(false),
+        m_maxIters(-1)
+    {
+      compute(matrix.derived(), computeU);
+    }
+
+    /** \brief Returns the unitary matrix in the Schur decomposition.
+      *
+      * \returns A const reference to the matrix U.
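+      *
+      * As an illustrative sketch (not upstream documentation; the small random
+      * matrix is an assumption), the decomposition returned by matrixU() and
+      * matrixT() can be checked via
+      * \code
+      * Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
+      * Eigen::ComplexSchur<Eigen::MatrixXcf> schur(A);
+      * bool ok = A.isApprox(schur.matrixU() * schur.matrixT()
+      *                      * schur.matrixU().adjoint()); // A = U T U^*
+      * \endcode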
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix, and that \p computeU was set to true (the default
+      * value).
+      *
+      * Example: \include ComplexSchur_matrixU.cpp
+      * Output: \verbinclude ComplexSchur_matrixU.out
+      */
+    const ComplexMatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the triangular matrix in the Schur decomposition.
+      *
+      * \returns A const reference to the matrix T.
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix.
+      *
+      * Note that this function returns a plain square matrix. If you want to reference
+      * only the upper triangular part, use:
+      * \code schur.matrixT().triangularView<Upper>() \endcode
+      *
+      * Example: \include ComplexSchur_matrixT.cpp
+      * Output: \verbinclude ComplexSchur_matrixT.out
+      */
+    const ComplexMatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_matT;
+    }
+
+    /** \brief Computes Schur decomposition of given matrix.
+      *
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * \returns    Reference to \c *this
+      *
+      * The Schur decomposition is computed by first reducing the
+      * matrix to Hessenberg form using the class
+      * HessenbergDecomposition. The Hessenberg matrix is then reduced
+      * to triangular form by performing QR iterations with a single
+      * shift. The cost of computing the Schur decomposition depends
+      * on the number of iterations; as a rough guide, it may be taken
+      * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
+      * if \a computeU is false.
+      *
+      * Example: \include ComplexSchur_compute.cpp
+      * Output: \verbinclude ComplexSchur_compute.out
+      *
+      * \sa compute(const MatrixType&, bool, Index)
+      */
+    template<typename InputType>
+    ComplexSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
+
+    /** \brief Compute Schur decomposition from a given Hessenberg matrix
+      * \param[in] matrixH Matrix in Hessenberg form H
+      * \param[in] matrixQ orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
+      * \param computeU Computes the matrix U of the Schur vectors
+      * \return Reference to \c *this
+      *
+      * This routine assumes that the matrix is already reduced to Hessenberg form \p matrixH,
+      * using either the class HessenbergDecomposition or some other means.
+      * It computes the upper triangular matrix T of the Schur decomposition of H.
+      * When computeU is true, this routine computes the matrix U such that
+      * A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix.
+      *
+      * NOTE: Q is referenced if computeU is true; so, if the initial orthogonal matrix
+      * is not available, the user should give an identity matrix (Q.setIdentity()).
+      *
+      * \sa compute(const MatrixType&, bool)
+      */
+    template<typename HessMatrixType, typename OrthMatrixType>
+    ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU=true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Sets the maximum number of iterations allowed.
+      *
+      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+      * of the matrix.
+      */
+    ComplexSchur& setMaxIterations(Index maxIters)
+    {
+      m_maxIters = maxIters;
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_maxIters;
+    }
+
+    /** \brief Maximum number of iterations per row.
+      *
+      * If not otherwise specified, the maximum number of iterations is this number times the size of the
+      * matrix. It is currently set to 30.
+      */
+    static const int m_maxIterationsPerRow = 30;
+
+  protected:
+    ComplexMatrixType m_matT, m_matU;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+    Index m_maxIters;
+
+  private:
+    bool subdiagonalEntryIsNeglegible(Index i);
+    ComplexScalar computeShift(Index iu, Index iter);
+    void reduceToTriangularForm(bool computeU);
+    friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
+};
+
+/** If m_matT(i+1,i) is negligible in floating point arithmetic
+  * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
+  * return true, else return false. */
+template<typename MatrixType>
+inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
+{
+  RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
+  RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
+  if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
+  {
+    m_matT.coeffRef(i+1,i) = ComplexScalar(0);
+    return true;
+  }
+  return false;
+}
+
+
+/** Compute the shift in the current QR iteration.
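+  *
+  * (For intuition, an editorial sketch of the math: the shift is an eigenvalue
+  * of the trailing 2x2 block
+  * \f$ t = \begin{bmatrix} t_{00} & t_{01} \\ t_{10} & t_{11} \end{bmatrix} \f$,
+  * i.e. a root of \f$ \lambda^2 - \mathrm{tr}(t)\lambda + \det(t) = 0 \f$, so
+  * \f$ \lambda = \bigl(\mathrm{tr}(t) \pm \sqrt{\mathrm{tr}(t)^2 - 4\det(t)}\bigr)/2 \f$;
+  * the root closest to \f$ t_{11} \f$ is chosen. At iterations 10 and 20 an
+  * exceptional shift is used instead, to escape rare convergence stalls.)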
*/ +template +typename ComplexSchur::ComplexScalar ComplexSchur::computeShift(Index iu, Index iter) +{ + using std::abs; + if (iter == 10 || iter == 20) + { + // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f + return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2))); + } + + // compute the shift as one of the eigenvalues of t, the 2x2 + // diagonal block on the bottom of the active submatrix + Matrix t = m_matT.template block<2,2>(iu-1,iu-1); + RealScalar normt = t.cwiseAbs().sum(); + t /= normt; // the normalization by sf is to avoid under/overflow + + ComplexScalar b = t.coeff(0,1) * t.coeff(1,0); + ComplexScalar c = t.coeff(0,0) - t.coeff(1,1); + ComplexScalar disc = sqrt(c*c + RealScalar(4)*b); + ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b; + ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1); + ComplexScalar eival1 = (trace + disc) / RealScalar(2); + ComplexScalar eival2 = (trace - disc) / RealScalar(2); + + if(numext::norm1(eival1) > numext::norm1(eival2)) + eival2 = det / eival1; + else + eival1 = det / eival2; + + // choose the eigenvalue closest to the bottom entry of the diagonal + if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1))) + return normt * eival1; + else + return normt * eival2; +} + + +template +template +ComplexSchur& ComplexSchur::compute(const EigenBase& matrix, bool computeU) +{ + m_matUisUptodate = false; + eigen_assert(matrix.cols() == matrix.rows()); + + if(matrix.cols() == 1) + { + m_matT = matrix.derived().template cast(); + if(computeU) m_matU = ComplexMatrixType::Identity(1,1); + m_info = Success; + m_isInitialized = true; + m_matUisUptodate = computeU; + return *this; + } + + internal::complex_schur_reduce_to_hessenberg::IsComplex>::run(*this, matrix.derived(), computeU); + computeFromHessenberg(m_matT, m_matU, computeU); + return *this; +} + +template +template +ComplexSchur& ComplexSchur::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU) +{ + m_matT = matrixH; + if(computeU) + m_matU = matrixQ; + reduceToTriangularForm(computeU); + return *this; +} +namespace internal { + +/* Reduce given matrix to Hessenberg form */ +template +struct complex_schur_reduce_to_hessenberg +{ + // this is the implementation for the case IsComplex = true + static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) + { + _this.m_hess.compute(matrix); + _this.m_matT = _this.m_hess.matrixH(); + if(computeU) _this.m_matU = _this.m_hess.matrixQ(); + } +}; + +template +struct complex_schur_reduce_to_hessenberg +{ + static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) + { + typedef typename ComplexSchur::ComplexScalar ComplexScalar; + + // Note: m_hess is over RealScalar; m_matT and m_matU is over ComplexScalar + _this.m_hess.compute(matrix); + _this.m_matT = _this.m_hess.matrixH().template cast(); + if(computeU) + { + // This may cause an allocation which seems to be avoidable + MatrixType Q = _this.m_hess.matrixQ(); + _this.m_matU = Q.template cast(); + } + } +}; + +} // end namespace internal + +// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration. +template +void ComplexSchur::reduceToTriangularForm(bool computeU) +{ + Index maxIters = m_maxIters; + if (maxIters == -1) + maxIters = m_maxIterationsPerRow * m_matT.rows(); + + // The matrix m_matT is divided in three parts. + // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. 
+ // Rows il,...,iu is the part we are working on (the active submatrix). + // Rows iu+1,...,end are already brought in triangular form. + Index iu = m_matT.cols() - 1; + Index il; + Index iter = 0; // number of iterations we are working on the (iu,iu) element + Index totalIter = 0; // number of iterations for whole matrix + + while(true) + { + // find iu, the bottom row of the active submatrix + while(iu > 0) + { + if(!subdiagonalEntryIsNeglegible(iu-1)) break; + iter = 0; + --iu; + } + + // if iu is zero then we are done; the whole matrix is triangularized + if(iu==0) break; + + // if we spent too many iterations, we give up + iter++; + totalIter++; + if(totalIter > maxIters) break; + + // find il, the top row of the active submatrix + il = iu-1; + while(il > 0 && !subdiagonalEntryIsNeglegible(il-1)) + { + --il; + } + + /* perform the QR step using Givens rotations. The first rotation + creates a bulge; the (il+2,il) element becomes nonzero. This + bulge is chased down to the bottom of the active submatrix. */ + + ComplexScalar shift = computeShift(iu, iter); + JacobiRotation rot; + rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il)); + m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint()); + m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot); + if(computeU) m_matU.applyOnTheRight(il, il+1, rot); + + for(Index i=il+1 ; i template inline \ +ComplexSchur >& \ +ComplexSchur >::compute(const EigenBase& matrix, bool computeU) \ +{ \ + typedef Matrix MatrixType; \ + typedef MatrixType::RealScalar RealScalar; \ + typedef std::complex ComplexScalar; \ +\ + eigen_assert(matrix.cols() == matrix.rows()); \ +\ + m_matUisUptodate = false; \ + if(matrix.cols() == 1) \ + { \ + m_matT = matrix.derived().template cast(); \ + if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \ + m_info = Success; \ + m_isInitialized = true; \ + m_matUisUptodate = computeU; \ + return *this; \ + } \ + lapack_int n = internal::convert_index(matrix.cols()), sdim, info; \ + lapack_int matrix_order = LAPACKE_COLROW; \ + char jobvs, sort='N'; \ + LAPACK_##LAPACKE_PREFIX_U##_SELECT1 select = 0; \ + jobvs = (computeU) ? 
'V' : 'N'; \ + m_matU.resize(n, n); \ + lapack_int ldvs = internal::convert_index(m_matU.outerStride()); \ + m_matT = matrix; \ + lapack_int lda = internal::convert_index(m_matT.outerStride()); \ + Matrix w; \ + w.resize(n, 1);\ + info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)w.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \ + if(info == 0) \ + m_info = Success; \ + else \ + m_info = NoConvergence; \ +\ + m_isInitialized = true; \ + m_matUisUptodate = computeU; \ + return *this; \ +\ +} + +EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, ColMajor, LAPACK_COL_MAJOR) +EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, ColMajor, LAPACK_COL_MAJOR) +EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, RowMajor, LAPACK_ROW_MAJOR) + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_SCHUR_LAPACKE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/EigenSolver.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/EigenSolver.h new file mode 100644 index 000000000..f205b185d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/EigenSolver.h @@ -0,0 +1,622 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EIGENSOLVER_H +#define EIGEN_EIGENSOLVER_H + +#include "./RealSchur.h" + +namespace Eigen { + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class EigenSolver + * + * \brief Computes eigenvalues and eigenvectors of general matrices + * + * \tparam _MatrixType the type of the matrix of which we are computing the + * eigendecomposition; this is expected to be an instantiation of the Matrix + * class template. Currently, only real matrices are supported. + * + * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars + * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If + * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and + * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V = + * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we + * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition. + * + * The eigenvalues and eigenvectors of a matrix may be complex, even when the + * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D + * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the + * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to + * have blocks of the form + * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f] + * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These + * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call + * this variant of the eigendecomposition the pseudo-eigendecomposition. 
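+  *
+  * (Editorial sketch, consistent with the block form above: the real matrix
+  * \f$ A = \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$ itself has the
+  * complex eigenvalues \f$ u \pm iv \f$; the pseudo-eigendecomposition keeps
+  * exactly this 2-by-2 block on the diagonal of \f$ D \f$, so that both
+  * \f$ V \f$ and \f$ D \f$ remain real.)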
+ * + * Call the function compute() to compute the eigenvalues and eigenvectors of + * a given matrix. Alternatively, you can use the + * EigenSolver(const MatrixType&, bool) constructor which computes the + * eigenvalues and eigenvectors at construction time. Once the eigenvalue and + * eigenvectors are computed, they can be retrieved with the eigenvalues() and + * eigenvectors() functions. The pseudoEigenvalueMatrix() and + * pseudoEigenvectors() methods allow the construction of the + * pseudo-eigendecomposition. + * + * The documentation for EigenSolver(const MatrixType&, bool) contains an + * example of the typical use of this class. + * + * \note The implementation is adapted from + * JAMA (public domain). + * Their code is based on EISPACK. + * + * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver + */ +template class EigenSolver +{ + public: + + /** \brief Synonym for the template parameter \p _MatrixType. */ + typedef _MatrixType MatrixType; + + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + Options = MatrixType::Options, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + /** \brief Scalar type for matrices of type #MatrixType. */ + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + + /** \brief Complex scalar type for #MatrixType. + * + * This is \c std::complex if #Scalar is real (e.g., + * \c float or \c double) and just \c Scalar if #Scalar is + * complex. + */ + typedef std::complex ComplexScalar; + + /** \brief Type for vector of eigenvalues as returned by eigenvalues(). + * + * This is a column vector with entries of type #ComplexScalar. + * The length of the vector is the size of #MatrixType. + */ + typedef Matrix EigenvalueType; + + /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). + * + * This is a square matrix with entries of type #ComplexScalar. + * The size is the same as the size of #MatrixType. + */ + typedef Matrix EigenvectorsType; + + /** \brief Default constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via EigenSolver::compute(const MatrixType&, bool). + * + * \sa compute() for an example. + */ + EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {} + + /** \brief Default constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa EigenSolver() + */ + explicit EigenSolver(Index size) + : m_eivec(size, size), + m_eivalues(size), + m_isInitialized(false), + m_eigenvectorsOk(false), + m_realSchur(size), + m_matT(size, size), + m_tmp(size) + {} + + /** \brief Constructor; computes eigendecomposition of given matrix. + * + * \param[in] matrix Square matrix whose eigendecomposition is to be computed. + * \param[in] computeEigenvectors If true, both the eigenvectors and the + * eigenvalues are computed; if false, only the eigenvalues are + * computed. + * + * This constructor calls compute() to compute the eigenvalues + * and eigenvectors. 
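+      *
+      * A minimal illustrative sketch (the random 4x4 matrix is an assumption):
+      * \code
+      * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+      * Eigen::EigenSolver<Eigen::MatrixXd> es(A);
+      * std::cout << es.eigenvalues() << "\n" << es.eigenvectors() << std::endl;
+      * \endcode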
+ * + * Example: \include EigenSolver_EigenSolver_MatrixType.cpp + * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out + * + * \sa compute() + */ + template + explicit EigenSolver(const EigenBase& matrix, bool computeEigenvectors = true) + : m_eivec(matrix.rows(), matrix.cols()), + m_eivalues(matrix.cols()), + m_isInitialized(false), + m_eigenvectorsOk(false), + m_realSchur(matrix.cols()), + m_matT(matrix.rows(), matrix.cols()), + m_tmp(matrix.cols()) + { + compute(matrix.derived(), computeEigenvectors); + } + + /** \brief Returns the eigenvectors of given matrix. + * + * \returns %Matrix whose columns are the (possibly complex) eigenvectors. + * + * \pre Either the constructor + * EigenSolver(const MatrixType&,bool) or the member function + * compute(const MatrixType&, bool) has been called before, and + * \p computeEigenvectors was set to true (the default). + * + * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding + * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The + * eigenvectors are normalized to have (Euclidean) norm equal to one. The + * matrix returned by this function is the matrix \f$ V \f$ in the + * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists. + * + * Example: \include EigenSolver_eigenvectors.cpp + * Output: \verbinclude EigenSolver_eigenvectors.out + * + * \sa eigenvalues(), pseudoEigenvectors() + */ + EigenvectorsType eigenvectors() const; + + /** \brief Returns the pseudo-eigenvectors of given matrix. + * + * \returns Const reference to matrix whose columns are the pseudo-eigenvectors. + * + * \pre Either the constructor + * EigenSolver(const MatrixType&,bool) or the member function + * compute(const MatrixType&, bool) has been called before, and + * \p computeEigenvectors was set to true (the default). + * + * The real matrix \f$ V \f$ returned by this function and the + * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix() + * satisfy \f$ AV = VD \f$. + * + * Example: \include EigenSolver_pseudoEigenvectors.cpp + * Output: \verbinclude EigenSolver_pseudoEigenvectors.out + * + * \sa pseudoEigenvalueMatrix(), eigenvectors() + */ + const MatrixType& pseudoEigenvectors() const + { + eigen_assert(m_isInitialized && "EigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + return m_eivec; + } + + /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition. + * + * \returns A block-diagonal matrix. + * + * \pre Either the constructor + * EigenSolver(const MatrixType&,bool) or the member function + * compute(const MatrixType&, bool) has been called before. + * + * The matrix \f$ D \f$ returned by this function is real and + * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2 + * blocks of the form + * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$. + * These blocks are not sorted in any particular order. + * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by + * pseudoEigenvectors() satisfy \f$ AV = VD \f$. + * + * \sa pseudoEigenvectors() for an example, eigenvalues() + */ + MatrixType pseudoEigenvalueMatrix() const; + + /** \brief Returns the eigenvalues of given matrix. + * + * \returns A const reference to the column vector containing the eigenvalues. + * + * \pre Either the constructor + * EigenSolver(const MatrixType&,bool) or the member function + * compute(const MatrixType&, bool) has been called before. 
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+      * are not sorted in any particular order.
+      *
+      * Example: \include EigenSolver_eigenvalues.cpp
+      * Output: \verbinclude EigenSolver_eigenvalues.out
+      *
+      * \sa eigenvectors(), pseudoEigenvalueMatrix(),
+      *     MatrixBase::eigenvalues()
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix.
+      *
+      * \param[in] matrix Square matrix whose eigendecomposition is to be computed.
+      * \param[in] computeEigenvectors If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      * \returns Reference to \c *this
+      *
+      * This function computes the eigenvalues of the real matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them. If
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to real Schur form using the RealSchur
+      * class. The Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is very approximately \f$ 25n^3 \f$
+      * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors
+      * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
+      *
+      * This method reuses the allocated data in the EigenSolver object.
+      *
+      * Example: \include EigenSolver_compute.cpp
+      * Output: \verbinclude EigenSolver_compute.out
+      */
+    template<typename InputType>
+    EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
+
+    /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. */
+    EigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realSchur.setMaxIterations(maxIters);
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations.
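+      *
+      * For illustration (an editorial sketch; eigenvectors are assumed to have
+      * been computed), the iteration budget can be tuned and the result
+      * checked via \f$ AV = VD \f$:
+      * \code
+      * Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
+      * Eigen::EigenSolver<Eigen::MatrixXd> es;
+      * es.setMaxIterations(40 * A.rows()).compute(A);
+      * Eigen::MatrixXcd V = es.eigenvectors();
+      * Eigen::MatrixXcd D = es.eigenvalues().asDiagonal();
+      * bool ok = (A.cast<std::complex<double> >() * V).isApprox(V * D);
+      * \endcode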
+      */
+    Index getMaxIterations()
+    {
+      return m_realSchur.getMaxIterations();
+    }
+
+  private:
+    void doComputeEigenvectors();
+
+  protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+    }
+
+    MatrixType m_eivec;
+    EigenvalueType m_eivalues;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    ComputationInfo m_info;
+    RealSchur<MatrixType> m_realSchur;
+    MatrixType m_matT;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+template<typename MatrixType>
+MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
+  Index n = m_eivalues.rows();
+  MatrixType matD = MatrixType::Zero(n,n);
+  for (Index i=0; i<n; ++i)
+  {
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)), precision))
+      matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
+    else
+    {
+      matD.template block<2,2>(i,i) <<  numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
+                                       -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
+      ++i;
+    }
+  }
+  return matD;
+}
+
+template<typename MatrixType>
+typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
+  Index n = m_eivec.cols();
+  EigenvectorsType matV(n,n);
+  for (Index j=0; j<n; ++j)
+  {
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j)), precision) || j+1==n)
+    {
+      // we have a real eigen value
+      matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
+      matV.col(j).normalize();
+    }
+    else
+    {
+      // we have a pair of complex eigen values
+      for (Index i=0; i<n; ++i)
+      {
+        matV.coeffRef(i,j)   = ComplexScalar(m_eivec.coeff(i,j),  m_eivec.coeff(i,j+1));
+        matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
+      }
+      matV.col(j).normalize();
+      matV.col(j+1).normalize();
+      ++j;
+    }
+  }
+  return matV;
+}
+
+template<typename MatrixType>
+template<typename InputType>
+EigenSolver<MatrixType>&
+EigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
+{
+  check_template_parameters();
+
+  using std::sqrt;
+  using std::abs;
+  using numext::isfinite;
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  // Reduce to real Schur form.
+  m_realSchur.compute(matrix.derived(), computeEigenvectors);
+
+  m_info = m_realSchur.info();
+
+  if (m_info == Success)
+  {
+    m_matT = m_realSchur.matrixT();
+    if (computeEigenvectors)
+      m_eivec = m_realSchur.matrixU();
+
+    // Compute eigenvalues from matT
+    m_eivalues.resize(matrix.cols());
+    Index i = 0;
+    while (i < matrix.cols())
+    {
+      if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
+      {
+        m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
+        if(!(isfinite)(m_eivalues.coeffRef(i)))
+        {
+          m_isInitialized = true;
+          m_eigenvectorsOk = false;
+          m_info = NumericalIssue;
+          return *this;
+        }
+        ++i;
+      }
+      else
+      {
+        Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
+        Scalar z;
+        // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        // without overflow
+        {
+          Scalar t0 = m_matT.coeff(i+1, i);
+          Scalar t1 = m_matT.coeff(i, i+1);
+          Scalar maxval = numext::maxi<Scalar>(abs(p),numext::maxi<Scalar>(abs(t0),abs(t1)));
+          t0 /= maxval;
+          t1 /= maxval;
+          Scalar p0 = p/maxval;
+          z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
+        }
+
+        m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
+        m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
+        if(!((isfinite)(m_eivalues.coeffRef(i)) && (isfinite)(m_eivalues.coeffRef(i+1))))
+        {
+          m_isInitialized = true;
+          m_eigenvectorsOk = false;
+          m_info = NumericalIssue;
+          return *this;
+        }
+        i += 2;
+      }
+    }
+
+    // Compute eigenvectors.
+ if (computeEigenvectors) + doComputeEigenvectors(); + } + + m_isInitialized = true; + m_eigenvectorsOk = computeEigenvectors; + + return *this; +} + + +template +void EigenSolver::doComputeEigenvectors() +{ + using std::abs; + const Index size = m_eivec.cols(); + const Scalar eps = NumTraits::epsilon(); + + // inefficient! this is already computed in RealSchur + Scalar norm(0); + for (Index j = 0; j < size; ++j) + { + norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); + } + + // Backsubstitute to find vectors of upper triangular form + if (norm == Scalar(0)) + { + return; + } + + for (Index n = size-1; n >= 0; n--) + { + Scalar p = m_eivalues.coeff(n).real(); + Scalar q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == Scalar(0)) + { + Scalar lastr(0), lastw(0); + Index l = n; + + m_matT.coeffRef(n,n) = Scalar(1); + for (Index i = n-1; i >= 0; i--) + { + Scalar w = m_matT.coeff(i,i) - p; + Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastr = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + if (w != Scalar(0)) + m_matT.coeffRef(i,n) = -r / w; + else + m_matT.coeffRef(i,n) = -r / (eps * norm); + } + else // Solve real equations + { + Scalar x = m_matT.coeff(i,i+1); + Scalar y = m_matT.coeff(i+1,i); + Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + Scalar t = (x * lastr - lastw * r) / denom; + m_matT.coeffRef(i,n) = t; + if (abs(x) > abs(lastw)) + m_matT.coeffRef(i+1,n) = (-r - w * t) / x; + else + m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw; + } + + // Overflow control + Scalar t = abs(m_matT.coeff(i,n)); + if ((eps * t) * t > Scalar(1)) + m_matT.col(n).tail(size-i) /= t; + } + } + } + else if (q < Scalar(0) && n > 0) // Complex vector + { + Scalar lastra(0), lastsa(0), lastw(0); + Index l = n-1; + + // Last vector component imaginary so matrix is triangular + if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n))) + { + m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1); + m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1); + } + else + { + ComplexScalar cc = ComplexScalar(Scalar(0),-m_matT.coeff(n-1,n)) / ComplexScalar(m_matT.coeff(n-1,n-1)-p,q); + m_matT.coeffRef(n-1,n-1) = numext::real(cc); + m_matT.coeffRef(n-1,n) = numext::imag(cc); + } + m_matT.coeffRef(n,n-1) = Scalar(0); + m_matT.coeffRef(n,n) = Scalar(1); + for (Index i = n-2; i >= 0; i--) + { + Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1)); + Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); + Scalar w = m_matT.coeff(i,i) - p; + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastra = ra; + lastsa = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == RealScalar(0)) + { + ComplexScalar cc = ComplexScalar(-ra,-sa) / ComplexScalar(w,q); + m_matT.coeffRef(i,n-1) = numext::real(cc); + m_matT.coeffRef(i,n) = numext::imag(cc); + } + else + { + // Solve complex equations + Scalar x = m_matT.coeff(i,i+1); + Scalar y = m_matT.coeff(i+1,i); + Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == Scalar(0)) && (vi == Scalar(0))) + vr = eps * norm * (abs(w) + 
abs(q) + abs(x) + abs(y) + abs(lastw)); + + ComplexScalar cc = ComplexScalar(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra) / ComplexScalar(vr,vi); + m_matT.coeffRef(i,n-1) = numext::real(cc); + m_matT.coeffRef(i,n) = numext::imag(cc); + if (abs(x) > (abs(lastw) + abs(q))) + { + m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x; + m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x; + } + else + { + cc = ComplexScalar(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n)) / ComplexScalar(lastw,q); + m_matT.coeffRef(i+1,n-1) = numext::real(cc); + m_matT.coeffRef(i+1,n) = numext::imag(cc); + } + } + + // Overflow control + Scalar t = numext::maxi(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n))); + if ((eps * t) * t > Scalar(1)) + m_matT.block(i, n-1, size-i, 2) /= t; + + } + } + + // We handled a pair of complex conjugate eigenvalues, so need to skip them both + n--; + } + else + { + eigen_assert(0 && "Internal bug in EigenSolver (INF or NaN has not been detected)"); // this should not happen + } + } + + // Back transformation to get eigenvectors of original matrix + for (Index j = size-1; j >= 0; j--) + { + m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1); + m_eivec.col(j) = m_tmp; + } +} + +} // end namespace Eigen + +#endif // EIGEN_EIGENSOLVER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h new file mode 100644 index 000000000..87d789b3f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h @@ -0,0 +1,418 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012-2016 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// Copyright (C) 2016 Tobias Wood +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H +#define EIGEN_GENERALIZEDEIGENSOLVER_H + +#include "./RealQZ.h" + +namespace Eigen { + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class GeneralizedEigenSolver + * + * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices + * + * \tparam _MatrixType the type of the matrices of which we are computing the + * eigen-decomposition; this is expected to be an instantiation of the Matrix + * class template. Currently, only real matrices are supported. + * + * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars + * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$. If + * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and + * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V = + * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we + * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition. + * + * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the + * matrices are real. 
Moreover, the generalized eigenvalue might be infinite if the matrix B is
+  * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
+  * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
+  * then one can consider the well defined left eigenvalue \f$ \mu_i = \beta_i / \alpha_i \f$ such that:
+  * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is
+  * called the left eigenvector.
+  *
+  * Call the function compute() to compute the generalized eigenvalues and eigenvectors of
+  * a given matrix pair. Alternatively, you can use the
+  * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions.
+  *
+  * Here is a usage example of this class:
+  * Example: \include GeneralizedEigenSolver.cpp
+  * Output: \verbinclude GeneralizedEigenSolver.out
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class GeneralizedEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+
+    /** \brief Complex scalar type for #MatrixType.
+      *
+      * This is \c std::complex<RealScalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of real scalar values eigenvalues as returned by betas().
+      *
+      * This is a column vector with entries of type #Scalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
+
+    /** \brief Type for vector of complex scalar values eigenvalues as returned by alphas().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
+
+    /** \brief Expression type for the eigenvalues as returned by eigenvalues().
+      */
+    typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+      *
+      * This is a square matrix with entries of type #ComplexScalar.
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute(const MatrixType&, const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    GeneralizedEigenSolver()
+      : m_eivec(),
+        m_alphas(),
+        m_betas(),
+        m_valuesOkay(false),
+        m_vectorsOkay(false),
+        m_realQZ()
+    {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
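+      *
+      * A minimal preallocation sketch (editorial; the sizes and random
+      * matrices are assumptions):
+      * \code
+      * Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(64); // preallocate for 64x64 pairs
+      * Eigen::MatrixXd A = Eigen::MatrixXd::Random(64,64);
+      * Eigen::MatrixXd B = Eigen::MatrixXd::Random(64,64);
+      * ges.compute(A, B);
+      * if (ges.info() == Eigen::Success)
+      *   std::cout << ges.eigenvalues() << std::endl;
+      * \endcode
+      *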
+      * \sa GeneralizedEigenSolver()
+      */
+    explicit GeneralizedEigenSolver(Index size)
+      : m_eivec(size, size),
+        m_alphas(size),
+        m_betas(size),
+        m_valuesOkay(false),
+        m_vectorsOkay(false),
+        m_realQZ(size),
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
+      *
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are computed.
+      *
+      * This constructor calls compute() to compute the generalized eigenvalues
+      * and eigenvectors.
+      *
+      * \sa compute()
+      */
+    GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
+      : m_eivec(A.rows(), A.cols()),
+        m_alphas(A.cols()),
+        m_betas(A.cols()),
+        m_valuesOkay(false),
+        m_vectorsOkay(false),
+        m_realQZ(A.cols()),
+        m_tmp(A.cols())
+    {
+      compute(A, B, computeEigenvectors);
+    }
+
+    /* \brief Returns the computed generalized eigenvectors.
+      *
+      * \returns %Matrix whose columns are the (possibly complex) right eigenvectors,
+      * i.e. the eigenvectors that solve \f$ (A - \lambda B)x = 0 \f$. The ordering matches the eigenvalues.
+      *
+      * \pre Either the constructor
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
+      * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * \sa eigenvalues()
+      */
+    EigenvectorsType eigenvectors() const {
+      eigen_assert(m_vectorsOkay && "Eigenvectors for GeneralizedEigenSolver were not calculated.");
+      return m_eivec;
+    }
+
+    /** \brief Returns an expression of the computed generalized eigenvalues.
+      *
+      * \returns An expression of the column vector containing the eigenvalues.
+      *
+      * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
+      * Note that betas might contain zeros. It is therefore not recommended to use this function,
+      * but rather to deal directly with the alphas and betas vectors.
+      *
+      * \pre Either the constructor
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
+      * compute(const MatrixType&,const MatrixType&,bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+      * are not sorted in any particular order.
+      *
+      * \sa alphas(), betas(), eigenvectors()
+      */
+    EigenvalueType eigenvalues() const
+    {
+      eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
+      return EigenvalueType(m_alphas,m_betas);
+    }
+
+    /** \returns A const reference to the vectors containing the alpha values
+      *
+      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa betas(), eigenvalues() */
+    ComplexVectorType alphas() const
+    {
+      eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
+      return m_alphas;
+    }
+
+    /** \returns A const reference to the vectors containing the beta values
+      *
+      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+      *
+      * \sa alphas(), eigenvalues() */
+    VectorType betas() const
+    {
+      eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
+      return m_betas;
+    }
+
+    /** \brief Computes the generalized eigendecomposition of given matrix pair.
+ * + * \param[in] A Square matrix whose eigendecomposition is to be computed. + * \param[in] B Square matrix whose eigendecomposition is to be computed. + * \param[in] computeEigenvectors If true, both the eigenvectors and the + * eigenvalues are computed; if false, only the eigenvalues are + * computed. + * \returns Reference to \c *this + * + * This function computes the eigenvalues of the real matrix \p matrix. + * The eigenvalues() function can be used to retrieve them. If + * \p computeEigenvectors is true, then the eigenvectors are also computed + * and can be retrieved by calling eigenvectors(). + * + * The matrix is first reduced to real generalized Schur form using the RealQZ + * class. The generalized Schur decomposition is then used to compute the eigenvalues + * and eigenvectors. + * + * The cost of the computation is dominated by the cost of the + * generalized Schur decomposition. + * + * This method reuses of the allocated data in the GeneralizedEigenSolver object. + */ + GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true); + + ComputationInfo info() const + { + eigen_assert(m_valuesOkay && "EigenSolver is not initialized."); + return m_realQZ.info(); + } + + /** Sets the maximal number of iterations allowed. + */ + GeneralizedEigenSolver& setMaxIterations(Index maxIters) + { + m_realQZ.setMaxIterations(maxIters); + return *this; + } + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL); + } + + EigenvectorsType m_eivec; + ComplexVectorType m_alphas; + VectorType m_betas; + bool m_valuesOkay, m_vectorsOkay; + RealQZ m_realQZ; + ComplexVectorType m_tmp; +}; + +template +GeneralizedEigenSolver& +GeneralizedEigenSolver::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors) +{ + check_template_parameters(); + + using std::sqrt; + using std::abs; + eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows()); + Index size = A.cols(); + m_valuesOkay = false; + m_vectorsOkay = false; + // Reduce to generalized real Schur form: + // A = Q S Z and B = Q T Z + m_realQZ.compute(A, B, computeEigenvectors); + if (m_realQZ.info() == Success) + { + // Resize storage + m_alphas.resize(size); + m_betas.resize(size); + if (computeEigenvectors) + { + m_eivec.resize(size,size); + m_tmp.resize(size); + } + + // Aliases: + Map v(reinterpret_cast(m_tmp.data()), size); + ComplexVectorType &cv = m_tmp; + const MatrixType &mS = m_realQZ.matrixS(); + const MatrixType &mT = m_realQZ.matrixT(); + + Index i = 0; + while (i < size) + { + if (i == size - 1 || mS.coeff(i+1, i) == Scalar(0)) + { + // Real eigenvalue + m_alphas.coeffRef(i) = mS.diagonal().coeff(i); + m_betas.coeffRef(i) = mT.diagonal().coeff(i); + if (computeEigenvectors) + { + v.setConstant(Scalar(0.0)); + v.coeffRef(i) = Scalar(1.0); + // For singular eigenvalues do nothing more + if(abs(m_betas.coeffRef(i)) >= (std::numeric_limits::min)()) + { + // Non-singular eigenvalue + const Scalar alpha = real(m_alphas.coeffRef(i)); + const Scalar beta = m_betas.coeffRef(i); + for (Index j = i-1; j >= 0; j--) + { + const Index st = j+1; + const Index sz = i-j; + if (j > 0 && mS.coeff(j, j-1) != Scalar(0)) + { + // 2x2 block + Matrix rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( v.segment(st,sz) ); + Matrix lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha 
* mT.template block<2,2>(j-1,j-1); + v.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs); + j--; + } + else + { + v.coeffRef(j) = -v.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum() / (beta*mS.coeffRef(j,j) - alpha*mT.coeffRef(j,j)); + } + } + } + m_eivec.col(i).real().noalias() = m_realQZ.matrixZ().transpose() * v; + m_eivec.col(i).real().normalize(); + m_eivec.col(i).imag().setConstant(0); + } + ++i; + } + else + { + // We need to extract the generalized eigenvalues of the pair of a general 2x2 block S and a positive diagonal 2x2 block T + // Then taking beta=T_00*T_11, we can avoid any division, and alpha is the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00): + + // T = [a 0] + // [0 b] + RealScalar a = mT.diagonal().coeff(i), + b = mT.diagonal().coeff(i+1); + const RealScalar beta = m_betas.coeffRef(i) = m_betas.coeffRef(i+1) = a*b; + + // ^^ NOTE: using diagonal()(i) instead of coeff(i,i) workarounds a MSVC bug. + Matrix S2 = mS.template block<2,2>(i,i) * Matrix(b,a).asDiagonal(); + + Scalar p = Scalar(0.5) * (S2.coeff(0,0) - S2.coeff(1,1)); + Scalar z = sqrt(abs(p * p + S2.coeff(1,0) * S2.coeff(0,1))); + const ComplexScalar alpha = ComplexScalar(S2.coeff(1,1) + p, (beta > 0) ? z : -z); + m_alphas.coeffRef(i) = conj(alpha); + m_alphas.coeffRef(i+1) = alpha; + + if (computeEigenvectors) { + // Compute eigenvector in position (i+1) and then position (i) is just the conjugate + cv.setZero(); + cv.coeffRef(i+1) = Scalar(1.0); + // here, the "static_cast" workaound expression template issues. + cv.coeffRef(i) = -(static_cast(beta*mS.coeffRef(i,i+1)) - alpha*mT.coeffRef(i,i+1)) + / (static_cast(beta*mS.coeffRef(i,i)) - alpha*mT.coeffRef(i,i)); + for (Index j = i-1; j >= 0; j--) + { + const Index st = j+1; + const Index sz = i+1-j; + if (j > 0 && mS.coeff(j, j-1) != Scalar(0)) + { + // 2x2 block + Matrix rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( cv.segment(st,sz) ); + Matrix lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1); + cv.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs); + j--; + } else { + cv.coeffRef(j) = cv.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum() + / (alpha*mT.coeffRef(j,j) - static_cast(beta*mS.coeffRef(j,j))); + } + } + m_eivec.col(i+1).noalias() = (m_realQZ.matrixZ().transpose() * cv); + m_eivec.col(i+1).normalize(); + m_eivec.col(i) = m_eivec.col(i+1).conjugate(); + } + i += 2; + } + } + + m_valuesOkay = true; + m_vectorsOkay = computeEigenvectors; + } + return *this; +} + +} // end namespace Eigen + +#endif // EIGEN_GENERALIZEDEIGENSOLVER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h new file mode 100644 index 000000000..5f6bb8289 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H +#define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H + +#include "./Tridiagonalization.h" + +namespace Eigen { + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class GeneralizedSelfAdjointEigenSolver + * + * \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem + * + * \tparam _MatrixType the type of the matrix of which we are computing the + * eigendecomposition; this is expected to be an instantiation of the Matrix + * class template. + * + * This class solves the generalized eigenvalue problem + * \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be + * selfadjoint and the matrix \f$ B \f$ should be positive definite. + * + * Only the \b lower \b triangular \b part of the input matrix is referenced. + * + * Call the function compute() to compute the eigenvalues and eigenvectors of + * a given matrix. Alternatively, you can use the + * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) + * constructor which computes the eigenvalues and eigenvectors at construction time. + * Once the eigenvalue and eigenvectors are computed, they can be retrieved with the eigenvalues() + * and eigenvectors() functions. + * + * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) + * contains an example of the typical use of this class. + * + * \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver + */ +template +class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType> +{ + typedef SelfAdjointEigenSolver<_MatrixType> Base; + public: + + typedef _MatrixType MatrixType; + + /** \brief Default constructor for fixed-size matrices. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via compute(). This constructor + * can only be used if \p _MatrixType is a fixed-size matrix; use + * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices. + */ + GeneralizedSelfAdjointEigenSolver() : Base() {} + + /** \brief Constructor, pre-allocates memory for dynamic-size matrices. + * + * \param [in] size Positive integer, size of the matrix whose + * eigenvalues and eigenvectors will be computed. + * + * This constructor is useful for dynamic-size matrices, when the user + * intends to perform decompositions via compute(). The \p size + * parameter is only used as a hint. It is not an error to give a wrong + * \p size, but it may impair performance. + * + * \sa compute() for an example + */ + explicit GeneralizedSelfAdjointEigenSolver(Index size) + : Base(size) + {} + + /** \brief Constructor; computes generalized eigendecomposition of given matrix pencil. + * + * \param[in] matA Selfadjoint matrix in matrix pencil. + * Only the lower triangular part of the matrix is referenced. + * \param[in] matB Positive-definite matrix in matrix pencil. + * Only the lower triangular part of the matrix is referenced. + * \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}. + * Default is #ComputeEigenvectors|#Ax_lBx. 
+      *
+      * This constructor calls compute(const MatrixType&, const MatrixType&, int)
+      * to compute the eigenvalues and (if requested) the eigenvectors of the
+      * generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
+      * selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
+      * \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property
+      * \f$ x^* B x = 1 \f$. The eigenvectors are computed if
+      * \a options contains ComputeEigenvectors.
+      *
+      * In addition, the two following variants can be solved via \p options:
+      * - \c ABx_lx: \f$ ABx = \lambda x \f$
+      * - \c BAx_lx: \f$ BAx = \lambda x \f$
+      *
+      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
+      *
+      * \sa compute(const MatrixType&, const MatrixType&, int)
+      */
+    GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB,
+                                      int options = ComputeEigenvectors|Ax_lBx)
+      : Base(matA.cols())
+    {
+      compute(matA, matB, options);
+    }
+
+    /** \brief Computes generalized eigendecomposition of given matrix pencil.
+      *
+      * \param[in]  matA  Selfadjoint matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  matB  Positive-definite matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  options  An OR-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
+      *                      Default is #ComputeEigenvectors|#Ax_lBx.
+      *
+      * \returns    Reference to \c *this
+      *
+      * According to \p options, this function computes eigenvalues and (if requested)
+      * the eigenvectors of one of the following three generalized eigenproblems:
+      * - \c Ax_lBx: \f$ Ax = \lambda B x \f$
+      * - \c ABx_lx: \f$ ABx = \lambda x \f$
+      * - \c BAx_lx: \f$ BAx = \lambda x \f$
+      * with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
+      * matrix \f$ B \f$.
+      * In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$.
+      *
+      * The eigenvalues() function can be used to retrieve
+      * the eigenvalues. If \p options contains ComputeEigenvectors, then the
+      * eigenvectors are also computed and can be retrieved by calling
+      * eigenvectors().
+      *
+      * The implementation uses LLT to compute the Cholesky decomposition
+      * \f$ B = LL^* \f$ and computes the classical eigendecomposition
+      * of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx
+      * and of \f$ L^{*} A L \f$ otherwise. This solves the
+      * generalized eigenproblem, because any solution of the generalized
+      * eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
+      * \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
+      * eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements
+      * can be made for the two other variants.
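+      *
+      * A minimal sketch of the default \c Ax_lBx variant (editorial; the
+      * symmetric \c A and positive definite \c B below are assumptions):
+      * \code
+      * Eigen::MatrixXd M = Eigen::MatrixXd::Random(4,4);
+      * Eigen::MatrixXd A = M + M.transpose();              // selfadjoint
+      * Eigen::MatrixXd N = Eigen::MatrixXd::Random(4,4);
+      * Eigen::MatrixXd B = N * N.transpose()
+      *                   + Eigen::MatrixXd::Identity(4,4); // positive definite
+      * Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> ges(A, B);
+      * std::cout << ges.eigenvalues() << std::endl; // sorted in increasing order
+      * \endcode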
+     *
+     * Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp
+     * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out
+     *
+     * \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+     */
+    GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB,
+                                               int options = ComputeEigenvectors|Ax_lBx);
+
+  protected:
+
+};
+
+
+template<typename MatrixType>
+GeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>::
+compute(const MatrixType& matA, const MatrixType& matB, int options)
+{
+  eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0
+               && (options&EigVecMask)!=EigVecMask
+               && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx
+                   || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx)
+               && "invalid option parameter");
+
+  bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors);
+
+  // Compute the cholesky decomposition of matB = L L' = U'U
+  LLT<MatrixType,Lower> cholB(matB);
+
+  int type = (options&GenEigMask);
+  if(type==0)
+    type = Ax_lBx;
+
+  if(type==Ax_lBx)
+  {
+    // compute C = inv(L) A inv(L')
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    cholB.matrixL().template solveInPlace<OnTheLeft>(matC);
+    cholB.matrixU().template solveInPlace<OnTheRight>(matC);
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly );
+
+    // transform back the eigen vectors: evecs = inv(U) * evecs
+    if(computeEigVecs)
+      cholB.matrixU().solveInPlace(Base::m_eivec);
+  }
+  else if(type==ABx_lx)
+  {
+    // compute C = L' A L
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    matC = matC * cholB.matrixL();
+    matC = cholB.matrixU() * matC;
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+    // transform back the eigen vectors: evecs = inv(U) * evecs
+    if(computeEigVecs)
+      cholB.matrixU().solveInPlace(Base::m_eivec);
+  }
+  else if(type==BAx_lx)
+  {
+    // compute C = L' A L
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    matC = matC * cholB.matrixL();
+    matC = cholB.matrixU() * matC;
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+    // transform back the eigen vectors: evecs = L * evecs
+    if(computeEigVecs)
+      Base::m_eivec = cholB.matrixL() * Base::m_eivec;
+  }
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
new file mode 100644
index 000000000..f647f69b0
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -0,0 +1,374 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud
+// Copyright (C) 2010 Jitse Niesen
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
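A minimal usage sketch for the generalized solver defined above (an illustration, not part of the vendored sources; it assumes only that <Eigen/Dense> is on the include path and uses arbitrary sizes and values):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A must be selfadjoint (symmetric for real scalars); build one from a random X.
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(4,4);
  Eigen::MatrixXd A = X + X.transpose();

  // B must be positive definite; Y*Y^T + 4*I is a safe construction.
  Eigen::MatrixXd Y = Eigen::MatrixXd::Random(4,4);
  Eigen::MatrixXd B = Y * Y.transpose() + 4.0 * Eigen::MatrixXd::Identity(4,4);

  // Solves A v = lambda B v (the default Ax_lBx variant); only the lower
  // triangular parts of A and B are referenced.
  Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, B);
  std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";
  // Each eigenvector v is normalized so that v^T B v = 1.
  std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
}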
+ +#ifndef EIGEN_HESSENBERGDECOMPOSITION_H +#define EIGEN_HESSENBERGDECOMPOSITION_H + +namespace Eigen { + +namespace internal { + +template struct HessenbergDecompositionMatrixHReturnType; +template +struct traits > +{ + typedef MatrixType ReturnType; +}; + +} + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class HessenbergDecomposition + * + * \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation + * + * \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition + * + * This class performs an Hessenberg decomposition of a matrix \f$ A \f$. In + * the real case, the Hessenberg decomposition consists of an orthogonal + * matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H + * Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its + * transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the + * subdiagonal, so it is almost upper triangular. The Hessenberg decomposition + * of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is, + * \f$ Q^{-1} = Q^* \f$). + * + * Call the function compute() to compute the Hessenberg decomposition of a + * given matrix. Alternatively, you can use the + * HessenbergDecomposition(const MatrixType&) constructor which computes the + * Hessenberg decomposition at construction time. Once the decomposition is + * computed, you can use the matrixH() and matrixQ() functions to construct + * the matrices H and Q in the decomposition. + * + * The documentation for matrixH() contains an example of the typical use of + * this class. + * + * \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module" + */ +template class HessenbergDecomposition +{ + public: + + /** \brief Synonym for the template parameter \p _MatrixType. */ + typedef _MatrixType MatrixType; + + enum { + Size = MatrixType::RowsAtCompileTime, + SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1, + Options = MatrixType::Options, + MaxSize = MatrixType::MaxRowsAtCompileTime, + MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1 + }; + + /** \brief Scalar type for matrices of type #MatrixType. */ + typedef typename MatrixType::Scalar Scalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + + /** \brief Type for vector of Householder coefficients. + * + * This is column vector with entries of type #Scalar. The length of the + * vector is one less than the size of #MatrixType, if it is a fixed-side + * type. + */ + typedef Matrix CoeffVectorType; + + /** \brief Return type of matrixQ() */ + typedef HouseholderSequence::type> HouseholderSequenceType; + + typedef internal::HessenbergDecompositionMatrixHReturnType MatrixHReturnType; + + /** \brief Default constructor; the decomposition will be computed later. + * + * \param [in] size The size of the matrix whose Hessenberg decomposition will be computed. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via compute(). The \p size parameter is only + * used as a hint. It is not an error to give a wrong \p size, but it may + * impair performance. + * + * \sa compute() for an example. + */ + explicit HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size) + : m_matrix(size,size), + m_temp(size), + m_isInitialized(false) + { + if(size>1) + m_hCoeffs.resize(size-1); + } + + /** \brief Constructor; computes Hessenberg decomposition of given matrix. 
+ * + * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed. + * + * This constructor calls compute() to compute the Hessenberg + * decomposition. + * + * \sa matrixH() for an example. + */ + template + explicit HessenbergDecomposition(const EigenBase& matrix) + : m_matrix(matrix.derived()), + m_temp(matrix.rows()), + m_isInitialized(false) + { + if(matrix.rows()<2) + { + m_isInitialized = true; + return; + } + m_hCoeffs.resize(matrix.rows()-1,1); + _compute(m_matrix, m_hCoeffs, m_temp); + m_isInitialized = true; + } + + /** \brief Computes Hessenberg decomposition of given matrix. + * + * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed. + * \returns Reference to \c *this + * + * The Hessenberg decomposition is computed by bringing the columns of the + * matrix successively in the required form using Householder reflections + * (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, %Matrix + * Computations). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$ + * denotes the size of the given matrix. + * + * This method reuses of the allocated data in the HessenbergDecomposition + * object. + * + * Example: \include HessenbergDecomposition_compute.cpp + * Output: \verbinclude HessenbergDecomposition_compute.out + */ + template + HessenbergDecomposition& compute(const EigenBase& matrix) + { + m_matrix = matrix.derived(); + if(matrix.rows()<2) + { + m_isInitialized = true; + return *this; + } + m_hCoeffs.resize(matrix.rows()-1,1); + _compute(m_matrix, m_hCoeffs, m_temp); + m_isInitialized = true; + return *this; + } + + /** \brief Returns the Householder coefficients. + * + * \returns a const reference to the vector of Householder coefficients + * + * \pre Either the constructor HessenbergDecomposition(const MatrixType&) + * or the member function compute(const MatrixType&) has been called + * before to compute the Hessenberg decomposition of a matrix. + * + * The Householder coefficients allow the reconstruction of the matrix + * \f$ Q \f$ in the Hessenberg decomposition from the packed data. + * + * \sa packedMatrix(), \ref Householder_Module "Householder module" + */ + const CoeffVectorType& householderCoefficients() const + { + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return m_hCoeffs; + } + + /** \brief Returns the internal representation of the decomposition + * + * \returns a const reference to a matrix with the internal representation + * of the decomposition. + * + * \pre Either the constructor HessenbergDecomposition(const MatrixType&) + * or the member function compute(const MatrixType&) has been called + * before to compute the Hessenberg decomposition of a matrix. + * + * The returned matrix contains the following information: + * - the upper part and lower sub-diagonal represent the Hessenberg matrix H + * - the rest of the lower part contains the Householder vectors that, combined with + * Householder coefficients returned by householderCoefficients(), + * allows to reconstruct the matrix Q as + * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$. + * Here, the matrices \f$ H_i \f$ are the Householder transformations + * \f$ H_i = (I - h_i v_i v_i^T) \f$ + * where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and + * \f$ v_i \f$ is the Householder vector defined by + * \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$ + * with M the matrix returned by this function. + * + * See LAPACK for further details on this packed storage. 
+ * + * Example: \include HessenbergDecomposition_packedMatrix.cpp + * Output: \verbinclude HessenbergDecomposition_packedMatrix.out + * + * \sa householderCoefficients() + */ + const MatrixType& packedMatrix() const + { + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return m_matrix; + } + + /** \brief Reconstructs the orthogonal matrix Q in the decomposition + * + * \returns object representing the matrix Q + * + * \pre Either the constructor HessenbergDecomposition(const MatrixType&) + * or the member function compute(const MatrixType&) has been called + * before to compute the Hessenberg decomposition of a matrix. + * + * This function returns a light-weight object of template class + * HouseholderSequence. You can either apply it directly to a matrix or + * you can convert it to a matrix of type #MatrixType. + * + * \sa matrixH() for an example, class HouseholderSequence + */ + HouseholderSequenceType matrixQ() const + { + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate()) + .setLength(m_matrix.rows() - 1) + .setShift(1); + } + + /** \brief Constructs the Hessenberg matrix H in the decomposition + * + * \returns expression object representing the matrix H + * + * \pre Either the constructor HessenbergDecomposition(const MatrixType&) + * or the member function compute(const MatrixType&) has been called + * before to compute the Hessenberg decomposition of a matrix. + * + * The object returned by this function constructs the Hessenberg matrix H + * when it is assigned to a matrix or otherwise evaluated. The matrix H is + * constructed from the packed matrix as returned by packedMatrix(): The + * upper part (including the subdiagonal) of the packed matrix contains + * the matrix H. It may sometimes be better to directly use the packed + * matrix instead of constructing the matrix H. + * + * Example: \include HessenbergDecomposition_matrixH.cpp + * Output: \verbinclude HessenbergDecomposition_matrixH.out + * + * \sa matrixQ(), packedMatrix() + */ + MatrixHReturnType matrixH() const + { + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return MatrixHReturnType(*this); + } + + private: + + typedef Matrix VectorType; + typedef typename NumTraits::Real RealScalar; + static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp); + + protected: + MatrixType m_matrix; + CoeffVectorType m_hCoeffs; + VectorType m_temp; + bool m_isInitialized; +}; + +/** \internal + * Performs a tridiagonal decomposition of \a matA in place. + * + * \param matA the input selfadjoint matrix + * \param hCoeffs returned Householder coefficients + * + * The result is written in the lower triangular part of \a matA. + * + * Implemented from Golub's "%Matrix Computations", algorithm 8.3.1. + * + * \sa packedMatrix() + */ +template +void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp) +{ + eigen_assert(matA.rows()==matA.cols()); + Index n = matA.rows(); + temp.resize(n); + for (Index i = 0; i struct HessenbergDecompositionMatrixHReturnType +: public ReturnByValue > +{ + public: + /** \brief Constructor. + * + * \param[in] hess Hessenberg decomposition + */ + HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition& hess) : m_hess(hess) { } + + /** \brief Hessenberg matrix in decomposition. 
+ * + * \param[out] result Hessenberg matrix in decomposition \p hess which + * was passed to the constructor + */ + template + inline void evalTo(ResultType& result) const + { + result = m_hess.packedMatrix(); + Index n = result.rows(); + if (n>2) + result.bottomLeftCorner(n-2, n-2).template triangularView().setZero(); + } + + Index rows() const { return m_hess.packedMatrix().rows(); } + Index cols() const { return m_hess.packedMatrix().cols(); } + + protected: + const HessenbergDecomposition& m_hess; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_HESSENBERGDECOMPOSITION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h new file mode 100644 index 000000000..e4e426071 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h @@ -0,0 +1,158 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXBASEEIGENVALUES_H +#define EIGEN_MATRIXBASEEIGENVALUES_H + +namespace Eigen { + +namespace internal { + +template +struct eigenvalues_selector +{ + // this is the implementation for the case IsComplex = true + static inline typename MatrixBase::EigenvaluesReturnType const + run(const MatrixBase& m) + { + typedef typename Derived::PlainObject PlainObject; + PlainObject m_eval(m); + return ComplexEigenSolver(m_eval, false).eigenvalues(); + } +}; + +template +struct eigenvalues_selector +{ + static inline typename MatrixBase::EigenvaluesReturnType const + run(const MatrixBase& m) + { + typedef typename Derived::PlainObject PlainObject; + PlainObject m_eval(m); + return EigenSolver(m_eval, false).eigenvalues(); + } +}; + +} // end namespace internal + +/** \brief Computes the eigenvalues of a matrix + * \returns Column vector containing the eigenvalues. + * + * \eigenvalues_module + * This function computes the eigenvalues with the help of the EigenSolver + * class (for real matrices) or the ComplexEigenSolver class (for complex + * matrices). + * + * The eigenvalues are repeated according to their algebraic multiplicity, + * so there are as many eigenvalues as rows in the matrix. + * + * The SelfAdjointView class provides a better algorithm for selfadjoint + * matrices. + * + * Example: \include MatrixBase_eigenvalues.cpp + * Output: \verbinclude MatrixBase_eigenvalues.out + * + * \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(), + * SelfAdjointView::eigenvalues() + */ +template +inline typename MatrixBase::EigenvaluesReturnType +MatrixBase::eigenvalues() const +{ + return internal::eigenvalues_selector::IsComplex>::run(derived()); +} + +/** \brief Computes the eigenvalues of a matrix + * \returns Column vector containing the eigenvalues. + * + * \eigenvalues_module + * This function computes the eigenvalues with the help of the + * SelfAdjointEigenSolver class. 
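Likewise, a short sketch of the matrixH()/matrixQ() workflow described in the HessenbergDecomposition documentation above (illustrative only; same <Eigen/Dense> assumption):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);

  // Reduce A to Hessenberg form: A = Q H Q^T.
  Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
  Eigen::MatrixXd H = hd.matrixH();   // zero below the first subdiagonal
  Eigen::MatrixXd Q = hd.matrixQ();   // orthogonal factor

  // Q H Q^T reconstructs A up to rounding error.
  std::cout << "||A - Q H Q^T|| = " << (A - Q * H * Q.transpose()).norm() << "\n";
}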
The eigenvalues are repeated according to + * their algebraic multiplicity, so there are as many eigenvalues as rows in + * the matrix. + * + * Example: \include SelfAdjointView_eigenvalues.cpp + * Output: \verbinclude SelfAdjointView_eigenvalues.out + * + * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues() + */ +template +inline typename SelfAdjointView::EigenvaluesReturnType +SelfAdjointView::eigenvalues() const +{ + PlainObject thisAsMatrix(*this); + return SelfAdjointEigenSolver(thisAsMatrix, false).eigenvalues(); +} + + + +/** \brief Computes the L2 operator norm + * \returns Operator norm of the matrix. + * + * \eigenvalues_module + * This function computes the L2 operator norm of a matrix, which is also + * known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be + * \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f] + * where the maximum is over all vectors and the norm on the right is the + * Euclidean vector norm. The norm equals the largest singular value, which is + * the square root of the largest eigenvalue of the positive semi-definite + * matrix \f$ A^*A \f$. + * + * The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed + * by SelfAdjointView::eigenvalues(), to compute the operator norm of a + * matrix. The SelfAdjointView class provides a better algorithm for + * selfadjoint matrices. + * + * Example: \include MatrixBase_operatorNorm.cpp + * Output: \verbinclude MatrixBase_operatorNorm.out + * + * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm() + */ +template +inline typename MatrixBase::RealScalar +MatrixBase::operatorNorm() const +{ + using std::sqrt; + typename Derived::PlainObject m_eval(derived()); + // FIXME if it is really guaranteed that the eigenvalues are already sorted, + // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. + return sqrt((m_eval*m_eval.adjoint()) + .eval() + .template selfadjointView() + .eigenvalues() + .maxCoeff() + ); +} + +/** \brief Computes the L2 operator norm + * \returns Operator norm of the matrix. + * + * \eigenvalues_module + * This function computes the L2 operator norm of a self-adjoint matrix. For a + * self-adjoint matrix, the operator norm is the largest eigenvalue. + * + * The current implementation uses the eigenvalues of the matrix, as computed + * by eigenvalues(), to compute the operator norm of the matrix. + * + * Example: \include SelfAdjointView_operatorNorm.cpp + * Output: \verbinclude SelfAdjointView_operatorNorm.out + * + * \sa eigenvalues(), MatrixBase::operatorNorm() + */ +template +inline typename SelfAdjointView::RealScalar +SelfAdjointView::operatorNorm() const +{ + return eigenvalues().cwiseAbs().maxCoeff(); +} + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealQZ.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealQZ.h new file mode 100644 index 000000000..b3a910dd9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealQZ.h @@ -0,0 +1,654 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Alexey Korepanov +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_QZ_H
+#define EIGEN_REAL_QZ_H
+
+namespace Eigen {
+
+  /** \eigenvalues_module \ingroup Eigenvalues_Module
+    *
+    *
+    * \class RealQZ
+    *
+    * \brief Performs a real QZ decomposition of a pair of square matrices
+    *
+    * \tparam _MatrixType the type of the matrix of which we are computing the
+    * real QZ decomposition; this is expected to be an instantiation of the
+    * Matrix class template.
+    *
+    * Given real square matrices A and B, this class computes the real QZ
+    * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
+    * real orthogonal matrices, T is an upper-triangular matrix, and S is an
+    * upper quasi-triangular matrix. An orthogonal matrix is a matrix whose
+    * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+    * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+    * blocks and 2-by-2 blocks where further reduction is impossible due to
+    * complex eigenvalues.
+    *
+    * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
+    * 1x1 and 2x2 blocks on the diagonals of S and T.
+    *
+    * Call the function compute() to compute the real QZ decomposition of a
+    * given pair of matrices. Alternatively, you can use the
+    * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
+    * constructor which computes the real QZ decomposition at construction
+    * time. Once the decomposition is computed, you can use the matrixS(),
+    * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
+    * S, T, Q and Z in the decomposition. If computeQZ==false, some time
+    * is saved by not computing matrices Q and Z.
+    *
+    * Example: \include RealQZ_compute.cpp
+    * Output: \verbinclude RealQZ_compute.out
+    *
+    * \note The implementation is based on the algorithm in "Matrix Computations"
+    * by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for
+    * generalized eigenvalue problems" by C.B.Moler and G.W.Stewart.
+    *
+    * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+    */
+
+  template<typename _MatrixType> class RealQZ
+  {
+    public:
+      typedef _MatrixType MatrixType;
+      enum {
+        RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+        ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+        Options = MatrixType::Options,
+        MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+        MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+      };
+      typedef typename MatrixType::Scalar Scalar;
+      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+      typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+
+      typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+      typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+      /** \brief Default constructor.
+        *
+        * \param [in] size  Positive integer, size of the matrix whose QZ decomposition will be computed.
+        *
+        * The default constructor is useful in cases in which the user intends to
+        * perform decompositions via compute(). The \p size parameter is only
+        * used as a hint. It is not an error to give a wrong \p size, but it may
+        * impair performance.
+        *
+        * \sa compute() for an example.
+        */
+      explicit RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) :
+        m_S(size, size),
+        m_T(size, size),
+        m_Q(size, size),
+        m_Z(size, size),
+        m_workspace(size*2),
+        m_maxIters(400),
+        m_isInitialized(false)
+      { }
+
+      /** \brief Constructor; computes real QZ decomposition of given matrices
+        *
+        * \param[in] A  Matrix A.
+        * \param[in] B  Matrix B.
+        * \param[in] computeQZ  If false, Q and Z are not computed.
+        *
+        * This constructor calls compute() to compute the QZ decomposition.
+        */
+      RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
+        m_S(A.rows(),A.cols()),
+        m_T(A.rows(),A.cols()),
+        m_Q(A.rows(),A.cols()),
+        m_Z(A.rows(),A.cols()),
+        m_workspace(A.rows()*2),
+        m_maxIters(400),
+        m_isInitialized(false) {
+          compute(A, B, computeQZ);
+        }
+
+      /** \brief Returns matrix Q in the QZ decomposition.
+        *
+        * \returns A const reference to the matrix Q.
+        */
+      const MatrixType& matrixQ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Q;
+      }
+
+      /** \brief Returns matrix Z in the QZ decomposition.
+        *
+        * \returns A const reference to the matrix Z.
+        */
+      const MatrixType& matrixZ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Z;
+      }
+
+      /** \brief Returns matrix S in the QZ decomposition.
+        *
+        * \returns A const reference to the matrix S.
+        */
+      const MatrixType& matrixS() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_S;
+      }
+
+      /** \brief Returns matrix T in the QZ decomposition.
+        *
+        * \returns A const reference to the matrix T.
+        */
+      const MatrixType& matrixT() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_T;
+      }
+
+      /** \brief Computes QZ decomposition of given matrices.
+        *
+        * \param[in] A  Matrix A.
+        * \param[in] B  Matrix B.
+        * \param[in] computeQZ  If false, Q and Z are not computed.
+        * \returns Reference to \c *this
+        */
+      RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
+
+      /** \brief Reports whether previous computation was successful.
+        *
+        * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+        */
+      ComputationInfo info() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_info;
+      }
+
+      /** \brief Returns number of performed QR-like iterations.
+        */
+      Index iterations() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_global_iter;
+      }
+
+      /** Sets the maximal number of iterations allowed to converge to one eigenvalue
+        * or decouple the problem.
+ */ + RealQZ& setMaxIterations(Index maxIters) + { + m_maxIters = maxIters; + return *this; + } + + private: + + MatrixType m_S, m_T, m_Q, m_Z; + Matrix m_workspace; + ComputationInfo m_info; + Index m_maxIters; + bool m_isInitialized; + bool m_computeQZ; + Scalar m_normOfT, m_normOfS; + Index m_global_iter; + + typedef Matrix Vector3s; + typedef Matrix Vector2s; + typedef Matrix Matrix2s; + typedef JacobiRotation JRs; + + void hessenbergTriangular(); + void computeNorms(); + Index findSmallSubdiagEntry(Index iu); + Index findSmallDiagEntry(Index f, Index l); + void splitOffTwoRows(Index i); + void pushDownZero(Index z, Index f, Index l); + void step(Index f, Index l, Index iter); + + }; // RealQZ + + /** \internal Reduces S and T to upper Hessenberg - triangular form */ + template + void RealQZ::hessenbergTriangular() + { + + const Index dim = m_S.cols(); + + // perform QR decomposition of T, overwrite T with R, save Q + HouseholderQR qrT(m_T); + m_T = qrT.matrixQR(); + m_T.template triangularView().setZero(); + m_Q = qrT.householderQ(); + // overwrite S with Q* S + m_S.applyOnTheLeft(m_Q.adjoint()); + // init Z as Identity + if (m_computeQZ) + m_Z = MatrixType::Identity(dim,dim); + // reduce S to upper Hessenberg with Givens rotations + for (Index j=0; j<=dim-3; j++) { + for (Index i=dim-1; i>=j+2; i--) { + JRs G; + // kill S(i,j) + if(m_S.coeff(i,j) != 0) + { + G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j)); + m_S.coeffRef(i,j) = Scalar(0.0); + m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint()); + m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint()); + // update Q + if (m_computeQZ) + m_Q.applyOnTheRight(i-1,i,G); + } + // kill T(i,i-1) + if(m_T.coeff(i,i-1)!=Scalar(0)) + { + G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i)); + m_T.coeffRef(i,i-1) = Scalar(0.0); + m_S.applyOnTheRight(i,i-1,G); + m_T.topRows(i).applyOnTheRight(i,i-1,G); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(i,i-1,G.adjoint()); + } + } + } + } + + /** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */ + template + inline void RealQZ::computeNorms() + { + const Index size = m_S.cols(); + m_normOfS = Scalar(0.0); + m_normOfT = Scalar(0.0); + for (Index j = 0; j < size; ++j) + { + m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); + m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum(); + } + } + + + /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */ + template + inline Index RealQZ::findSmallSubdiagEntry(Index iu) + { + using std::abs; + Index res = iu; + while (res > 0) + { + Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res)); + if (s == Scalar(0.0)) + s = m_normOfS; + if (abs(m_S.coeff(res,res-1)) < NumTraits::epsilon() * s) + break; + res--; + } + return res; + } + + /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */ + template + inline Index RealQZ::findSmallDiagEntry(Index f, Index l) + { + using std::abs; + Index res = l; + while (res >= f) { + if (abs(m_T.coeff(res,res)) <= NumTraits::epsilon() * m_normOfT) + break; + res--; + } + return res; + } + + /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */ + template + inline void RealQZ::splitOffTwoRows(Index i) + { + using std::abs; + using std::sqrt; + const Index dim=m_S.cols(); + if (abs(m_S.coeff(i+1,i))==Scalar(0)) + return; + Index j = findSmallDiagEntry(i,i+1); + if 
(j==i-1) + { + // block of (S T^{-1}) + Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView(). + template solve(m_S.template block<2,2>(i,i)); + Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1)); + Scalar q = p*p + STi(1,0)*STi(0,1); + if (q>=0) { + Scalar z = sqrt(q); + // one QR-like iteration for ABi - lambda I + // is enough - when we know exact eigenvalue in advance, + // convergence is immediate + JRs G; + if (p>=0) + G.makeGivens(p + z, STi(1,0)); + else + G.makeGivens(p - z, STi(1,0)); + m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); + m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); + // update Q + if (m_computeQZ) + m_Q.applyOnTheRight(i,i+1,G); + + G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i)); + m_S.topRows(i+2).applyOnTheRight(i+1,i,G); + m_T.topRows(i+2).applyOnTheRight(i+1,i,G); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(i+1,i,G.adjoint()); + + m_S.coeffRef(i+1,i) = Scalar(0.0); + m_T.coeffRef(i+1,i) = Scalar(0.0); + } + } + else + { + pushDownZero(j,i,i+1); + } + } + + /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */ + template + inline void RealQZ::pushDownZero(Index z, Index f, Index l) + { + JRs G; + const Index dim = m_S.cols(); + for (Index zz=z; zzf ? (zz-1) : zz; + G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1)); + m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint()); + m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint()); + m_T.coeffRef(zz+1,zz+1) = Scalar(0.0); + // update Q + if (m_computeQZ) + m_Q.applyOnTheRight(zz,zz+1,G); + // kill S(zz+1, zz-1) + if (zz>f) + { + G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1)); + m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G); + m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G); + m_S.coeffRef(zz+1,zz-1) = Scalar(0.0); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(zz,zz-1,G.adjoint()); + } + } + // finally kill S(l,l-1) + G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1)); + m_S.applyOnTheRight(l,l-1,G); + m_T.applyOnTheRight(l,l-1,G); + m_S.coeffRef(l,l-1)=Scalar(0.0); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(l,l-1,G.adjoint()); + } + + /** \internal QR-like iterative step for block f..l */ + template + inline void RealQZ::step(Index f, Index l, Index iter) + { + using std::abs; + const Index dim = m_S.cols(); + + // x, y, z + Scalar x, y, z; + if (iter==10) + { + // Wilkinson ad hoc shift + const Scalar + a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1), + a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1), + b12=m_T.coeff(f+0,f+1), + b11i=Scalar(1.0)/m_T.coeff(f+0,f+0), + b22i=Scalar(1.0)/m_T.coeff(f+1,f+1), + a87=m_S.coeff(l-1,l-2), + a98=m_S.coeff(l-0,l-1), + b77i=Scalar(1.0)/m_T.coeff(l-2,l-2), + b88i=Scalar(1.0)/m_T.coeff(l-1,l-1); + Scalar ss = abs(a87*b77i) + abs(a98*b88i), + lpl = Scalar(1.5)*ss, + ll = ss*ss; + x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i + - a11*a21*b12*b11i*b11i*b22i; + y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i + - a21*a21*b12*b11i*b11i*b22i; + z = a21*a32*b11i*b22i; + } + else if (iter==16) + { + // another exceptional shift + x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) / + (m_T.coeff(l-1,l-1)*m_T.coeff(l,l)); + y = m_S.coeff(f+1,f)/m_T.coeff(f,f); + z = 0; + } + else if (iter>23 && !(iter%8)) + { + // extremely exceptional shift + x = internal::random(-1.0,1.0); + y = internal::random(-1.0,1.0); + z = internal::random(-1.0,1.0); + } + else + { + // Compute the shifts: 
(x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1 + // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where + // U and V are 2x2 bottom right sub matrices of A and B. Thus: + // = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1) + // = AB^-1AB^-1 + det(M) - tr(M)(AB^-1) + // Since we are only interested in having x, y, z with a correct ratio, we have: + const Scalar + a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1), + a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1), + a32 = m_S.coeff(f+2,f+1), + + a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l), + a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l), + + b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1), + b22 = m_T.coeff(f+1,f+1), + + b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l), + b99 = m_T.coeff(l,l); + + x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21) + + a12/b22 - (a11/b11)*(b12/b22); + y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99); + z = a32/b22; + } + + JRs G; + + for (Index k=f; k<=l-2; k++) + { + // variables for Householder reflections + Vector2s essential2; + Scalar tau, beta; + + Vector3s hr(x,y,z); + + // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1) + hr.makeHouseholderInPlace(tau, beta); + essential2 = hr.template bottomRows<2>(); + Index fc=(std::max)(k-1,Index(0)); // first col to update + m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); + m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); + if (m_computeQZ) + m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data()); + if (k>f) + m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0); + + // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k) + hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1); + hr.makeHouseholderInPlace(tau, beta); + essential2 = hr.template bottomRows<2>(); + { + Index lr = (std::min)(k+4,dim); // last row to update + Map > tmp(m_workspace.data(),lr); + // S + tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2; + tmp += m_S.col(k+2).head(lr); + m_S.col(k+2).head(lr) -= tau*tmp; + m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); + // T + tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2; + tmp += m_T.col(k+2).head(lr); + m_T.col(k+2).head(lr) -= tau*tmp; + m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); + } + if (m_computeQZ) + { + // Z + Map > tmp(m_workspace.data(),dim); + tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k)); + tmp += m_Z.row(k+2); + m_Z.row(k+2) -= tau*tmp; + m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp); + } + m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0); + + // Z_{k2} to annihilate T(k+1,k) + G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k)); + m_S.applyOnTheRight(k+1,k,G); + m_T.applyOnTheRight(k+1,k,G); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(k+1,k,G.adjoint()); + m_T.coeffRef(k+1,k) = Scalar(0.0); + + // update x,y,z + x = m_S.coeff(k+1,k); + y = m_S.coeff(k+2,k); + if (k < l-2) + z = m_S.coeff(k+3,k); + } // loop over k + + // Q_{n-1} to annihilate y = S(l,l-2) + G.makeGivens(x,y); + m_S.applyOnTheLeft(l-1,l,G.adjoint()); + m_T.applyOnTheLeft(l-1,l,G.adjoint()); + if (m_computeQZ) + m_Q.applyOnTheRight(l-1,l,G); + m_S.coeffRef(l,l-2) = Scalar(0.0); + + // Z_{n-1} to annihilate T(l,l-1) + 
G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1)); + m_S.applyOnTheRight(l,l-1,G); + m_T.applyOnTheRight(l,l-1,G); + if (m_computeQZ) + m_Z.applyOnTheLeft(l,l-1,G.adjoint()); + m_T.coeffRef(l,l-1) = Scalar(0.0); + } + + template + RealQZ& RealQZ::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ) + { + + const Index dim = A_in.cols(); + + eigen_assert (A_in.rows()==dim && A_in.cols()==dim + && B_in.rows()==dim && B_in.cols()==dim + && "Need square matrices of the same dimension"); + + m_isInitialized = true; + m_computeQZ = computeQZ; + m_S = A_in; m_T = B_in; + m_workspace.resize(dim*2); + m_global_iter = 0; + + // entrance point: hessenberg triangular decomposition + hessenbergTriangular(); + // compute L1 vector norms of T, S into m_normOfS, m_normOfT + computeNorms(); + + Index l = dim-1, + f, + local_iter = 0; + + while (l>0 && local_iter0) m_S.coeffRef(f,f-1) = Scalar(0.0); + if (f == l) // One root found + { + l--; + local_iter = 0; + } + else if (f == l-1) // Two roots found + { + splitOffTwoRows(f); + l -= 2; + local_iter = 0; + } + else // No convergence yet + { + // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations + Index z = findSmallDiagEntry(f,l); + if (z>=f) + { + // zero found + pushDownZero(z,f,l); + } + else + { + // We are sure now that S.block(f,f, l-f+1,l-f+1) is underuced upper-Hessenberg + // and T.block(f,f, l-f+1,l-f+1) is invertible uper-triangular, which allows to + // apply a QR-like iteration to rows and columns f..l. + step(f,l, local_iter); + local_iter++; + m_global_iter++; + } + } + } + // check if we converged before reaching iterations limit + m_info = (local_iter j_left, j_right; + internal::real_2x2_jacobi_svd(m_T, i, i+1, &j_left, &j_right); + + // Apply resulting Jacobi rotations + m_S.applyOnTheLeft(i,i+1,j_left); + m_S.applyOnTheRight(i,i+1,j_right); + m_T.applyOnTheLeft(i,i+1,j_left); + m_T.applyOnTheRight(i,i+1,j_right); + m_T(i+1,i) = m_T(i,i+1) = Scalar(0); + + if(m_computeQZ) { + m_Q.applyOnTheRight(i,i+1,j_left.transpose()); + m_Z.applyOnTheLeft(i,i+1,j_right.transpose()); + } + + i++; + } + } + } + + return *this; + } // end compute + +} // end namespace Eigen + +#endif //EIGEN_REAL_QZ diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur.h new file mode 100644 index 000000000..17ea903f5 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur.h @@ -0,0 +1,546 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REAL_SCHUR_H +#define EIGEN_REAL_SCHUR_H + +#include "./HessenbergDecomposition.h" + +namespace Eigen { + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class RealSchur + * + * \brief Performs a real Schur decomposition of a square matrix + * + * \tparam _MatrixType the type of the matrix of which we are computing the + * real Schur decomposition; this is expected to be an instantiation of the + * Matrix class template. 
+ * + * Given a real square matrix A, this class computes the real Schur + * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and + * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose + * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular + * matrix is a block-triangular matrix whose diagonal consists of 1-by-1 + * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the + * blocks on the diagonal of T are the same as the eigenvalues of the matrix + * A, and thus the real Schur decomposition is used in EigenSolver to compute + * the eigendecomposition of a matrix. + * + * Call the function compute() to compute the real Schur decomposition of a + * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool) + * constructor which computes the real Schur decomposition at construction + * time. Once the decomposition is computed, you can use the matrixU() and + * matrixT() functions to retrieve the matrices U and T in the decomposition. + * + * The documentation of RealSchur(const MatrixType&, bool) contains an example + * of the typical use of this class. + * + * \note The implementation is adapted from + * JAMA (public domain). + * Their code is based on EISPACK. + * + * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver + */ +template class RealSchur +{ + public: + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + Options = MatrixType::Options, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef std::complex::Real> ComplexScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + + typedef Matrix EigenvalueType; + typedef Matrix ColumnVectorType; + + /** \brief Default constructor. + * + * \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via compute(). The \p size parameter is only + * used as a hint. It is not an error to give a wrong \p size, but it may + * impair performance. + * + * \sa compute() for an example. + */ + explicit RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) + : m_matT(size, size), + m_matU(size, size), + m_workspaceVector(size), + m_hess(size), + m_isInitialized(false), + m_matUisUptodate(false), + m_maxIters(-1) + { } + + /** \brief Constructor; computes real Schur decomposition of given matrix. + * + * \param[in] matrix Square matrix whose Schur decomposition is to be computed. + * \param[in] computeU If true, both T and U are computed; if false, only T is computed. + * + * This constructor calls compute() to compute the Schur decomposition. + * + * Example: \include RealSchur_RealSchur_MatrixType.cpp + * Output: \verbinclude RealSchur_RealSchur_MatrixType.out + */ + template + explicit RealSchur(const EigenBase& matrix, bool computeU = true) + : m_matT(matrix.rows(),matrix.cols()), + m_matU(matrix.rows(),matrix.cols()), + m_workspaceVector(matrix.rows()), + m_hess(matrix.rows()), + m_isInitialized(false), + m_matUisUptodate(false), + m_maxIters(-1) + { + compute(matrix.derived(), computeU); + } + + /** \brief Returns the orthogonal matrix in the Schur decomposition. + * + * \returns A const reference to the matrix U. 
+     *
+     * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+     * member function compute(const MatrixType&, bool) has been called before
+     * to compute the Schur decomposition of a matrix, and \p computeU was set
+     * to true (the default value).
+     *
+     * \sa RealSchur(const MatrixType&, bool) for an example
+     */
+    const MatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the quasi-triangular matrix in the Schur decomposition.
+     *
+     * \returns A const reference to the matrix T.
+     *
+     * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+     * member function compute(const MatrixType&, bool) has been called before
+     * to compute the Schur decomposition of a matrix.
+     *
+     * \sa RealSchur(const MatrixType&, bool) for an example
+     */
+    const MatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_matT;
+    }
+
+    /** \brief Computes Schur decomposition of given matrix.
+     *
+     * \param[in] matrix    Square matrix whose Schur decomposition is to be computed.
+     * \param[in] computeU  If true, both T and U are computed; if false, only T is computed.
+     * \returns Reference to \c *this
+     *
+     * The Schur decomposition is computed by first reducing the matrix to
+     * Hessenberg form using the class HessenbergDecomposition. The Hessenberg
+     * matrix is then reduced to triangular form by performing Francis QR
+     * iterations with implicit double shift. The cost of computing the Schur
+     * decomposition depends on the number of iterations; as a rough guide, it
+     * may be taken to be \f$25n^3\f$ flops if \a computeU is true and
+     * \f$10n^3\f$ flops if \a computeU is false.
+     *
+     * Example: \include RealSchur_compute.cpp
+     * Output: \verbinclude RealSchur_compute.out
+     *
+     * \sa compute(const MatrixType&, bool, Index)
+     */
+    template<typename InputType>
+    RealSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
+
+    /** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T
+     *  \param[in] matrixH   Matrix in Hessenberg form H
+     *  \param[in] matrixQ   Orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
+     *  \param     computeU  Computes the matrix U of the Schur vectors
+     *  \return Reference to \c *this
+     *
+     *  This routine assumes that the matrix is already reduced in Hessenberg form matrixH
+     *  using either the class HessenbergDecomposition or some other means.
+     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
+     *  When computeU is true, this routine computes the matrix U such that
+     *  A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix.
+     *
+     *  NOTE: Q is referenced if computeU is true; so, if the initial orthogonal matrix
+     *  is not available, the user should give an identity matrix (Q.setIdentity()).
+     *
+     * \sa compute(const MatrixType&, bool)
+     */
+    template<typename HessMatrixType, typename OrthMatrixType>
+    RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
+
+    /** \brief Reports whether previous computation was successful.
+     *
+     * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+     */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Sets the maximum number of iterations allowed.
+     *
+     * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+     * of the matrix.
+     */
+    RealSchur& setMaxIterations(Index maxIters)
+    {
+      m_maxIters = maxIters;
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_maxIters;
+    }
+
+    /** \brief Maximum number of iterations per row.
+     *
+     * If not otherwise specified, the maximum number of iterations is this number times the size of the
+     * matrix. It is currently set to 40.
+     */
+    static const int m_maxIterationsPerRow = 40;
+
+  private:
+
+    MatrixType m_matT;
+    MatrixType m_matU;
+    ColumnVectorType m_workspaceVector;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+    Index m_maxIters;
+
+    typedef Matrix<Scalar,3,1> Vector3s;
+
+    Scalar computeNormOfT();
+    Index findSmallSubdiagEntry(Index iu);
+    void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
+    void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
+    void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
+    void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
+};
+
+
+template<typename MatrixType>
+template<typename InputType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)
+{
+  const Scalar considerAsZero = (std::numeric_limits<Scalar>::min)();
+
+  eigen_assert(matrix.cols() == matrix.rows());
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrix.rows();
+
+  Scalar scale = matrix.derived().cwiseAbs().maxCoeff();
+  if(scale<considerAsZero)
+  {
+    // The matrix is numerically zero: T is zero and U is the identity.
+    m_matT.setZero(matrix.rows(),matrix.cols());
+    if(computeU)
+      m_matU.setIdentity(matrix.rows(),matrix.cols());
+    m_info = Success;
+    m_isInitialized = true;
+    m_matUisUptodate = computeU;
+    return *this;
+  }
+
+  // Scale the matrix so that its entries are in [-1,1].
+  MatrixType scaledMatrix = matrix.derived()/scale;
+
+  // Step 1. Reduce to Hessenberg form
+  m_hess.compute(scaledMatrix);
+
+  // Step 2. Reduce to real Schur form
+  computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+
+  // Undo the scaling on T.
+  m_matT *= scale;
+
+  return *this;
+}
+
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
+{
+  using std::abs;
+
+  m_matT = matrixH;
+  if(computeU)
+    m_matU = matrixQ;
+
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrixH.rows();
+  m_workspaceVector.resize(m_matT.cols());
+  Scalar* workspace = &m_workspaceVector.coeffRef(0);
+
+  // The matrix m_matT is divided in three parts.
+  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
+  // Rows il,...,iu is the part we are working on (the active window).
+  // Rows iu+1,...,end are already brought in triangular form.
+ Index iu = m_matT.cols() - 1; + Index iter = 0; // iteration count for current eigenvalue + Index totalIter = 0; // iteration count for whole matrix + Scalar exshift(0); // sum of exceptional shifts + Scalar norm = computeNormOfT(); + + if(norm!=Scalar(0)) + { + while (iu >= 0) + { + Index il = findSmallSubdiagEntry(iu); + + // Check for convergence + if (il == iu) // One root found + { + m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift; + if (iu > 0) + m_matT.coeffRef(iu, iu-1) = Scalar(0); + iu--; + iter = 0; + } + else if (il == iu-1) // Two roots found + { + splitOffTwoRows(iu, computeU, exshift); + iu -= 2; + iter = 0; + } + else // No convergence yet + { + // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG ) + Vector3s firstHouseholderVector = Vector3s::Zero(), shiftInfo; + computeShift(iu, iter, exshift, shiftInfo); + iter = iter + 1; + totalIter = totalIter + 1; + if (totalIter > maxIters) break; + Index im; + initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); + performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace); + } + } + } + if(totalIter <= maxIters) + m_info = Success; + else + m_info = NoConvergence; + + m_isInitialized = true; + m_matUisUptodate = computeU; + return *this; +} + +/** \internal Computes and returns vector L1 norm of T */ +template +inline typename MatrixType::Scalar RealSchur::computeNormOfT() +{ + const Index size = m_matT.cols(); + // FIXME to be efficient the following would requires a triangular reduxion code + // Scalar norm = m_matT.upper().cwiseAbs().sum() + // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); + Scalar norm(0); + for (Index j = 0; j < size; ++j) + norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); + return norm; +} + +/** \internal Look for single small sub-diagonal element and returns its index */ +template +inline Index RealSchur::findSmallSubdiagEntry(Index iu) +{ + using std::abs; + Index res = iu; + while (res > 0) + { + Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res)); + if (abs(m_matT.coeff(res,res-1)) <= NumTraits::epsilon() * s) + break; + res--; + } + return res; +} + +/** \internal Update T given that rows iu-1 and iu decouple from the rest. */ +template +inline void RealSchur::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift) +{ + using std::sqrt; + using std::abs; + const Index size = m_matT.cols(); + + // The eigenvalues of the 2x2 matrix [a b; c d] are + // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc + Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu)); + Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4 + m_matT.coeffRef(iu,iu) += exshift; + m_matT.coeffRef(iu-1,iu-1) += exshift; + + if (q >= Scalar(0)) // Two real eigenvalues + { + Scalar z = sqrt(abs(q)); + JacobiRotation rot; + if (p >= Scalar(0)) + rot.makeGivens(p + z, m_matT.coeff(iu, iu-1)); + else + rot.makeGivens(p - z, m_matT.coeff(iu, iu-1)); + + m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint()); + m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot); + m_matT.coeffRef(iu, iu-1) = Scalar(0); + if (computeU) + m_matU.applyOnTheRight(iu-1, iu, rot); + } + + if (iu > 1) + m_matT.coeffRef(iu-1, iu-2) = Scalar(0); +} + +/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. 
*/ +template +inline void RealSchur::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo) +{ + using std::sqrt; + using std::abs; + shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu); + shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1); + shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); + + // Wilkinson's original ad hoc shift + if (iter == 10) + { + exshift += shiftInfo.coeff(0); + for (Index i = 0; i <= iu; ++i) + m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); + Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2)); + shiftInfo.coeffRef(0) = Scalar(0.75) * s; + shiftInfo.coeffRef(1) = Scalar(0.75) * s; + shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s; + } + + // MATLAB's new ad hoc shift + if (iter == 30) + { + Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); + s = s * s + shiftInfo.coeff(2); + if (s > Scalar(0)) + { + s = sqrt(s); + if (shiftInfo.coeff(1) < shiftInfo.coeff(0)) + s = -s; + s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); + s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s; + exshift += s; + for (Index i = 0; i <= iu; ++i) + m_matT.coeffRef(i,i) -= s; + shiftInfo.setConstant(Scalar(0.964)); + } + } +} + +/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */ +template +inline void RealSchur::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector) +{ + using std::abs; + Vector3s& v = firstHouseholderVector; // alias to save typing + + for (im = iu-2; im >= il; --im) + { + const Scalar Tmm = m_matT.coeff(im,im); + const Scalar r = shiftInfo.coeff(0) - Tmm; + const Scalar s = shiftInfo.coeff(1) - Tmm; + v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1); + v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s; + v.coeffRef(2) = m_matT.coeff(im+2,im+1); + if (im == il) { + break; + } + const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2))); + const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1))); + if (abs(lhs) < NumTraits::epsilon() * rhs) + break; + } +} + +/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. 
*/ +template +inline void RealSchur::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace) +{ + eigen_assert(im >= il); + eigen_assert(im <= iu-2); + + const Index size = m_matT.cols(); + + for (Index k = im; k <= iu-2; ++k) + { + bool firstIteration = (k == im); + + Vector3s v; + if (firstIteration) + v = firstHouseholderVector; + else + v = m_matT.template block<3,1>(k,k-1); + + Scalar tau, beta; + Matrix ess; + v.makeHouseholder(ess, tau, beta); + + if (beta != Scalar(0)) // if v is not zero + { + if (firstIteration && k > il) + m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1); + else if (!firstIteration) + m_matT.coeffRef(k,k-1) = beta; + + // These Householder transformations form the O(n^3) part of the algorithm + m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace); + m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace); + if (computeU) + m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace); + } + } + + Matrix v = m_matT.template block<2,1>(iu-1, iu-2); + Scalar tau, beta; + Matrix ess; + v.makeHouseholder(ess, tau, beta); + + if (beta != Scalar(0)) // if v is not zero + { + m_matT.coeffRef(iu-1, iu-2) = beta; + m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace); + m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace); + if (computeU) + m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace); + } + + // clean up pollution due to round-off errors + for (Index i = im+2; i <= iu; ++i) + { + m_matT.coeffRef(i,i-2) = Scalar(0); + if (i > im+2) + m_matT.coeffRef(i,i-3) = Scalar(0); + } +} + +} // end namespace Eigen + +#endif // EIGEN_REAL_SCHUR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h new file mode 100644 index 000000000..2c2251715 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h @@ -0,0 +1,77 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
+ IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *  Content : Eigen bindings to LAPACKe
+ *    Real Schur decomposition, needed for real unsymmetric eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_REAL_SCHUR_LAPACKE_H
+#define EIGEN_REAL_SCHUR_LAPACKE_H
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by LAPACKe */
+
+#define EIGEN_LAPACKE_SCHUR_REAL(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \
+template<> template<typename InputType> inline \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \
+{ \
+  eigen_assert(matrix.cols() == matrix.rows()); \
+\
+  lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \
+  lapack_int matrix_order = LAPACKE_COLROW; \
+  char jobvs, sort='N'; \
+  LAPACK_##LAPACKE_PREFIX_U##_SELECT2 select = 0; \
+  jobvs = (computeU) ? 'V' : 'N'; \
+  m_matU.resize(n, n); \
+  lapack_int ldvs = internal::convert_index<lapack_int>(m_matU.outerStride()); \
+  m_matT = matrix; \
+  lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \
+  Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \
+  wr.resize(n, 1); wi.resize(n, 1); \
+  info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)wr.data(), (LAPACKE_TYPE*)wi.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \
+  if(info == 0) \
+    m_info = Success; \
+  else \
+    m_info = NoConvergence; \
+\
+  m_isInitialized = true; \
+  m_matUisUptodate = computeU; \
+  return *this; \
+\
+}
+
+EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_SCHUR_LAPACKE_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
new file mode 100644
index 000000000..9ddd553f2
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -0,0 +1,870 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud
+// Copyright (C) 2010 Jitse Niesen
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
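+
+// A quick usage sketch of the real Schur decomposition completed in the
+// preceding header (illustration only; it works with either the generic
+// iterative kernel or the LAPACKE gees binding, and is kept inactive so
+// this header's behavior is unchanged):
+#if 0
+#include <Eigen/Dense>
+#include <iostream>
+
+int main()
+{
+  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);  // general real matrix
+  Eigen::RealSchur<Eigen::MatrixXd> schur(A);         // A = U T U^T
+  if (schur.info() == Eigen::Success)
+  {
+    const Eigen::MatrixXd& T = schur.matrixT();       // real quasi-triangular
+    const Eigen::MatrixXd& U = schur.matrixU();       // orthogonal
+    std::cout << (U * T * U.transpose() - A).norm() << "\n";  // ~0
+  }
+}
+#endif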
+ +#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H +#define EIGEN_SELFADJOINTEIGENSOLVER_H + +#include "./Tridiagonalization.h" + +namespace Eigen { + +template +class GeneralizedSelfAdjointEigenSolver; + +namespace internal { +template struct direct_selfadjoint_eigenvalues; +template +ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec); +} + +/** \eigenvalues_module \ingroup Eigenvalues_Module + * + * + * \class SelfAdjointEigenSolver + * + * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices + * + * \tparam _MatrixType the type of the matrix of which we are computing the + * eigendecomposition; this is expected to be an instantiation of the Matrix + * class template. + * + * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real + * matrices, this means that the matrix is symmetric: it equals its + * transpose. This class computes the eigenvalues and eigenvectors of a + * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors + * \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a + * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with + * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the + * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint + * matrices, the matrix \f$ V \f$ is always invertible). This is called the + * eigendecomposition. + * + * The algorithm exploits the fact that the matrix is selfadjoint, making it + * faster and more accurate than the general purpose eigenvalue algorithms + * implemented in EigenSolver and ComplexEigenSolver. + * + * Only the \b lower \b triangular \b part of the input matrix is referenced. + * + * Call the function compute() to compute the eigenvalues and eigenvectors of + * a given matrix. Alternatively, you can use the + * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes + * the eigenvalues and eigenvectors at construction time. Once the eigenvalue + * and eigenvectors are computed, they can be retrieved with the eigenvalues() + * and eigenvectors() functions. + * + * The documentation for SelfAdjointEigenSolver(const MatrixType&, int) + * contains an example of the typical use of this class. + * + * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and + * the likes, see the class GeneralizedSelfAdjointEigenSolver. + * + * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver + */ +template class SelfAdjointEigenSolver +{ + public: + + typedef _MatrixType MatrixType; + enum { + Size = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + Options = MatrixType::Options, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + /** \brief Scalar type for matrices of type \p _MatrixType. */ + typedef typename MatrixType::Scalar Scalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + + typedef Matrix EigenvectorsType; + + /** \brief Real scalar type for \p _MatrixType. + * + * This is just \c Scalar if #Scalar is real (e.g., \c float or + * \c double), and the type of the real part of \c Scalar if #Scalar is + * complex. + */ + typedef typename NumTraits::Real RealScalar; + + friend struct internal::direct_selfadjoint_eigenvalues::IsComplex>; + + /** \brief Type for vector of eigenvalues as returned by eigenvalues(). + * + * This is a column vector with entries of type #RealScalar. 
+ * The length of the vector is the size of \p _MatrixType. + */ + typedef typename internal::plain_col_type::type RealVectorType; + typedef Tridiagonalization TridiagonalizationType; + typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType; + + /** \brief Default constructor for fixed-size matrices. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via compute(). This constructor + * can only be used if \p _MatrixType is a fixed-size matrix; use + * SelfAdjointEigenSolver(Index) for dynamic-size matrices. + * + * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp + * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out + */ + EIGEN_DEVICE_FUNC + SelfAdjointEigenSolver() + : m_eivec(), + m_eivalues(), + m_subdiag(), + m_isInitialized(false) + { } + + /** \brief Constructor, pre-allocates memory for dynamic-size matrices. + * + * \param [in] size Positive integer, size of the matrix whose + * eigenvalues and eigenvectors will be computed. + * + * This constructor is useful for dynamic-size matrices, when the user + * intends to perform decompositions via compute(). The \p size + * parameter is only used as a hint. It is not an error to give a wrong + * \p size, but it may impair performance. + * + * \sa compute() for an example + */ + EIGEN_DEVICE_FUNC + explicit SelfAdjointEigenSolver(Index size) + : m_eivec(size, size), + m_eivalues(size), + m_subdiag(size > 1 ? size - 1 : 1), + m_isInitialized(false) + {} + + /** \brief Constructor; computes eigendecomposition of given matrix. + * + * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to + * be computed. Only the lower triangular part of the matrix is referenced. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * + * This constructor calls compute(const MatrixType&, int) to compute the + * eigenvalues of the matrix \p matrix. The eigenvectors are computed if + * \p options equals #ComputeEigenvectors. + * + * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp + * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out + * + * \sa compute(const MatrixType&, int) + */ + template + EIGEN_DEVICE_FUNC + explicit SelfAdjointEigenSolver(const EigenBase& matrix, int options = ComputeEigenvectors) + : m_eivec(matrix.rows(), matrix.cols()), + m_eivalues(matrix.cols()), + m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), + m_isInitialized(false) + { + compute(matrix.derived(), options); + } + + /** \brief Computes eigendecomposition of given matrix. + * + * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to + * be computed. Only the lower triangular part of the matrix is referenced. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \returns Reference to \c *this + * + * This function computes the eigenvalues of \p matrix. The eigenvalues() + * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, + * then the eigenvectors are also computed and can be retrieved by + * calling eigenvectors(). + * + * This implementation uses a symmetric QR algorithm. The matrix is first + * reduced to tridiagonal form using the Tridiagonalization class. The + * tridiagonal matrix is then brought to diagonal form with implicit + * symmetric QR steps with Wilkinson shift. Details can be found in + * Section 8.3 of Golub \& Van Loan, %Matrix Computations. 
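+ *
+ * The Wilkinson shift is the eigenvalue of the trailing 2x2 block of the
+ * tridiagonal matrix that is closest to its last diagonal entry; it makes
+ * the QR iteration converge cubically in the generic case.
+ *
+ * A minimal end-to-end sketch (illustrative values only):
+ * \code
+ * Eigen::Matrix3d A;
+ * A << 2, 1, 0,
+ *      1, 2, 1,
+ *      0, 1, 2;                                    // real symmetric
+ * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
+ * if (es.info() == Eigen::Success) {
+ *   Eigen::Vector3d lambda = es.eigenvalues();     // sorted increasingly
+ *   Eigen::Matrix3d V = es.eigenvectors();         // orthonormal columns
+ * }
+ * \endcode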
+ * + * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors + * are required and \f$ 4n^3/3 \f$ if they are not required. + * + * This method reuses the memory in the SelfAdjointEigenSolver object that + * was allocated when the object was constructed, if the size of the + * matrix does not change. + * + * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp + * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out + * + * \sa SelfAdjointEigenSolver(const MatrixType&, int) + */ + template + EIGEN_DEVICE_FUNC + SelfAdjointEigenSolver& compute(const EigenBase& matrix, int options = ComputeEigenvectors); + + /** \brief Computes eigendecomposition of given matrix using a closed-form algorithm + * + * This is a variant of compute(const MatrixType&, int options) which + * directly solves the underlying polynomial equation. + * + * Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d). + * + * This method is usually significantly faster than the QR iterative algorithm + * but it might also be less accurate. It is also worth noting that + * for 3x3 matrices it involves trigonometric operations which are + * not necessarily available for all scalar types. + * + * For the 3x3 case, we observed the following worst case relative error regarding the eigenvalues: + * - double: 1e-8 + * - float: 1e-3 + * + * \sa compute(const MatrixType&, int options) + */ + EIGEN_DEVICE_FUNC + SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors); + + /** + *\brief Computes the eigen decomposition from a tridiagonal symmetric matrix + * + * \param[in] diag The vector containing the diagonal of the matrix. + * \param[in] subdiag The subdiagonal of the matrix. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \returns Reference to \c *this + * + * This function assumes that the matrix has been reduced to tridiagonal form. + * + * \sa compute(const MatrixType&, int) for more information + */ + SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors); + + /** \brief Returns the eigenvectors of given matrix. + * + * \returns A const reference to the matrix whose columns are the eigenvectors. + * + * \pre The eigenvectors have been computed before. + * + * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding + * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The + * eigenvectors are normalized to have (Euclidean) norm equal to one. If + * this object was used to solve the eigenproblem for the selfadjoint + * matrix \f$ A \f$, then the matrix returned by this function is the + * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$. + * + * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp + * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out + * + * \sa eigenvalues() + */ + EIGEN_DEVICE_FUNC + const EigenvectorsType& eigenvectors() const + { + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + return m_eivec; + } + + /** \brief Returns the eigenvalues of given matrix. + * + * \returns A const reference to the column vector containing the eigenvalues. + * + * \pre The eigenvalues have been computed before. 
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+ * are sorted in increasing order.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
+ *
+ * \sa eigenvectors(), MatrixBase::eigenvalues()
+ */
+    EIGEN_DEVICE_FUNC
+    const RealVectorType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes the positive-definite square root of the matrix.
+      *
+      * \returns the positive-definite square root of the matrix
+      *
+      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+      * have been computed before.
+      *
+      * The square root of a positive-definite matrix \f$ A \f$ is the
+      * positive-definite matrix whose square equals \f$ A \f$. This function
+      * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
+      * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
+      *
+      * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
+      *
+      * \sa operatorInverseSqrt(), MatrixFunctions Module
+      */
+    EIGEN_DEVICE_FUNC
+    MatrixType operatorSqrt() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+    }
+
+    /** \brief Computes the inverse square root of the matrix.
+      *
+      * \returns the inverse positive-definite square root of the matrix
+      *
+      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+      * have been computed before.
+      *
+      * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
+      * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
+      * cheaper than first computing the square root with operatorSqrt() and
+      * then its inverse with MatrixBase::inverse().
+      *
+      * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
+      *
+      * \sa operatorSqrt(), MatrixBase::inverse(), MatrixFunctions Module
+      */
+    EIGEN_DEVICE_FUNC
+    MatrixType operatorInverseSqrt() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+    }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    EIGEN_DEVICE_FUNC
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Maximum number of iterations.
+      *
+      * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
+      * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
+      */
+    static const int m_maxIterations = 30;
+
+  protected:
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
+    EigenvectorsType m_eivec;
+    RealVectorType m_eivalues;
+    typename TridiagonalizationType::SubDiagonalType m_subdiag;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+};
+
+namespace internal {
+/** \internal
+  *
+  * \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  * Performs a QR step on a tridiagonal symmetric matrix represented as a
+  * pair of two vectors \a diag and \a subdiag.
+  *
+  * \param diag the diagonal part of the input selfadjoint tridiagonal matrix
+  * \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
+  * \param start starting index of the submatrix to work on
+  * \param end last+1 index of the submatrix to work on
+  * \param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0
+  * \param n size of the input matrix
+  *
+  * For compilation efficiency reasons, this procedure does not use eigen expression
+  * for its arguments.
+  *
+  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
+  * "implicit symmetric QR step with Wilkinson shift"
+  */
+template<int StorageOrder, typename RealScalar, typename Scalar, typename Index>
+EIGEN_DEVICE_FUNC
+static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
+}
+
+template<typename MatrixType>
+template<typename InputType>
+EIGEN_DEVICE_FUNC
+SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
+::compute(const EigenBase<InputType>& a_matrix, int options)
+{
+  check_template_parameters();
+
+  const InputType &matrix(a_matrix.derived());
+
+  using std::abs;
+  eigen_assert(matrix.cols() == matrix.rows());
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0
+          && (options&EigVecMask)!=EigVecMask
+          && "invalid option parameter");
+  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+  Index n = matrix.cols();
+  m_eivalues.resize(n,1);
+
+  if(n==1)
+  {
+    m_eivec = matrix;
+    m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
+    if(computeEigenvectors)
+      m_eivec.setOnes(n,n);
+    m_info = Success;
+    m_isInitialized = true;
+    m_eigenvectorsOk = computeEigenvectors;
+    return *this;
+  }
+
+  // declare some aliases
+  RealVectorType& diag = m_eivalues;
+  EigenvectorsType& mat = m_eivec;
+
+  // map the matrix coefficients to [-1:1] to avoid over- and underflow.
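+  // (The scale factor below is the largest absolute coefficient of the
+  // referenced lower triangle; dividing by it bounds every referenced entry
+  // by 1, so the squares and products formed during tridiagonalization and
+  // the QR sweeps stay well inside the representable range. The eigenvalues
+  // are multiplied back by the same factor once the iteration converges.)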
+  mat = matrix.template triangularView<Lower>();
+  RealScalar scale = mat.cwiseAbs().maxCoeff();
+  if(scale==RealScalar(0)) scale = RealScalar(1);
+  mat.template triangularView<Lower>() /= scale;
+  m_subdiag.resize(n-1);
+  internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
+
+  m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
+
+  // scale back the eigen values
+  m_eivalues *= scale;
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
+
+template<typename MatrixType>
+SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
+::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)
+{
+  //TODO : Add an option to scale the values beforehand
+  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+
+  m_eivalues = diag;
+  m_subdiag = subdiag;
+  if (computeEigenvectors)
+  {
+    m_eivec.setIdentity(diag.size(), diag.size());
+  }
+  m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
+
+namespace internal {
+/**
+  * \internal
+  * \brief Compute the eigendecomposition from a tridiagonal matrix
+  *
+  * \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
+  * \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
+  * \param[in] maxIterations : the maximum number of iterations
+  * \param[in] computeEigenvectors : whether the eigenvectors have to be computed or not
+  * \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
+  * \returns \c Success or \c NoConvergence
+  */
+template<typename MatrixType, typename DiagType, typename SubDiagType>
+ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
+{
+  using std::abs;
+
+  ComputationInfo info;
+  typedef typename MatrixType::Scalar Scalar;
+
+  Index n = diag.size();
+  Index end = n-1;
+  Index start = 0;
+  Index iter = 0; // total number of iterations
+
+  typedef typename DiagType::RealScalar RealScalar;
+  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
+  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
+
+  while (end>0)
+  {
+    for (Index i = start; i<end; ++i)
+      if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
+        subdiag[i] = 0;
+
+    // find the largest unreduced block
+    while (end>0 && subdiag[end-1]==RealScalar(0))
+    {
+      end--;
+    }
+    if (end<=0)
+      break;
+
+    // if we spent too many iterations, we give up
+    iter++;
+    if(iter > maxIterations * n) break;
+
+    start = end - 1;
+    while (start>0 && subdiag[start-1]!=0)
+      start--;
+
+    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
+  }
+  if (iter <= maxIterations * n)
+    info = Success;
+  else
+    info = NoConvergence;
+
+  // Sort eigenvalues and corresponding vectors.
+  // TODO make the sort optional ?
+  // TODO use a better sort algorithm !!
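+  // (As written this is a selection sort: position i receives the smallest
+  // remaining eigenvalue, and the matching eigenvector column is swapped
+  // along with it. Its O(n^2) cost is negligible next to the O(n^3)
+  // factorization above.)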
+ if (info == Success) + { + for (Index i = 0; i < n-1; ++i) + { + Index k; + diag.segment(i,n-i).minCoeff(&k); + if (k > 0) + { + std::swap(diag[i], diag[k+i]); + if(computeEigenvectors) + eivec.col(i).swap(eivec.col(k+i)); + } + } + } + return info; +} + +template struct direct_selfadjoint_eigenvalues +{ + EIGEN_DEVICE_FUNC + static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options) + { eig.compute(A,options); } +}; + +template struct direct_selfadjoint_eigenvalues +{ + typedef typename SolverType::MatrixType MatrixType; + typedef typename SolverType::RealVectorType VectorType; + typedef typename SolverType::Scalar Scalar; + typedef typename SolverType::EigenvectorsType EigenvectorsType; + + + /** \internal + * Computes the roots of the characteristic polynomial of \a m. + * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized. + */ + EIGEN_DEVICE_FUNC + static inline void computeRoots(const MatrixType& m, VectorType& roots) + { + EIGEN_USING_STD_MATH(sqrt) + EIGEN_USING_STD_MATH(atan2) + EIGEN_USING_STD_MATH(cos) + EIGEN_USING_STD_MATH(sin) + const Scalar s_inv3 = Scalar(1)/Scalar(3); + const Scalar s_sqrt3 = sqrt(Scalar(3)); + + // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The + // eigenvalues are the roots to this equation, all guaranteed to be + // real-valued, because the matrix is symmetric. + Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0); + Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1); + Scalar c2 = m(0,0) + m(1,1) + m(2,2); + + // Construct the parameters used in classifying the roots of the equation + // and in solving the equation for the roots in closed form. + Scalar c2_over_3 = c2*s_inv3; + Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3; + a_over_3 = numext::maxi(a_over_3, Scalar(0)); + + Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1)); + + Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b; + q = numext::maxi(q, Scalar(0)); + + // Compute the eigenvalues by solving for the roots of the polynomial. 
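+    // (This is Viete's trigonometric solution of the depressed cubic: after
+    // the substitution x = y + c2/3 the equation reads
+    //   y^3 - 3*a_over_3*y - 2*half_b = 0,
+    // whose roots are y_k = 2*sqrt(a_over_3)*cos(theta + 2*k*pi/3) for
+    // k = 0, +1, -1, with theta = atan2(sqrt(q), half_b)/3. The clamp
+    // q >= 0 above is exactly the three-real-roots condition (the cubic's
+    // discriminant equals 108*q), which symmetry guarantees.)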
+ Scalar rho = sqrt(a_over_3); + Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3] + Scalar cos_theta = cos(theta); + Scalar sin_theta = sin(theta); + // roots are already sorted, since cos is monotonically decreasing on [0, pi] + roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3) + roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3) + roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta; + } + + EIGEN_DEVICE_FUNC + static inline bool extract_kernel(MatrixType& mat, Ref res, Ref representative) + { + using std::abs; + Index i0; + // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal): + mat.diagonal().cwiseAbs().maxCoeff(&i0); + // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector, + // so let's save it: + representative = mat.col(i0); + Scalar n0, n1; + VectorType c0, c1; + n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm(); + n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm(); + if(n0>n1) res = c0/std::sqrt(n0); + else res = c1/std::sqrt(n1); + + return true; + } + + EIGEN_DEVICE_FUNC + static inline void run(SolverType& solver, const MatrixType& mat, int options) + { + eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 + && (options&EigVecMask)!=EigVecMask + && "invalid option parameter"); + bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; + + EigenvectorsType& eivecs = solver.m_eivec; + VectorType& eivals = solver.m_eivalues; + + // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. + Scalar shift = mat.trace() / Scalar(3); + // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later + MatrixType scaledMat = mat.template selfadjointView(); + scaledMat.diagonal().array() -= shift; + Scalar scale = scaledMat.cwiseAbs().maxCoeff(); + if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations + + // compute the eigenvalues + computeRoots(scaledMat,eivals); + + // compute the eigenvectors + if(computeEigenvectors) + { + if((eivals(2)-eivals(0))<=Eigen::NumTraits::epsilon()) + { + // All three eigenvalues are numerically the same + eivecs.setIdentity(); + } + else + { + MatrixType tmp; + tmp = scaledMat; + + // Compute the eigenvector of the most distinct eigenvalue + Scalar d0 = eivals(2) - eivals(1); + Scalar d1 = eivals(1) - eivals(0); + Index k(0), l(2); + if(d0 > d1) + { + numext::swap(k,l); + d0 = d1; + } + + // Compute the eigenvector of index k + { + tmp.diagonal().array () -= eivals(k); + // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector. + extract_kernel(tmp, eivecs.col(k), eivecs.col(l)); + } + + // Compute eigenvector of index l + if(d0<=2*Eigen::NumTraits::epsilon()*d1) + { + // If d0 is too small, then the two other eigenvalues are numerically the same, + // and thus we only have to ortho-normalize the near orthogonal vector we saved above. 
+ eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l); + eivecs.col(l).normalize(); + } + else + { + tmp = scaledMat; + tmp.diagonal().array () -= eivals(l); + + VectorType dummy; + extract_kernel(tmp, eivecs.col(l), dummy); + } + + // Compute last eigenvector from the other two + eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized(); + } + } + + // Rescale back to the original size. + eivals *= scale; + eivals.array() += shift; + + solver.m_info = Success; + solver.m_isInitialized = true; + solver.m_eigenvectorsOk = computeEigenvectors; + } +}; + +// 2x2 direct eigenvalues decomposition, code from Hauke Heibel +template +struct direct_selfadjoint_eigenvalues +{ + typedef typename SolverType::MatrixType MatrixType; + typedef typename SolverType::RealVectorType VectorType; + typedef typename SolverType::Scalar Scalar; + typedef typename SolverType::EigenvectorsType EigenvectorsType; + + EIGEN_DEVICE_FUNC + static inline void computeRoots(const MatrixType& m, VectorType& roots) + { + using std::sqrt; + const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0))); + const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1)); + roots(0) = t1 - t0; + roots(1) = t1 + t0; + } + + EIGEN_DEVICE_FUNC + static inline void run(SolverType& solver, const MatrixType& mat, int options) + { + EIGEN_USING_STD_MATH(sqrt); + EIGEN_USING_STD_MATH(abs); + + eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 + && (options&EigVecMask)!=EigVecMask + && "invalid option parameter"); + bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; + + EigenvectorsType& eivecs = solver.m_eivec; + VectorType& eivals = solver.m_eivalues; + + // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. + Scalar shift = mat.trace() / Scalar(2); + MatrixType scaledMat = mat; + scaledMat.coeffRef(0,1) = mat.coeff(1,0); + scaledMat.diagonal().array() -= shift; + Scalar scale = scaledMat.cwiseAbs().maxCoeff(); + if(scale > Scalar(0)) + scaledMat /= scale; + + // Compute the eigenvalues + computeRoots(scaledMat,eivals); + + // compute the eigen vectors + if(computeEigenvectors) + { + if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits::epsilon()) + { + eivecs.setIdentity(); + } + else + { + scaledMat.diagonal().array () -= eivals(1); + Scalar a2 = numext::abs2(scaledMat(0,0)); + Scalar c2 = numext::abs2(scaledMat(1,1)); + Scalar b2 = numext::abs2(scaledMat(1,0)); + if(a2>c2) + { + eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); + eivecs.col(1) /= sqrt(a2+b2); + } + else + { + eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); + eivecs.col(1) /= sqrt(c2+b2); + } + + eivecs.col(0) << eivecs.col(1).unitOrthogonal(); + } + } + + // Rescale back to the original size. 
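+    // (This inverts the conditioning applied on entry: eigenvalues of the
+    // shifted and scaled matrix map back as lambda -> scale*lambda + shift,
+    // while the eigenvectors are unchanged by either operation.)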
+ eivals *= scale; + eivals.array() += shift; + + solver.m_info = Success; + solver.m_isInitialized = true; + solver.m_eigenvectorsOk = computeEigenvectors; + } +}; + +} + +template +EIGEN_DEVICE_FUNC +SelfAdjointEigenSolver& SelfAdjointEigenSolver +::computeDirect(const MatrixType& matrix, int options) +{ + internal::direct_selfadjoint_eigenvalues::IsComplex>::run(*this,matrix,options); + return *this; +} + +namespace internal { +template +EIGEN_DEVICE_FUNC +static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) +{ + using std::abs; + RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); + RealScalar e = subdiag[end-1]; + // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still + // underflow thus leading to inf/NaN values when using the following commented code: +// RealScalar e2 = numext::abs2(subdiag[end-1]); +// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); + // This explain the following, somewhat more complicated, version: + RealScalar mu = diag[end]; + if(td==RealScalar(0)) + mu -= abs(e); + else + { + RealScalar e2 = numext::abs2(subdiag[end-1]); + RealScalar h = numext::hypot(td,e); + if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h); + else mu -= e2 / (td + (td>RealScalar(0) ? h : -h)); + } + + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + for (Index k = start; k < end; ++k) + { + JacobiRotation rot; + rot.makeGivens(x, z); + + // do T = G' T G + RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k]; + RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1]; + + diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]); + diag[k+1] = rot.s() * sdk + rot.c() * dkp1; + subdiag[k] = rot.c() * sdk - rot.s() * dkp1; + + + if (k > start) + subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -rot.s() * subdiag[k+1]; + subdiag[k + 1] = rot.c() * subdiag[k+1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + if (matrixQ) + { + // FIXME if StorageOrder == RowMajor this operation is not very efficient + Map > q(matrixQ,n,n); + q.applyOnTheRight(k,k+1,rot); + } + } +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINTEIGENSOLVER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h new file mode 100644 index 000000000..b0c947dc0 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h @@ -0,0 +1,87 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *  Content : Eigen bindings to LAPACKe
+ *    Self-adjoint eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SAEIGENSOLVER_LAPACKE_H
+#define EIGEN_SAEIGENSOLVER_LAPACKE_H
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by LAPACKe */
+
+#define EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW ) \
+template<> template<typename InputType> inline \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \
+{ \
+  eigen_assert(matrix.cols() == matrix.rows()); \
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0 \
+          && (options&EigVecMask)!=EigVecMask \
+          && "invalid option parameter"); \
+  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
+  lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, info; \
+  m_eivalues.resize(n,1); \
+  m_subdiag.resize(n-1); \
+  m_eivec = matrix; \
+\
+  if(n==1) \
+  { \
+    m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0)); \
+    if(computeEigenvectors) m_eivec.setOnes(n,n); \
+    m_info = Success; \
+    m_isInitialized = true; \
+    m_eigenvectorsOk = computeEigenvectors; \
+    return *this; \
+  } \
+\
+  lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \
+  char jobz, uplo='L'/*, range='A'*/; \
+  jobz = computeEigenvectors ? 'V' : 'N'; \
+\
+  info = LAPACKE_##LAPACKE_NAME( LAPACK_COL_MAJOR, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
+  m_info = (info==0) ? Success : NoConvergence; \
+  m_isInitialized = true; \
+  m_eigenvectorsOk = computeEigenvectors; \
+  return *this; \
+}
+
+#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME ) \
+  EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, ColMajor ) \
+  EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, RowMajor )
+
+EIGEN_LAPACKE_EIG_SELFADJ(double,   double,                double, dsyev)
+EIGEN_LAPACKE_EIG_SELFADJ(float,    float,                 float,  ssyev)
+EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev)
+EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float,  float,  cheev)
+
+} // end namespace Eigen
+
+#endif // EIGEN_SAEIGENSOLVER_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
new file mode 100644
index 000000000..1d102c17b
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -0,0 +1,556 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+// Copyright (C) 2010 Jitse Niesen
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIDIAGONALIZATION_H
+#define EIGEN_TRIDIAGONALIZATION_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
+template<typename MatrixType>
+struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
+  : public traits<typename MatrixType::PlainObject>
+{
+  typedef typename MatrixType::PlainObject ReturnType; // FIXME shall it be a BandMatrix?
+  enum { Flags = 0 };
+};
+
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class Tridiagonalization
+  *
+  * \brief Tridiagonal decomposition of a selfadjoint matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * tridiagonal decomposition; this is expected to be an instantiation of the
+  * Matrix class template.
+  *
+  * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
+  * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
+  *
+  * A tridiagonal matrix is a matrix which has nonzero elements only on the
+  * main diagonal and the first diagonal below and above it. The Hessenberg
+  * decomposition of a selfadjoint matrix is in fact a tridiagonal
+  * decomposition. This class is used in SelfAdjointEigenSolver to compute the
+  * eigenvalues and eigenvectors of a selfadjoint matrix.
+  *
+  * Call the function compute() to compute the tridiagonal decomposition of a
+  * given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)
+  * constructor which computes the tridiagonal decomposition at
+  * construction time. Once the decomposition is computed, you can use the
+  * matrixQ() and matrixT() functions to retrieve the matrices Q and T in the
+  * decomposition.
+  *
+  * The documentation of Tridiagonalization(const MatrixType&) contains an
+  * example of the typical use of this class.
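+  *
+  * For instance (a minimal sketch with made-up sizes, for illustration):
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
+  * A = (A + A.transpose()).eval();            // make the input selfadjoint
+  * Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
+  * Eigen::MatrixXd Q = tri.matrixQ();         // unitary factor
+  * Eigen::MatrixXd T = tri.matrixT();         // tridiagonal factor
+  * // Q * T * Q.adjoint() reproduces A up to round-off
+  * \endcode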
+  *
+  * \sa class HessenbergDecomposition, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class Tridiagonalization
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+
+    enum {
+      Size = MatrixType::RowsAtCompileTime,
+      SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),
+      Options = MatrixType::Options,
+      MaxSize = MatrixType::MaxRowsAtCompileTime,
+      MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1)
+    };
+
+    typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
+    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;
+    typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
+    typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;
+    typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;
+
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+              typename internal::add_const_on_value_type<typename Diagonal<const MatrixType>::RealReturnType>::type,
+              const Diagonal<const MatrixType>
+            >::type DiagonalReturnType;
+
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+              typename internal::add_const_on_value_type<typename Diagonal<const MatrixType, -1>::RealReturnType>::type,
+              const Diagonal<const MatrixType, -1>
+            >::type SubDiagonalReturnType;
+
+    /** \brief Return type of matrixQ() */
+    typedef HouseholderSequence<MatrixType, typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size Positive integer, size of the matrix whose tridiagonal
+      * decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute(). The \p size parameter is only
+      * used as a hint. It is not an error to give a wrong \p size, but it may
+      * impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    explicit Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
+      : m_matrix(size,size),
+        m_hCoeffs(size > 1 ? size-1 : 1),
+        m_isInitialized(false)
+    {}
+
+    /** \brief Constructor; computes tridiagonal decomposition of given matrix.
+      *
+      * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
+      * is to be computed.
+      *
+      * This constructor calls compute() to compute the tridiagonal decomposition.
+      *
+      * Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp
+      * Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
+      */
+    template<typename InputType>
+    explicit Tridiagonalization(const EigenBase<InputType>& matrix)
+      : m_matrix(matrix.derived()),
+        m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
+        m_isInitialized(false)
+    {
+      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+      m_isInitialized = true;
+    }
+
+    /** \brief Computes tridiagonal decomposition of given matrix.
+      *
+      * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
+      * is to be computed.
+      * \returns Reference to \c *this
+      *
+      * The tridiagonal decomposition is computed by bringing the columns of
+      * the matrix successively in the required form using Householder
+      * reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes
+      * the size of the given matrix.
+      *
+      * This method reuses the allocated data in the Tridiagonalization
+      * object, if the size of the matrix does not change.
+      *
+      * Example: \include Tridiagonalization_compute.cpp
+      * Output: \verbinclude Tridiagonalization_compute.out
+      */
+    template<typename InputType>
+    Tridiagonalization& compute(const EigenBase<InputType>& matrix)
+    {
+      m_matrix = matrix.derived();
+      m_hCoeffs.resize(matrix.rows()-1, 1);
+      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+      m_isInitialized = true;
+      return *this;
+    }
+
+    /** \brief Returns the Householder coefficients.
+      *
+      * \returns a const reference to the vector of Householder coefficients
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * The Householder coefficients allow the reconstruction of the matrix
+      * \f$ Q \f$ in the tridiagonal decomposition from the packed data.
+      *
+      * Example: \include Tridiagonalization_householderCoefficients.cpp
+      * Output: \verbinclude Tridiagonalization_householderCoefficients.out
+      *
+      * \sa packedMatrix(), \ref Householder_Module "Householder module"
+      */
+    inline CoeffVectorType householderCoefficients() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return m_hCoeffs;
+    }
+
+    /** \brief Returns the internal representation of the decomposition
+      *
+      * \returns a const reference to a matrix with the internal representation
+      * of the decomposition.
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * The returned matrix contains the following information:
+      *  - the strict upper triangular part is equal to the input matrix A.
+      *  - the diagonal and lower sub-diagonal represent the real tridiagonal
+      *    symmetric matrix T.
+      *  - the rest of the lower part contains the Householder vectors that,
+      *    combined with Householder coefficients returned by
+      *    householderCoefficients(), allow one to reconstruct the matrix Q as
+      *       \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+      *    Here, the matrices \f$ H_i \f$ are the Householder transformations
+      *       \f$ H_i = (I - h_i v_i v_i^T) \f$
+      *    where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
+      *    \f$ v_i \f$ is the Householder vector defined by
+      *       \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
+      *    with M the matrix returned by this function.
+      *
+      * See LAPACK for further details on this packed storage.
+      *
+      * Example: \include Tridiagonalization_packedMatrix.cpp
+      * Output: \verbinclude Tridiagonalization_packedMatrix.out
+      *
+      * \sa householderCoefficients()
+      */
+    inline const MatrixType& packedMatrix() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return m_matrix;
+    }
+
+    /** \brief Returns the unitary matrix Q in the decomposition
+      *
+      * \returns object representing the matrix Q
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * This function returns a light-weight object of template class
+      * HouseholderSequence. You can either apply it directly to a matrix or
+      * you can convert it to a matrix of type #MatrixType.
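+      *
+      * Applying the returned sequence to a vector costs O(n^2) operations
+      * and forming the dense matrix Q costs O(n^3), so prefer applying it
+      * directly whenever only products with Q are needed.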
+      *
+      * \sa Tridiagonalization(const MatrixType&) for an example,
+      * matrixT(), class HouseholderSequence
+      */
+    HouseholderSequenceType matrixQ() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
+             .setLength(m_matrix.rows() - 1)
+             .setShift(1);
+    }
+
+    /** \brief Returns an expression of the tridiagonal matrix T in the decomposition
+      *
+      * \returns expression object representing the matrix T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * Currently, this function can be used to extract the matrix T from internal
+      * data and copy it to a dense matrix object. In most cases, it may be
+      * sufficient to directly use the packed matrix or the vector expressions
+      * returned by diagonal() and subDiagonal() instead of creating a new
+      * dense copy matrix with this function.
+      *
+      * \sa Tridiagonalization(const MatrixType&) for an example,
+      * matrixQ(), packedMatrix(), diagonal(), subDiagonal()
+      */
+    MatrixTReturnType matrixT() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return MatrixTReturnType(m_matrix.real());
+    }
+
+    /** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition.
+      *
+      * \returns expression representing the diagonal of T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * Example: \include Tridiagonalization_diagonal.cpp
+      * Output: \verbinclude Tridiagonalization_diagonal.out
+      *
+      * \sa matrixT(), subDiagonal()
+      */
+    DiagonalReturnType diagonal() const;
+
+    /** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.
+      *
+      * \returns expression representing the subdiagonal of T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * \sa diagonal() for an example, matrixT()
+      */
+    SubDiagonalReturnType subDiagonal() const;
+
+  protected:
+
+    MatrixType m_matrix;
+    CoeffVectorType m_hCoeffs;
+    bool m_isInitialized;
+};
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::DiagonalReturnType
+Tridiagonalization<MatrixType>::diagonal() const
+{
+  eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+  return m_matrix.diagonal().real();
+}
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
+Tridiagonalization<MatrixType>::subDiagonal() const
+{
+  eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+  return m_matrix.template diagonal<-1>().real();
+}
+
+namespace internal {
+
+/** \internal
+  * Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
+  *
+  * \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
+  *                     On output, the strict upper part is left unchanged, and the lower triangular part
+  *                     represents the T and Q matrices in packed format as detailed below.
+  * \param[out]    hCoeffs returned Householder coefficients (see below)
+  *
+  * On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
+  * and lower sub-diagonal of the matrix \a matA.
+  * The unitary matrix Q is represented in a compact way as a product of
+  * Householder reflectors \f$ H_i \f$ such that:
+  *       \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+  * The Householder reflectors are defined as
+  *       \f$ H_i = (I - h_i v_i v_i^T) \f$
+  * where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
+  * \f$ v_i \f$ is the Householder vector defined by
+  *       \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
+  *
+  * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
+  *
+  * \sa Tridiagonalization::packedMatrix()
+  */
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
+{
+  using numext::conj;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  Index n = matA.rows();
+  eigen_assert(n==matA.cols());
+  eigen_assert(n==hCoeffs.size()+1 || n==1);
+
+  for (Index i = 0; i<n-1; ++i)
+  {
+    Index remainingSize = n-i-1;
+    RealScalar beta;
+    Scalar h;
+    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
+
+    // Apply similarity transformation to remaining columns,
+    // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
+    matA.col(i).coeffRef(i+1) = 1;
+
+    hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
+                                  * (conj(h) * matA.col(i).tail(remainingSize)));
+
+    hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
+
+    matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
+        .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));
+
+    matA.col(i).coeffRef(i+1) = beta;
+    hCoeffs.coeffRef(i) = h;
+  }
+}
+
+// forward declaration, implementation at the end of this file
+template<typename MatrixType,
+         int Size=MatrixType::ColsAtCompileTime,
+         bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct tridiagonalization_inplace_selector;
+
+/** \brief Performs a full tridiagonalization in place
+  *
+  * \param[in,out]  mat  On input, the selfadjoint matrix whose tridiagonal
+  *    decomposition is to be computed. Only the lower triangular part is referenced.
+  *    The rest is left unchanged. On output, the orthogonal matrix Q
+  *    in the decomposition if \p extractQ is true.
+  * \param[out]  diag  The diagonal of the tridiagonal matrix T in the
+  *    decomposition.
+  * \param[out]  subdiag  The subdiagonal of the tridiagonal matrix T in
+  *    the decomposition.
+  * \param[in]  extractQ  If true, the orthogonal matrix Q in the
+  *    decomposition is computed and stored in \p mat.
+  *
+  * Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
+  * such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
+  * symmetric tridiagonal matrix.
+  *
+  * The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
+  * \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
+  * part of the matrix \p mat is destroyed.
+  *
+  * The vectors \p diag and \p subdiag are not resized. The function
+  * assumes that they are already of the correct size. The length of the
+  * vector \p diag should equal the number of rows in \p mat, and the
+  * length of the vector \p subdiag should be one less.
+  *
+  * This implementation contains an optimized path for 3-by-3 matrices
+  * which is especially useful for plane fitting.
+  *
+  * \note Currently, it requires two temporary vectors to hold the intermediate
+  * Householder coefficients, and to reconstruct the matrix Q from the Householder
+  * reflectors.
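+  *
+  * A minimal call sequence (sketch with made-up sizes, for illustration):
+  * \code
+  * Eigen::Index n = 6;
+  * Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
+  * M = (M + M.transpose()).eval();           // make the input selfadjoint
+  * Eigen::VectorXd diag(n), subdiag(n - 1);  // pre-sized: the call does not resize
+  * Eigen::internal::tridiagonalization_inplace(M, diag, subdiag, true);
+  * // M now holds Q; diag and subdiag hold the tridiagonal T
+  * \endcode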
+ * + * Example (this uses the same matrix as the example in + * Tridiagonalization::Tridiagonalization(const MatrixType&)): + * \include Tridiagonalization_decomposeInPlace.cpp + * Output: \verbinclude Tridiagonalization_decomposeInPlace.out + * + * \sa class Tridiagonalization + */ +template +void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) +{ + eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1); + tridiagonalization_inplace_selector::run(mat, diag, subdiag, extractQ); +} + +/** \internal + * General full tridiagonalization + */ +template +struct tridiagonalization_inplace_selector +{ + typedef typename Tridiagonalization::CoeffVectorType CoeffVectorType; + typedef typename Tridiagonalization::HouseholderSequenceType HouseholderSequenceType; + template + static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) + { + CoeffVectorType hCoeffs(mat.cols()-1); + tridiagonalization_inplace(mat,hCoeffs); + diag = mat.diagonal().real(); + subdiag = mat.template diagonal<-1>().real(); + if(extractQ) + mat = HouseholderSequenceType(mat, hCoeffs.conjugate()) + .setLength(mat.rows() - 1) + .setShift(1); + } +}; + +/** \internal + * Specialization for 3x3 real matrices. + * Especially useful for plane fitting. + */ +template +struct tridiagonalization_inplace_selector +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + template + static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) + { + using std::sqrt; + const RealScalar tol = (std::numeric_limits::min)(); + diag[0] = mat(0,0); + RealScalar v1norm2 = numext::abs2(mat(2,0)); + if(v1norm2 <= tol) + { + diag[1] = mat(1,1); + diag[2] = mat(2,2); + subdiag[0] = mat(1,0); + subdiag[1] = mat(2,1); + if (extractQ) + mat.setIdentity(); + } + else + { + RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2); + RealScalar invBeta = RealScalar(1)/beta; + Scalar m01 = mat(1,0) * invBeta; + Scalar m02 = mat(2,0) * invBeta; + Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1)); + diag[1] = mat(1,1) + m02*q; + diag[2] = mat(2,2) - m02*q; + subdiag[0] = beta; + subdiag[1] = mat(2,1) - m01 * q; + if (extractQ) + { + mat << 1, 0, 0, + 0, m01, m02, + 0, m02, -m01; + } + } + } +}; + +/** \internal + * Trivial specialization for 1x1 matrices + */ +template +struct tridiagonalization_inplace_selector +{ + typedef typename MatrixType::Scalar Scalar; + + template + static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ) + { + diag(0,0) = numext::real(mat(0,0)); + if(extractQ) + mat(0,0) = Scalar(1); + } +}; + +/** \internal + * \eigenvalues_module \ingroup Eigenvalues_Module + * + * \brief Expression type for return value of Tridiagonalization::matrixT() + * + * \tparam MatrixType type of underlying dense matrix + */ +template struct TridiagonalizationMatrixTReturnType +: public ReturnByValue > +{ + public: + /** \brief Constructor. 
+/** \internal
+  * \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  * \brief Expression type for return value of Tridiagonalization::matrixT()
+  *
+  * \tparam MatrixType type of underlying dense matrix
+  */
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
+: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
+{
+  public:
+    /** \brief Constructor.
+      *
+      * \param[in] mat The underlying dense matrix
+      */
+    TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }
+
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      result.setZero();
+      result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();
+      result.diagonal() = m_matrix.diagonal();
+      result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
+    }
+
+    Index rows() const { return m_matrix.rows(); }
+    Index cols() const { return m_matrix.cols(); }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIDIAGONALIZATION_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AlignedBox.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AlignedBox.h
new file mode 100644
index 000000000..066eae4f9
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AlignedBox.h
@@ -0,0 +1,392 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ALIGNEDBOX_H
+#define EIGEN_ALIGNEDBOX_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  *
+  * \class AlignedBox
+  *
+  * \brief An axis aligned box
+  *
+  * \tparam _Scalar the type of the scalar coefficients
+  * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *
+  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+  * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
+  * \sa alignedboxtypedefs
+  */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar                                   Scalar;
+  typedef NumTraits<Scalar>                         ScalarTraits;
+  typedef Eigen::Index                              Index; ///< \deprecated since Eigen 3.3
+  typedef typename ScalarTraits::Real               RealScalar;
+  typedef typename ScalarTraits::NonInteger         NonInteger;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1>  VectorType;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> VectorTypeSum;
+
+  /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
+  enum CornerType
+  {
+    /** 1D names @{ */
+    Min=0, Max=1,
+    /** @} */
+
+    /** Identifier for 2D corner @{ */
+    BottomLeft=0, BottomRight=1,
+    TopLeft=2, TopRight=3,
+    /** @} */
+
+    /** Identifier for 3D corner @{ */
+    BottomLeftFloor=0, BottomRightFloor=1,
+    TopLeftFloor=2, TopRightFloor=3,
+    BottomLeftCeil=4, BottomRightCeil=5,
+    TopLeftCeil=6, TopRightCeil=7
+    /** @} */
+  };
+
+
+  /** Default constructor initializing a null box. */
+  EIGEN_DEVICE_FUNC inline AlignedBox()
+  { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
+
+  /** Constructs a null box with \a _dim the dimension of the ambient space. */
+  EIGEN_DEVICE_FUNC inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
+  { setEmpty(); }
+  /** Constructs a box with extremities \a _min and \a _max.
+   * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
+  template<typename OtherVectorType1, typename OtherVectorType2>
+  EIGEN_DEVICE_FUNC inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
+
+  /** Constructs a box containing a single point \a p. */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
+  { }
+
+  EIGEN_DEVICE_FUNC ~AlignedBox() {}
+
+  /** \returns the dimension in which the box holds */
+  EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
+
+  /** \deprecated use isEmpty() */
+  EIGEN_DEVICE_FUNC inline bool isNull() const { return isEmpty(); }
+
+  /** \deprecated use setEmpty() */
+  EIGEN_DEVICE_FUNC inline void setNull() { setEmpty(); }
+
+  /** \returns true if the box is empty.
+   * \sa setEmpty */
+  EIGEN_DEVICE_FUNC inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
+
+  /** Makes \c *this an empty box.
+   * \sa isEmpty */
+  EIGEN_DEVICE_FUNC inline void setEmpty()
+  {
+    m_min.setConstant( ScalarTraits::highest() );
+    m_max.setConstant( ScalarTraits::lowest() );
+  }
+
+  /** \returns the minimal corner */
+  EIGEN_DEVICE_FUNC inline const VectorType& (min)() const { return m_min; }
+  /** \returns a non const reference to the minimal corner */
+  EIGEN_DEVICE_FUNC inline VectorType& (min)() { return m_min; }
+  /** \returns the maximal corner */
+  EIGEN_DEVICE_FUNC inline const VectorType& (max)() const { return m_max; }
+  /** \returns a non const reference to the maximal corner */
+  EIGEN_DEVICE_FUNC inline VectorType& (max)() { return m_max; }
+
+  /** \returns the center of the box */
+  EIGEN_DEVICE_FUNC inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(VectorTypeSum, RealScalar, quotient)
+  center() const
+  { return (m_min+m_max)/RealScalar(2); }
+
+  /** \returns the lengths of the sides of the bounding box.
+    * Note that this function does not get the same
+    * result for integral or floating scalar types: see
+    */
+  EIGEN_DEVICE_FUNC inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const
+  { return m_max - m_min; }
+
+  /** \returns the volume of the bounding box */
+  EIGEN_DEVICE_FUNC inline Scalar volume() const
+  { return sizes().prod(); }
+
+  /** \returns an expression for the bounding box diagonal vector
+    * if the length of the diagonal is needed: diagonal().norm()
+    * will provide it.
+    */
+  EIGEN_DEVICE_FUNC inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const
+  { return sizes(); }
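Not part of the patch: a minimal usage sketch of the class above (the demo function name is illustrative):

    // Basic AlignedBox bookkeeping: extend, containment, intersection.
    #include <Eigen/Geometry>
    #include <iostream>

    void box_demo()
    {
        Eigen::AlignedBox3f box(Eigen::Vector3f(0, 0, 0), Eigen::Vector3f(1, 1, 1));
        box.extend(Eigen::Vector3f(2, 0.5f, 0.5f));   // grow to enclose a point

        Eigen::AlignedBox3f other(Eigen::Vector3f(0.5f, 0.5f, 0.5f),
                                  Eigen::Vector3f(3, 3, 3));

        std::cout << box.contains(Eigen::Vector3f(1.5f, 0.5f, 0.5f)) << "\n"; // 1
        std::cout << box.intersects(other) << "\n";                           // 1
        Eigen::AlignedBox3f overlap = box.intersection(other);
        std::cout << overlap.sizes().transpose() << "\n";
    }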
+  /** \returns the vertex of the bounding box at the corner defined by
+    * the corner-id corner. It works only for a 1D, 2D or 3D bounding box.
+    * For 1D bounding boxes corners are named by 2 enum constants:
+    * BottomLeft and BottomRight.
+    * For 2D bounding boxes, corners are named by 4 enum constants:
+    * BottomLeft, BottomRight, TopLeft, TopRight.
+    * For 3D bounding boxes, the following names are added:
+    * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
+    */
+  EIGEN_DEVICE_FUNC inline VectorType corner(CornerType corner) const
+  {
+    EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
+
+    VectorType res;
+
+    Index mult = 1;
+    for(Index d=0; d<dim(); ++d)
+    {
+      if( mult & corner ) res[d] = m_max[d];
+      else                res[d] = m_min[d];
+      mult *= 2;
+    }
+    return res;
+  }
+
+  /** \returns a random point inside the bounding box sampled with
+   * a uniform distribution */
+  EIGEN_DEVICE_FUNC inline VectorType sample() const
+  {
+    VectorType r(dim());
+    for(Index d=0; d<dim(); ++d)
+    {
+      if(!ScalarTraits::IsInteger)
+      {
+        r[d] = m_min[d] + (m_max[d]-m_min[d])
+             * internal::random<Scalar>(Scalar(0), Scalar(1));
+      }
+      else
+        r[d] = internal::random(m_min[d], m_max[d]);
+    }
+    return r;
+  }
+
+  /** \returns true if the point \a p is inside the box \c *this. */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline bool contains(const MatrixBase<Derived>& p) const
+  {
+    typename internal::nested_eval<Derived,2>::type p_n(p.derived());
+    return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
+  }
+
+  /** \returns true if the box \a b is entirely inside the box \c *this. */
+  EIGEN_DEVICE_FUNC inline bool contains(const AlignedBox& b) const
+  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
+
+  /** \returns true if the box \a b is intersecting the box \c *this.
+   * \sa intersection, clamp */
+  EIGEN_DEVICE_FUNC inline bool intersects(const AlignedBox& b) const
+  { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
+   * \sa extend(const AlignedBox&) */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline AlignedBox& extend(const MatrixBase<Derived>& p)
+  {
+    typename internal::nested_eval<Derived,2>::type p_n(p.derived());
+    m_min = m_min.cwiseMin(p_n);
+    m_max = m_max.cwiseMax(p_n);
+    return *this;
+  }
+
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
+   * \sa merged, extend(const MatrixBase&) */
+  EIGEN_DEVICE_FUNC inline AlignedBox& extend(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMin(b.m_min);
+    m_max = m_max.cwiseMax(b.m_max);
+    return *this;
+  }
+
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this.
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersection(), intersects() */
+  EIGEN_DEVICE_FUNC inline AlignedBox& clamp(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMax(b.m_min);
+    m_max = m_max.cwiseMin(b.m_max);
+    return *this;
+  }
+
+  /** Returns an AlignedBox that is the intersection of \a b and \c *this
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersects(), clamp, contains() */
+  EIGEN_DEVICE_FUNC inline AlignedBox intersection(const AlignedBox& b) const
+  {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
+
+  /** Returns an AlignedBox that is the union of \a b and \c *this.
+   * \note Merging with an empty box may result in a box bigger than \c *this.
+   * \sa extend(const AlignedBox&) */
+  EIGEN_DEVICE_FUNC inline AlignedBox merged(const AlignedBox& b) const
+  { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
+
+  /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
+  {
+    const typename internal::nested_eval<Derived,2>::type t(a_t.derived());
+    m_min += t;
+    m_max += t;
+    return *this;
+  }
+
+  /** \returns the squared distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
+    */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
+
+  /** \returns the squared distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
+    */
+  EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
+
+  /** \returns the distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
+    */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
+  { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(p))); }
+
+  /** \returns the distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
+    */
+  EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const AlignedBox& b) const
+  { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AlignedBox,
+           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<AlignedBox,
+                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  EIGEN_DEVICE_FUNC inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  EIGEN_DEVICE_FUNC bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const
+  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+  VectorType m_min, m_max;
+};
+
+
+
+template<typename Scalar,int AmbientDim>
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
+{
+  typename internal::nested_eval<Derived,2*AmbientDim>::type p(a_p.derived());
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > p[k] )
+    {
+      aux = m_min[k] - p[k];
+      dist2 += aux*aux;
+    }
+    else if( p[k] > m_max[k] )
+    {
+      aux = p[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
+
+template<typename Scalar,int AmbientDim>
+EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
+{
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > b.m_max[k] )
+    {
+      aux = m_min[k] - b.m_max[k];
+      dist2 += aux*aux;
+    }
+    else if( b.m_min[k] > m_max[k] )
+    {
+      aux = b.m_min[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
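Not part of the patch: a short sketch of the distance queries implemented above:

    // Point-to-box exterior distance.
    #include <Eigen/Geometry>
    #include <iostream>

    void distance_demo()
    {
        Eigen::AlignedBox2d box(Eigen::Vector2d(0, 0), Eigen::Vector2d(1, 1));
        Eigen::Vector2d p(3, 0.5);
        std::cout << box.squaredExteriorDistance(p) << "\n"; // 4: nearest face is x=1
        std::cout << box.exteriorDistance(p) << "\n";        // 2
        std::cout << box.exteriorDistance(Eigen::Vector2d(0.5, 0.5)) << "\n"; // 0, inside
    }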
+/** \defgroup alignedboxtypedefs Global aligned box typedefs
+  *
+  * \ingroup Geometry_Module
+  *
+  * Eigen defines several typedef shortcuts for most common aligned box types.
+  *
+  * The general patterns are the following:
+  *
+  * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double.
+  *
+  * For example, \c AlignedBox3d is a fixed-size three-dimensional aligned box type of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats.
+  *
+  * \sa class AlignedBox
+  */
+
+#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)    \
+/** \ingroup alignedboxtypedefs */                                 \
+typedef AlignedBox<Type, Size>   AlignedBox##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
+
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,    i)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,  f)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
+
+#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_TYPEDEFS
+
+} // end namespace Eigen
+
+#endif // EIGEN_ALIGNEDBOX_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AngleAxis.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AngleAxis.h
new file mode 100644
index 000000000..83ee1be46
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/AngleAxis.h
@@ -0,0 +1,247 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ANGLEAXIS_H
+#define EIGEN_ANGLEAXIS_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class AngleAxis
+  *
+  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c AngleAxisf for \c float
+  * \li \c AngleAxisd for \c double
+  *
+  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+  * mimic Euler-angles. Here is an example:
+  * \include AngleAxis_mimic_euler.cpp
+  * Output: \verbinclude AngleAxis_mimic_euler.out
+  *
+  * \note This class is not aimed to be used to store a rotation transformation,
+  * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix)
+  * and transformation objects.
+  *
+  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+  */
+
+namespace internal {
+template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+}
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 3 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,3,3> Matrix3;
+  typedef Matrix<Scalar,3,1> Vector3;
+  typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+  Vector3 m_axis;
+  Scalar m_angle;
+
+public:
+
+  /** Default constructor without initialization. */
+  EIGEN_DEVICE_FUNC AngleAxis() {}
+  /** Constructs and initialize the angle-axis rotation from an \a angle in radian
+    * and an \a axis which \b must \b be \b normalized.
+    *
+    * \warning If the \a axis vector is not normalized, then the angle-axis object
+    * represents an invalid rotation.
+    */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC
+  inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+  /** Constructs and initialize the angle-axis rotation from a quaternion \a q.
+    * This function implicitly normalizes the quaternion \a q.
+    */
+  template<typename QuatDerived>
+  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
+  /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+  /** \returns the value of the rotation angle in radian */
+  EIGEN_DEVICE_FUNC Scalar angle() const { return m_angle; }
+  /** \returns a read-write reference to the stored angle in radian */
+  EIGEN_DEVICE_FUNC Scalar& angle() { return m_angle; }
+
+  /** \returns the rotation axis */
+  EIGEN_DEVICE_FUNC const Vector3& axis() const { return m_axis; }
+  /** \returns a read-write reference to the stored rotation axis.
+    *
+    * \warning The rotation axis must remain a \b unit vector.
+    */
+  EIGEN_DEVICE_FUNC Vector3& axis() { return m_axis; }
+
+  /** Concatenates two rotations */
+  EIGEN_DEVICE_FUNC inline QuaternionType operator* (const AngleAxis& other) const
+  { return QuaternionType(*this) * QuaternionType(other); }
+
+  /** Concatenates two rotations */
+  EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& other) const
+  { return QuaternionType(*this) * other; }
+
+  /** Concatenates two rotations */
+  friend EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+  { return a * QuaternionType(b); }
+
+  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+  EIGEN_DEVICE_FUNC AngleAxis inverse() const
+  { return AngleAxis(-m_angle, m_axis); }
+
+  template<class QuatDerived>
+  EIGEN_DEVICE_FUNC AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+  EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix(void) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+  {
+    m_axis = other.axis().template cast<Scalar>();
+    m_angle = Scalar(other.angle());
+  }
+
+  EIGEN_DEVICE_FUNC static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  EIGEN_DEVICE_FUNC bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+  * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
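Not part of the patch: a sketch of the Euler-angle-style composition the class documentation mentions (the helper name is illustrative):

    // Compose elementary rotations with AngleAxis (Z-Y-X convention).
    #include <Eigen/Geometry>

    Eigen::Matrix3f euler_to_matrix(float yaw, float pitch, float roll)
    {
        using Eigen::AngleAxisf;
        using Eigen::Vector3f;
        // The axis passed to AngleAxis must be normalized; the
        // Unit{X,Y,Z} vectors are unit-length by construction.
        Eigen::Quaternionf q = AngleAxisf(yaw,   Vector3f::UnitZ())
                             * AngleAxisf(pitch, Vector3f::UnitY())
                             * AngleAxisf(roll,  Vector3f::UnitX());
        return q.toRotationMatrix();
    }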
+/** Set \c *this from a \b unit quaternion.
+  *
+  * The resulting axis is normalized, and the computed angle is in the [0,pi] range.
+  *
+  * This function implicitly normalizes the quaternion \a q.
+  */
+template<typename Scalar>
+template<typename QuatDerived>
+EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
+{
+  EIGEN_USING_STD_MATH(atan2)
+  EIGEN_USING_STD_MATH(abs)
+  Scalar n = q.vec().norm();
+  if(n<NumTraits<Scalar>::epsilon())
+    n = q.vec().stableNorm();
+
+  if (n != Scalar(0))
+  {
+    m_angle = Scalar(2)*atan2(n, abs(q.w()));
+    if(q.w() < Scalar(0))
+      n = -n;
+    m_axis = q.vec() / n;
+  }
+  else
+  {
+    m_angle = Scalar(0);
+    m_axis << Scalar(1), Scalar(0), Scalar(0);
+  }
+  return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+  */
+template<typename Scalar>
+template<typename Derived>
+EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+  // Since a direct conversion would not be really faster,
+  // let's use the robust Quaternion implementation:
+  return *this = QuaternionType(mat);
+}
+
+/**
+* \brief Sets \c *this from a 3x3 rotation matrix.
+**/
+template<typename Scalar>
+template<typename Derived>
+EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+  */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+EIGEN_DEVICE_FUNC AngleAxis<Scalar>::toRotationMatrix(void) const
+{
+  EIGEN_USING_STD_MATH(sin)
+  EIGEN_USING_STD_MATH(cos)
+  Matrix3 res;
+  Vector3 sin_axis = sin(m_angle) * m_axis;
+  Scalar c = cos(m_angle);
+  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+  Scalar tmp;
+  tmp = cos1_axis.x() * m_axis.y();
+  res.coeffRef(0,1) = tmp - sin_axis.z();
+  res.coeffRef(1,0) = tmp + sin_axis.z();
+
+  tmp = cos1_axis.x() * m_axis.z();
+  res.coeffRef(0,2) = tmp + sin_axis.y();
+  res.coeffRef(2,0) = tmp - sin_axis.y();
+
+  tmp = cos1_axis.y() * m_axis.z();
+  res.coeffRef(1,2) = tmp - sin_axis.x();
+  res.coeffRef(2,1) = tmp + sin_axis.x();
+
+  res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
+
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ANGLEAXIS_H
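Not part of the patch: a round-trip sketch between AngleAxis and a rotation matrix using the conversions defined above:

    // AngleAxis -> matrix -> AngleAxis round trip.
    #include <Eigen/Geometry>
    #include <iostream>

    void angleaxis_demo()
    {
        const double pi = 3.141592653589793;
        Eigen::AngleAxisd aa(pi / 4, Eigen::Vector3d::UnitZ());
        Eigen::Matrix3d R = aa.toRotationMatrix();

        Eigen::AngleAxisd back(R);                    // angle recovered in [0,pi]
        std::cout << back.angle() << "\n";            // ~0.7854
        std::cout << back.axis().transpose() << "\n"; // ~ (0, 0, 1)
    }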
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/EulerAngles.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/EulerAngles.h
new file mode 100644
index 000000000..c633268af
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/EulerAngles.h
@@ -0,0 +1,114 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EULERANGLES_H
+#define EIGEN_EULERANGLES_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  *
+  * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2)
+  *
+  * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}.
+  * For instance, in:
+  * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode
+  * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that
+  * we have the following equality:
+  * \code
+  * mat == AngleAxisf(ea[0], Vector3f::UnitZ())
+  *      * AngleAxisf(ea[1], Vector3f::UnitX())
+  *      * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
+  * This corresponds to the right-multiply conventions (with right hand side frames).
+  *
+  * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
+  *
+  * \sa class AngleAxis
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
+MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
+{
+  EIGEN_USING_STD_MATH(atan2)
+  EIGEN_USING_STD_MATH(sin)
+  EIGEN_USING_STD_MATH(cos)
+  /* Implemented from Graphics Gems IV */
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
+
+  Matrix<Scalar,3,1> res;
+  typedef Matrix<typename Derived::Scalar,2,1> Vector2;
+
+  const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
+  const Index i = a0;
+  const Index j = (a0 + 1 + odd)%3;
+  const Index k = (a0 + 2 - odd)%3;
+
+  if (a0==a2)
+  {
+    res[0] = atan2(coeff(j,i), coeff(k,i));
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
+    {
+      if(res[0] > Scalar(0)) {
+        res[0] -= Scalar(EIGEN_PI);
+      }
+      else {
+        res[0] += Scalar(EIGEN_PI);
+      }
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = -atan2(s2, coeff(i,i));
+    }
+    else
+    {
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = atan2(s2, coeff(i,i));
+    }
+
+    // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
+    // we can compute their respective rotation, and apply its inverse to M. Since the result must
+    // be a rotation around x, we have:
+    //
+    //  c2  s1.s2 c1.s2                   1  0   0
+    //  0   c1    -s1       *    M    =   0  c3  s3
+    //  -s2 s1.c2 c1.c2                   0 -s3  c3
+    //
+    //  Thus:  m11.c1 - m21.s1 = c3  &   m12.c1 - m22.s1 = s3
+
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
+  }
+  else
+  {
+    res[0] = atan2(coeff(j,k), coeff(k,k));
+    Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
+      if(res[0] > Scalar(0)) {
+        res[0] -= Scalar(EIGEN_PI);
+      }
+      else {
+        res[0] += Scalar(EIGEN_PI);
+      }
+      res[1] = atan2(-coeff(i,k), -c2);
+    }
+    else
+      res[1] = atan2(-coeff(i,k), c2);
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
+  }
+  if (!odd)
+    res = -res;
+
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_EULERANGLES_H
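Not part of the patch: a sketch of the extraction/reconstruction identity that the documentation above states:

    // Extract Z-X-Z Euler angles and rebuild the same rotation.
    #include <Eigen/Geometry>
    #include <iostream>

    void euler_demo()
    {
        Eigen::Matrix3f R = Eigen::AngleAxisf(0.3f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
        Eigen::Vector3f ea = R.eulerAngles(2, 0, 2);   // (z, x, z) convention

        Eigen::Matrix3f R2 = (Eigen::AngleAxisf(ea[0], Eigen::Vector3f::UnitZ())
                            * Eigen::AngleAxisf(ea[1], Eigen::Vector3f::UnitX())
                            * Eigen::AngleAxisf(ea[2], Eigen::Vector3f::UnitZ())).toRotationMatrix();
        std::cout << (R - R2).norm() << "\n";          // ~0
    }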
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Homogeneous.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Homogeneous.h
new file mode 100644
index 000000000..5f0da1a9e
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Homogeneous.h
@@ -0,0 +1,497 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HOMOGENEOUS_H
+#define EIGEN_HOMOGENEOUS_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Homogeneous
+  *
+  * \brief Expression of one (or a set of) homogeneous vector(s)
+  *
+  * \param MatrixType the type of the object in which we are making homogeneous
+  *
+  * This class represents an expression of one (or a set of) homogeneous vector(s).
+  * It is the return type of MatrixBase::homogeneous() and most of the time
+  * this is the only way it is used.
+  *
+  * \sa MatrixBase::homogeneous()
+  */
+
+namespace internal {
+
+template<typename MatrixType,int Direction>
+struct traits<Homogeneous<MatrixType,Direction> >
+ : traits<MatrixType>
+{
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ?
+                  int(MatrixType::RowsAtCompileTime) + 1 : Dynamic,
+    ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ?
+                  int(MatrixType::ColsAtCompileTime) + 1 : Dynamic,
+    RowsAtCompileTime = Direction==Vertical   ? RowsPlusOne : MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    TmpFlags = _MatrixTypeNested::Flags & HereditaryBits,
+    Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit)
+          : RowsAtCompileTime==1 ? (TmpFlags |  RowMajorBit)
+          : TmpFlags
+  };
+};
+
+template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl;
+template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl;
+
+} // end namespace internal
+
+template<typename MatrixType,int _Direction> class Homogeneous
+  : public MatrixBase<Homogeneous<MatrixType,_Direction> >, internal::no_assignment_operator
+{
+  public:
+
+    typedef MatrixType NestedExpression;
+    enum { Direction = _Direction };
+
+    typedef MatrixBase<Homogeneous> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)
+
+    EIGEN_DEVICE_FUNC explicit inline Homogeneous(const MatrixType& matrix)
+      : m_matrix(matrix)
+    {}
+
+    EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical   ? 1 : 0); }
+    EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
+
+    EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; }
+
+    template<typename Rhs>
+    EIGEN_DEVICE_FUNC inline const Product<Homogeneous,Rhs>
+    operator* (const MatrixBase<Rhs>& rhs) const
+    {
+      eigen_assert(int(Direction)==Horizontal);
+      return Product<Homogeneous,Rhs>(*this,rhs.derived());
+    }
+
+    template<typename Lhs> friend
+    EIGEN_DEVICE_FUNC inline const Product<Lhs,Homogeneous>
+    operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs)
+    {
+      eigen_assert(int(Direction)==Vertical);
+      return Product<Lhs,Homogeneous>(lhs.derived(),rhs);
+    }
+
+    template<typename Scalar, int Dim, int Mode, int Options> friend
+    EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous >
+    operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
+    {
+      eigen_assert(int(Direction)==Vertical);
+      return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs);
+    }
+
+    template<typename Func>
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::result_of<Func(Scalar,Scalar)>::type
+    redux(const Func& func) const
+    {
+      return func(m_matrix.redux(func), Scalar(1));
+    }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \returns a vector expression that is one longer than the vector argument, with the value 1 symbolically appended as the last coefficient.
+  *
+  * This can be used to convert affine coordinates to homogeneous coordinates.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include MatrixBase_homogeneous.cpp
+  * Output: \verbinclude MatrixBase_homogeneous.out
+  *
+  * \sa VectorwiseOp::homogeneous(), class Homogeneous
+  */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::HomogeneousReturnType
+MatrixBase<Derived>::homogeneous() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  return HomogeneousReturnType(derived());
+}
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \returns an expression where the value 1 is symbolically appended as the final coefficient to each column (or row) of the matrix.
+  *
+  * This can be used to convert affine coordinates to homogeneous coordinates.
+  *
+  * Example: \include VectorwiseOp_homogeneous.cpp
+  * Output: \verbinclude VectorwiseOp_homogeneous.out
+  *
+  * \sa MatrixBase::homogeneous(), class Homogeneous */
+template<typename ExpressionType, int Direction>
+EIGEN_DEVICE_FUNC inline Homogeneous<ExpressionType,Direction>
+VectorwiseOp<ExpressionType,Direction>::homogeneous() const
+{
+  return HomogeneousReturnType(_expression());
+}
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \brief homogeneous normalization
+  *
+  * \returns a vector expression of the N-1 first coefficients of \c *this divided by that last coefficient.
+  *
+  * This can be used to convert homogeneous coordinates to affine coordinates.
+  *
+  * It is essentially a shortcut for:
+  * \code
+    this->head(this->size()-1)/this->coeff(this->size()-1);
+  \endcode
+  *
+  * Example: \include MatrixBase_hnormalized.cpp
+  * Output: \verbinclude MatrixBase_hnormalized.out
+  *
+  * \sa VectorwiseOp::hnormalized() */
+template<typename Derived>
+EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::HNormalizedReturnType
+MatrixBase<Derived>::hnormalized() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  return ConstStartMinusOne(derived(),0,0,
+    ColsAtCompileTime==1?size()-1:1,
+    ColsAtCompileTime==1?1:size()-1) / coeff(size()-1);
+}
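Not part of the patch: a sketch of the homogeneous()/hnormalized() pair in use:

    // Apply a projective 4x4 matrix via homogeneous coordinates.
    #include <Eigen/Geometry>
    #include <iostream>

    void homogeneous_demo()
    {
        Eigen::Vector3d p(1, 2, 3);

        Eigen::Matrix4d P = Eigen::Matrix4d::Identity();
        P.row(3) << 0, 0, 1, 0;   // simple pinhole-style divide-by-z

        // p.homogeneous() is the lazy 4-vector (1, 2, 3, 1); hnormalized()
        // divides the first three coefficients by the last one.
        Eigen::Vector3d q = (P * p.homogeneous()).hnormalized(); // == p / p.z()
        std::cout << q.transpose() << "\n";                      // 0.333 0.667 1
    }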
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \brief column or row-wise homogeneous normalization
+  *
+  * \returns an expression of the first N-1 coefficients of each column (or row) of \c *this divided by the last coefficient of each column (or row).
+  *
+  * This can be used to convert homogeneous coordinates to affine coordinates.
+  *
+  * It is conceptually equivalent to calling MatrixBase::hnormalized() to each column (or row) of \c *this.
+  *
+  * Example: \include DirectionWise_hnormalized.cpp
+  * Output: \verbinclude DirectionWise_hnormalized.out
+  *
+  * \sa MatrixBase::hnormalized() */
+template<typename ExpressionType, int Direction>
+EIGEN_DEVICE_FUNC inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType
+VectorwiseOp<ExpressionType,Direction>::hnormalized() const
+{
+  return HNormalized_Block(_expression(),0,0,
+      Direction==Vertical   ? _expression().rows()-1 : _expression().rows(),
+      Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient(
+      Replicate<HNormalized_Factors,
+                Direction==Vertical   ? HNormalized_SizeMinusOne : 1,
+                Direction==Horizontal ? HNormalized_SizeMinusOne : 1>
+        (HNormalized_Factors(_expression(),
+                             Direction==Vertical    ? _expression().rows()-1:0,
+                             Direction==Horizontal  ? _expression().cols()-1:0,
+                             Direction==Vertical    ? 1 : _expression().rows(),
+                             Direction==Horizontal  ? 1 : _expression().cols()),
+         Direction==Vertical   ? _expression().rows()-1 : 1,
+         Direction==Horizontal ? _expression().cols()-1 : 1));
+}
+
+namespace internal {
+
+template<typename MatrixOrTransformType>
+struct take_matrix_for_product
+{
+  typedef MatrixOrTransformType type;
+  EIGEN_DEVICE_FUNC static const type& run(const type &x) { return x; }
+};
+
+template<typename Scalar, int Dim, int Mode,int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
+{
+  typedef Transform<Scalar, Dim, Mode, Options> TransformType;
+  typedef typename internal::add_const<typename TransformType::ConstAffinePart>::type type;
+  EIGEN_DEVICE_FUNC static type run (const TransformType& x) { return x.affine(); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
+{
+  typedef Transform<Scalar, Dim, Projective, Options> TransformType;
+  typedef typename TransformType::MatrixType type;
+  EIGEN_DEVICE_FUNC static const type& run (const TransformType& x) { return x.matrix(); }
+};
+
+template<typename MatrixType,typename Lhs>
+struct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+  typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType;
+  typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
+  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+  typedef typename make_proper_matrix_type<
+                 typename traits<MatrixTypeCleaned>::Scalar,
+                 LhsMatrixTypeCleaned::RowsAtCompileTime,
+                 MatrixTypeCleaned::ColsAtCompileTime,
+                 MatrixTypeCleaned::PlainObject::Options,
+                 LhsMatrixTypeCleaned::MaxRowsAtCompileTime,
+                 MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Lhs>
+struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
+  : public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+  typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;
+  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+  typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;
+  EIGEN_DEVICE_FUNC homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
+    : m_lhs(take_matrix_for_product<Lhs>::run(lhs)),
+      m_rhs(rhs)
+  {}
+
+  EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
+  EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
+  {
+    // FIXME investigate how to allow lazy evaluation of this product when possible
+    dst = Block<const LhsMatrixTypeNested,
+              LhsMatrixTypeNested::RowsAtCompileTime,
+              LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1>
+            (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs;
+    dst += m_lhs.col(m_lhs.cols()-1).rowwise()
+            .template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols());
+  }
+
+  typename LhsMatrixTypeCleaned::Nested m_lhs;
+  typename MatrixType::Nested m_rhs;
+};
+
+template<typename MatrixType,typename Rhs>
+struct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+  typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar,
+                 MatrixType::RowsAtCompileTime,
+                 Rhs::ColsAtCompileTime,
+                 MatrixType::PlainObject::Options,
+                 MatrixType::MaxRowsAtCompileTime,
+                 Rhs::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Rhs>
+struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
+  : public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+  typedef typename remove_all<typename Rhs::Nested>::type RhsNested;
+  EIGEN_DEVICE_FUNC homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
+    : m_lhs(lhs), m_rhs(rhs)
+  {}
+
+  EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
+  EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
+  {
+    // FIXME investigate how to allow lazy evaluation of this product when possible
+    dst = m_lhs * Block<const RhsNested,
+                        RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1,
+                        RhsNested::ColsAtCompileTime>
+            (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols());
+    dst += m_rhs.row(m_rhs.rows()-1).colwise()
+            .template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows());
+  }
+
+  typename MatrixType::Nested m_lhs;
+  typename Rhs::Nested m_rhs;
+};
+
+template<typename ArgType,int Direction>
+struct evaluator_traits<Homogeneous<ArgType,Direction> >
+{
+  typedef typename storage_kind_to_evaluator_kind<typename ArgType::StorageKind>::Kind Kind;
+  typedef HomogeneousShape Shape;
+};
+
+template<> struct AssignmentKind<DenseShape,HomogeneousShape> { typedef Dense2Dense Kind; };
+
+template<typename ArgType,int Direction>
+struct unary_evaluator<Homogeneous<ArgType,Direction>, IndexBased>
+  : evaluator<typename Homogeneous<ArgType,Direction>::PlainObject >
+{
+  typedef Homogeneous<ArgType,Direction> XprType;
+  typedef typename XprType::PlainObject PlainObject;
+  typedef evaluator<PlainObject> Base;
+
+  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
+    : Base(), m_temp(op)
+  {
+    ::new (static_cast<Base*>(this)) Base(m_temp);
+  }
+
+protected:
+  PlainObject m_temp;
+};
+
+// dense = homogeneous
+template< typename DstXprType, typename ArgType, typename Scalar>
+struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>
+{
+  typedef Homogeneous<ArgType,Vertical> SrcXprType;
+  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
+  {
+    Index dstRows = src.rows();
+    Index dstCols = src.cols();
+    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+      dst.resize(dstRows, dstCols);
+
+    dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression();
+    dst.row(dst.rows()-1).setOnes();
+  }
+};
+
+// dense = homogeneous
+template< typename DstXprType, typename ArgType, typename Scalar>
+struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>
+{
+  typedef Homogeneous<ArgType,Horizontal> SrcXprType;
+  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
+  {
+    Index dstRows = src.rows();
+    Index dstCols = src.cols();
+    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+      dst.resize(dstRows, dstCols);
+
+    dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression();
+    dst.col(dst.cols()-1).setOnes();
+  }
+};
+
+template<typename LhsArg, typename Rhs, int ProductTag>
+struct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag>
+{
+  template<typename Dest>
+  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Homogeneous<LhsArg,Horizontal>& lhs, const Rhs& rhs)
+  {
+    homogeneous_right_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs>(lhs.nestedExpression(), rhs).evalTo(dst);
+  }
+};
+
+template<typename Lhs,typename Rhs>
+struct homogeneous_right_product_refactoring_helper
+{
+  enum {
+    Dim  = Lhs::ColsAtCompileTime,
+    Rows = Lhs::RowsAtCompileTime
+  };
+  typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type          LinearBlockConst;
+  typedef typename remove_const<LinearBlockConst>::type                 LinearBlock;
+  typedef typename Rhs::ConstRowXpr                                     ConstantColumn;
+  typedef Replicate<const ConstantColumn,Rows,1>                        ConstantBlock;
+  typedef Product<Lhs,LinearBlock,LazyProduct>                          LinearProduct;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape>
+ : public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr>
+{
+  typedef Product<Lhs, Rhs, LazyProduct> XprType;
+  typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper;
+  typedef typename helper::ConstantBlock ConstantBlock;
+  typedef typename helper::Xpr RefactoredXpr;
+  typedef evaluator<RefactoredXpr> Base;
+
+  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
+    : Base(  xpr.lhs().nestedExpression() .lazyProduct( xpr.rhs().template topRows<helper::Dim>(xpr.lhs().nestedExpression().cols()) )
+            + ConstantBlock(xpr.rhs().row(xpr.rhs().rows()-1),xpr.lhs().rows(), 1) )
+  {}
+};
+
+template<typename Lhs, typename RhsArg, int ProductTag>
+struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>
+{
+  template<typename Dest>
+  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
+  {
+    homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, Lhs>(lhs, rhs.nestedExpression()).evalTo(dst);
+  }
+};
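Not part of the patch: the specializations above are what let products with homogeneous expressions evaluate without materializing the padded vector. A sketch of typical client code that exercises this path (the helper name is illustrative):

    // Apply a projective transform to a 3D point.
    #include <Eigen/Geometry>

    Eigen::Vector3f project(const Eigen::Projective3f& P, const Eigen::Vector3f& p)
    {
        // P.matrix() is 4x4; p.homogeneous() is a lazy 4-vector expression,
        // so this dispatches through the homogeneous product machinery above.
        return (P.matrix() * p.homogeneous()).hnormalized();
    }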
+// TODO: the following specialization is to address a regression from 3.2 to 3.3
+// In the future, this path should be optimized.
+template<typename Lhs, typename RhsArg, int ProductTag>
+struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, TriangularShape, HomogeneousShape, ProductTag>
+{
+  template<typename Dest>
+  static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
+  {
+    dst.noalias() = lhs * rhs.eval();
+  }
+};
+
+template<typename Lhs,typename Rhs>
+struct homogeneous_left_product_refactoring_helper
+{
+  enum {
+    Dim = Rhs::RowsAtCompileTime,
+    Cols = Rhs::ColsAtCompileTime
+  };
+  typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type          LinearBlockConst;
+  typedef typename remove_const<LinearBlockConst>::type                 LinearBlock;
+  typedef typename Lhs::ConstColXpr                                     ConstantColumn;
+  typedef Replicate<const ConstantColumn,1,Cols>                        ConstantBlock;
+  typedef Product<LinearBlock,Rhs,LazyProduct>                          LinearProduct;
+  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, HomogeneousShape>
+ : public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr>
+{
+  typedef Product<Lhs, Rhs, LazyProduct> XprType;
+  typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper;
+  typedef typename helper::ConstantBlock ConstantBlock;
+  typedef typename helper::Xpr RefactoredXpr;
+  typedef evaluator<RefactoredXpr> Base;
+
+  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
+    : Base(  xpr.lhs().template leftCols<helper::Dim>(xpr.rhs().nestedExpression().rows()) .lazyProduct( xpr.rhs().nestedExpression() )
+            + ConstantBlock(xpr.lhs().col(xpr.lhs().cols()-1),1,xpr.rhs().cols()) )
+  {}
+};
+
+template<typename Scalar, int Dim, int Mode,int Options, typename RhsArg, int ProductTag>
+struct generic_product_impl<Transform<Scalar,Dim,Mode,Options>, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>
+{
+  typedef Transform<Scalar,Dim,Mode,Options> TransformType;
+  template<typename Dest>
+  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const TransformType& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
+  {
+    homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, TransformType>(lhs, rhs.nestedExpression()).evalTo(dst);
+  }
+};
+
+template <typename ExpressionType, int Side, bool Transposed>
+struct permutation_matrix_product<ExpressionType, Side, Transposed, HomogeneousShape>
+  : public permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape>
+{};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_HOMOGENEOUS_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Hyperplane.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Hyperplane.h
new file mode 100644
index 000000000..05929b299
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Hyperplane.h
@@ -0,0 +1,282 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HYPERPLANE_H
+#define EIGEN_HYPERPLANE_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Hyperplane
+  *
+  * \brief A hyperplane
+  *
+  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
+  *
+  * This class represents a hyperplane as the zero set of the implicit equation
+  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+  * and \f$ d \f$ is the distance (offset) to the origin.
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class Hyperplane
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum {
+    AmbientDimAtCompileTime = _AmbientDim,
+    Options = _Options
+  };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+  typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
+                        ? Dynamic
+                        : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
+  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+  typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
+
+  /** Default constructor without initialization */
+  EIGEN_DEVICE_FUNC inline Hyperplane() {}
+
+  template<int OtherOptions>
+  EIGEN_DEVICE_FUNC Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+   : m_coeffs(other.coeffs())
+  {}
+
+  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+    * of the ambient space */
+  EIGEN_DEVICE_FUNC inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
+
+  /** Construct a plane from its normal \a n and a point \a e onto the plane.
+    * \warning the vector normal is assumed to be normalized.
+    */
+  EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const VectorType& e)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = -n.dot(e);
+  }
+
+  /** Constructs a plane from its normal \a n and distance to the origin \a d
+    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+    * \warning the vector normal is assumed to be normalized.
+    */
+  EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const Scalar& d)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = d;
+  }
+
+  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
+    * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made.
+    */
+  EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+  {
+    Hyperplane result(p0.size());
+    result.normal() = (p1 - p0).unitOrthogonal();
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+    * is required to be exactly 3.
+    */
+  EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+    Hyperplane result(p0.size());
+    VectorType v0(p2 - p0), v1(p1 - p0);
+    result.normal() = v0.cross(v1);
+    RealScalar norm = result.normal().norm();
+    if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
+    {
+      Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+      JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+      result.normal() = svd.matrixV().col(2);
+    }
+    else
+      result.normal() /= norm;
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+    * If the dimension of the ambient space is greater than 2, then there isn't uniqueness,
+    * so an arbitrary choice is made.
+    */
+  // FIXME to be consistent with the rest this could be implemented as a static Through function ??
+  EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+  {
+    normal() = parametrized.direction().unitOrthogonal();
+    offset() = -parametrized.origin().dot(normal());
+  }
+
+  EIGEN_DEVICE_FUNC ~Hyperplane() {}
+  /** \returns the dimension in which the plane holds */
+  EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
+
+  /** normalizes \c *this */
+  EIGEN_DEVICE_FUNC void normalize(void)
+  {
+    m_coeffs /= normal().norm();
+  }
+
+  /** \returns the signed distance between the plane \c *this and a point \a p.
+    * \sa absDistance()
+    */
+  EIGEN_DEVICE_FUNC inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
+
+  /** \returns the absolute distance between the plane \c *this and a point \a p.
+    * \sa signedDistance()
+    */
+  EIGEN_DEVICE_FUNC inline Scalar absDistance(const VectorType& p) const { return numext::abs(signedDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the plane \c *this.
+    */
+  EIGEN_DEVICE_FUNC inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
+
+  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  EIGEN_DEVICE_FUNC inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  EIGEN_DEVICE_FUNC inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+    * \warning the vector normal is assumed to be normalized.
+    */
+  EIGEN_DEVICE_FUNC inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+    * of the implicit equation */
+  EIGEN_DEVICE_FUNC inline Scalar& offset() { return m_coeffs(dim()); }
+
+  /** \returns a constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }
+  /** \returns the intersection of *this with \a other.
+    *
+    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+    *
+    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
+    */
+  EIGEN_DEVICE_FUNC VectorType intersection(const Hyperplane& other) const
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
+    // whether the two lines are approximately parallel.
+    if(internal::isMuchSmallerThan(det, Scalar(1)))
+    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
+        if(numext::abs(coeffs().coeff(1))>numext::abs(coeffs().coeff(0)))
+            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+        else
+            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+    }
+    else
+    {   // general case
+        Scalar invdet = Scalar(1) / det;
+        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+    }
+  }
+
+  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+    *
+    * \param mat the Dim x Dim transformation matrix
+    * \param traits specifies whether the matrix \a mat represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    */
+  template<typename XprType>
+  EIGEN_DEVICE_FUNC inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+  {
+    if (traits==Affine)
+    {
+      normal() = mat.inverse().transpose() * normal();
+      m_coeffs /= normal().norm();
+    }
+    else if (traits==Isometry)
+      normal() = mat * normal();
+    else
+    {
+      eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
+    }
+    return *this;
+  }
+
+  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+    *
+    * \param t the transformation of dimension Dim
+    * \param traits specifies whether the transformation \a t represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    *               Other kind of transformations are not supported.
+    */
+  template<int TrOptions>
+  EIGEN_DEVICE_FUNC inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
+                                                 TransformTraits traits = Affine)
+  {
+    transform(t.linear(), traits);
+    offset() -= normal().dot(t.translation());
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Hyperplane,
+           Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+  {
+    return typename internal::cast_return_type<Hyperplane,
+                    Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType,int OtherOptions>
+  EIGEN_DEVICE_FUNC inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  template<int OtherOptions>
+  EIGEN_DEVICE_FUNC bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+  Coefficients m_coeffs;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_HYPERPLANE_H
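Not part of the patch: a sketch of the Hyperplane API in the 2D case, where a hyperplane is a line (demo name illustrative):

    // Build lines as hyperplanes, then query distance, projection, intersection.
    #include <Eigen/Geometry>
    #include <iostream>

    void hyperplane_demo()
    {
        typedef Eigen::Hyperplane<float, 2> Line2;
        Line2 line = Line2::Through(Eigen::Vector2f(0, 0), Eigen::Vector2f(1, 1));

        Eigen::Vector2f p(1, 0);
        std::cout << line.absDistance(p) << "\n";            // ~0.707
        std::cout << line.projection(p).transpose() << "\n"; // ~ (0.5, 0.5)

        Line2 other = Line2::Through(Eigen::Vector2f(0, 1), Eigen::Vector2f(1, 0));
        std::cout << line.intersection(other).transpose() << "\n"; // ~ (0.5, 0.5)
    }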
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/OrthoMethods.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/OrthoMethods.h
new file mode 100644
index 000000000..a035e6310
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/OrthoMethods.h
@@ -0,0 +1,234 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ORTHOMETHODS_H
+#define EIGEN_ORTHOMETHODS_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \returns the cross product of \c *this and \a other
+  *
+  * Here is a very good explanation of cross-product: http://xkcd.com/199/
+  *
+  * With complex numbers, the cross product is implemented as
+  * \f$ (\mathbf{a}+i\mathbf{b}) \times (\mathbf{c}+i\mathbf{d}) = (\mathbf{a} \times \mathbf{c} - \mathbf{b} \times \mathbf{d}) - i(\mathbf{a} \times \mathbf{d} - \mathbf{b} \times \mathbf{c})\f$
+  *
+  * \sa MatrixBase::cross3()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
+#else
+inline typename MatrixBase<Derived>::PlainObject
+#endif
+MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+
+  // Note that there is no need for an expression here since the compiler
+  // optimize such a small temporary very well (even within a complex expression)
+  typename internal::nested_eval<Derived,2>::type lhs(derived());
+  typename internal::nested_eval<OtherDerived,2>::type rhs(other.derived());
+  return typename cross_product_return_type<OtherDerived>::type(
+    numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+    numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+    numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
+  );
+}
+
+namespace internal {
+
+template< int Arch,typename VectorLhs,typename VectorRhs,
+          typename Scalar = typename VectorLhs::Scalar,
+          bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)>
+struct cross3_impl {
+  EIGEN_DEVICE_FUNC static inline typename internal::plain_matrix_type<VectorLhs>::type
+  run(const VectorLhs& lhs, const VectorRhs& rhs)
+  {
+    return typename internal::plain_matrix_type<VectorLhs>::type(
+      numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+      numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+      numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
+      0
+    );
+  }
+};
+
+}
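Not part of the patch: a sketch of the cross product defined above and its orthogonality property:

    // Cross product: the result is perpendicular to both inputs.
    #include <Eigen/Geometry>
    #include <iostream>

    void cross_demo()
    {
        Eigen::Vector3d a(1, 0, 0), b(0, 1, 0);
        Eigen::Vector3d c = a.cross(b);                   // (0, 0, 1)
        std::cout << c.transpose() << "\n";
        std::cout << c.dot(a) << " " << c.dot(b) << "\n"; // 0 0
    }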
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients
+  *
+  * The size of \c *this and \a other must be four. This function is especially useful
+  * when using 4D vectors instead of 3D ones to get advantage of SSE/AltiVec vectorization.
+  *
+  * \sa MatrixBase::cross()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)
+
+  typedef typename internal::nested_eval<Derived,2>::type DerivedNested;
+  typedef typename internal::nested_eval<OtherDerived,2>::type OtherDerivedNested;
+  DerivedNested lhs(derived());
+  OtherDerivedNested rhs(other.derived());
+
+  return internal::cross3_impl<Architecture::Target,
+                        typename internal::remove_all<DerivedNested>::type,
+                        typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);
+}
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \returns a matrix expression of the cross product of each column or row
+  * of the referenced expression with the \a other vector.
+  *
+  * The referenced matrix must have one dimension equal to 3.
+  * The result matrix has the same dimensions as the referenced one.
+  *
+  * \sa MatrixBase::cross() */
+template<typename ExpressionType, int Direction>
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+const typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType
+VectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  typename internal::nested_eval<ExpressionType,2>::type mat(_expression());
+  typename internal::nested_eval<OtherDerived,2>::type vec(other.derived());
+
+  CrossReturnType res(_expression().rows(),_expression().cols());
+  if(Direction==Vertical)
+  {
+    eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows");
+    res.row(0) = (mat.row(1) * vec.coeff(2) - mat.row(2) * vec.coeff(1)).conjugate();
+    res.row(1) = (mat.row(2) * vec.coeff(0) - mat.row(0) * vec.coeff(2)).conjugate();
+    res.row(2) = (mat.row(0) * vec.coeff(1) - mat.row(1) * vec.coeff(0)).conjugate();
+  }
+  else
+  {
+    eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns");
+    res.col(0) = (mat.col(1) * vec.coeff(2) - mat.col(2) * vec.coeff(1)).conjugate();
+    res.col(1) = (mat.col(2) * vec.coeff(0) - mat.col(0) * vec.coeff(2)).conjugate();
+    res.col(2) = (mat.col(0) * vec.coeff(1) - mat.col(1) * vec.coeff(0)).conjugate();
+  }
+  return res;
+}
+
+namespace internal {
+
+template<typename Derived, int Size = Derived::SizeAtCompileTime>
+struct unitOrthogonal_selector
+{
+  typedef typename plain_matrix_type<Derived>::type VectorType;
+  typedef typename traits<Derived>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  EIGEN_DEVICE_FUNC
+  static inline VectorType run(const Derived& src)
+  {
+    VectorType perp = VectorType::Zero(src.size());
+    Index maxi = 0;
+    Index sndi = 0;
+    src.cwiseAbs().maxCoeff(&maxi);
+    if (maxi==0)
+      sndi = 1;
+    RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
+    perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
+    perp.coeffRef(sndi) =  numext::conj(src.coeff(maxi)) * invnm;
+
+    return perp;
+  }
+};
+ */ + if((!isMuchSmallerThan(src.x(), src.z())) + || (!isMuchSmallerThan(src.y(), src.z()))) + { + RealScalar invnm = RealScalar(1)/src.template head<2>().norm(); + perp.coeffRef(0) = -numext::conj(src.y())*invnm; + perp.coeffRef(1) = numext::conj(src.x())*invnm; + perp.coeffRef(2) = 0; + } + /* if both x and y are close to zero, then the vector is close + * to the z-axis, so it's far from colinear to the x-axis for instance. + * So we take the crossed product with (1,0,0) and normalize it. + */ + else + { + RealScalar invnm = RealScalar(1)/src.template tail<2>().norm(); + perp.coeffRef(0) = 0; + perp.coeffRef(1) = -numext::conj(src.z())*invnm; + perp.coeffRef(2) = numext::conj(src.y())*invnm; + } + + return perp; + } +}; + +template +struct unitOrthogonal_selector +{ + typedef typename plain_matrix_type::type VectorType; + EIGEN_DEVICE_FUNC + static inline VectorType run(const Derived& src) + { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); } +}; + +} // end namespace internal + +/** \geometry_module \ingroup Geometry_Module + * + * \returns a unit vector which is orthogonal to \c *this + * + * The size of \c *this must be at least 2. If the size is exactly 2, + * then the returned vector is a counter clock wise rotation of \c *this, i.e., (-y,x).normalized(). + * + * \sa cross() + */ +template +EIGEN_DEVICE_FUNC typename MatrixBase::PlainObject +MatrixBase::unitOrthogonal() const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return internal::unitOrthogonal_selector::run(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_ORTHOMETHODS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/ParametrizedLine.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/ParametrizedLine.h new file mode 100644 index 000000000..1e985d8cd --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/ParametrizedLine.h @@ -0,0 +1,195 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARAMETRIZEDLINE_H +#define EIGEN_PARAMETRIZEDLINE_H + +namespace Eigen { + +/** \geometry_module \ingroup Geometry_Module + * + * \class ParametrizedLine + * + * \brief A parametrized line + * + * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit + * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to + * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$. + * + * \tparam _Scalar the scalar type, i.e., the type of the coefficients + * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. 
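Editor's note: the selector above powers MatrixBase::unitOrthogonal(); a minimal sketch of the public API (editorial addition, values illustrative):

// Editorial sketch: unitOrthogonal() picks a numerically stable perpendicular.
#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main() {
  Eigen::Vector3d n(0.0, 0.0, 2.0);        // nearly aligned with the z-axis
  Eigen::Vector3d t = n.unitOrthogonal();  // unit length, orthogonal to n
  assert(std::abs(t.dot(n)) < 1e-12 && std::abs(t.norm() - 1.0) < 1e-12);
  return 0;
}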
+ */ +template +class ParametrizedLine +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) + enum { + AmbientDimAtCompileTime = _AmbientDim, + Options = _Options + }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + typedef Matrix VectorType; + + /** Default constructor without initialization */ + EIGEN_DEVICE_FUNC inline ParametrizedLine() {} + + template + EIGEN_DEVICE_FUNC ParametrizedLine(const ParametrizedLine& other) + : m_origin(other.origin()), m_direction(other.direction()) + {} + + /** Constructs a dynamic-size line with \a _dim the dimension + * of the ambient space */ + EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {} + + /** Initializes a parametrized line of direction \a direction and origin \a origin. + * \warning the vector direction is assumed to be normalized. + */ + EIGEN_DEVICE_FUNC ParametrizedLine(const VectorType& origin, const VectorType& direction) + : m_origin(origin), m_direction(direction) {} + + template + EIGEN_DEVICE_FUNC explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); + + /** Constructs a parametrized line going from \a p0 to \a p1. */ + EIGEN_DEVICE_FUNC static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) + { return ParametrizedLine(p0, (p1-p0).normalized()); } + + EIGEN_DEVICE_FUNC ~ParametrizedLine() {} + + /** \returns the dimension in which the line holds */ + EIGEN_DEVICE_FUNC inline Index dim() const { return m_direction.size(); } + + EIGEN_DEVICE_FUNC const VectorType& origin() const { return m_origin; } + EIGEN_DEVICE_FUNC VectorType& origin() { return m_origin; } + + EIGEN_DEVICE_FUNC const VectorType& direction() const { return m_direction; } + EIGEN_DEVICE_FUNC VectorType& direction() { return m_direction; } + + /** \returns the squared distance of a point \a p to its projection onto the line \c *this. + * \sa distance() + */ + EIGEN_DEVICE_FUNC RealScalar squaredDistance(const VectorType& p) const + { + VectorType diff = p - origin(); + return (diff - direction().dot(diff) * direction()).squaredNorm(); + } + /** \returns the distance of a point \a p to its projection onto the line \c *this. + * \sa squaredDistance() + */ + EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(squaredDistance(p)); } + + /** \returns the projection of a point \a p onto the line \c *this. */ + EIGEN_DEVICE_FUNC VectorType projection(const VectorType& p) const + { return origin() + direction().dot(p-origin()) * direction(); } + + EIGEN_DEVICE_FUNC VectorType pointAt(const Scalar& t) const; + + template + EIGEN_DEVICE_FUNC Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; + + template + EIGEN_DEVICE_FUNC Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; + + template + EIGEN_DEVICE_FUNC VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
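Editor's note: a minimal sketch of the ParametrizedLine API declared above (editorial addition, not part of the vendored file):

// Editorial sketch: constructing and querying a ParametrizedLine.
#include <Eigen/Dense>
#include <iostream>

int main() {
  using Line3 = Eigen::ParametrizedLine<double, 3>;
  Line3 l = Line3::Through(Eigen::Vector3d::Zero(), Eigen::Vector3d::UnitX());
  std::cout << l.distance(Eigen::Vector3d(0.0, 3.0, 4.0)) << "\n";              // 5
  std::cout << l.projection(Eigen::Vector3d(2.0, 1.0, 0.0)).transpose() << "\n"; // 2 0 0
  std::cout << l.pointAt(3.0).transpose() << "\n";                              // 3 0 0
  return 0;
}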
+ */ + template + EIGEN_DEVICE_FUNC inline typename internal::cast_return_type >::type cast() const + { + return typename internal::cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(const ParametrizedLine& other) + { + m_origin = other.origin().template cast(); + m_direction = other.direction().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + EIGEN_DEVICE_FUNC bool isApprox(const ParametrizedLine& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const + { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } + +protected: + + VectorType m_origin, m_direction; +}; + +/** Constructs a parametrized line from a 2D hyperplane + * + * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line + */ +template +template +EIGEN_DEVICE_FUNC inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) + direction() = hyperplane.normal().unitOrthogonal(); + origin() = -hyperplane.normal()*hyperplane.offset(); +} + +/** \returns the point at \a t along this line + */ +template +EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType +ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const +{ + return origin() + (direction()*t); +} + +/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane + */ +template +template +EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +{ + return -(hyperplane.offset()+hyperplane.normal().dot(origin())) + / hyperplane.normal().dot(direction()); +} + + +/** \deprecated use intersectionParameter() + * \returns the parameter value of the intersection between \c *this and the given \a hyperplane + */ +template +template +EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +{ + return intersectionParameter(hyperplane); +} + +/** \returns the point of the intersection between \c *this and the given hyperplane + */ +template +template +EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType +ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +{ + return pointAt(intersectionParameter(hyperplane)); +} + +} // end namespace Eigen + +#endif // EIGEN_PARAMETRIZEDLINE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Quaternion.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Quaternion.h new file mode 100644 index 000000000..5a3228dc5 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Quaternion.h @@ -0,0 +1,817 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
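Editor's note: the intersection helpers defined above pair ParametrizedLine with Hyperplane; a sketch under the convention n.dot(x) + d = 0 used by Hyperplane (editorial addition):

// Editorial sketch: intersecting a ray with the plane z == 1.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Hyperplane<double, 3> plane(Eigen::Vector3d::UnitZ(), -1.0); // z = 1
  Eigen::ParametrizedLine<double, 3> ray(Eigen::Vector3d::Zero(),
                                         Eigen::Vector3d::UnitZ());
  double t = ray.intersectionParameter(plane);        // -(d + n.o)/(n.d) = 1
  Eigen::Vector3d hit = ray.intersectionPoint(plane); // (0, 0, 1)
  std::cout << t << " | " << hit.transpose() << "\n";
  return 0;
}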
+// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2009 Mathieu Gautier +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_QUATERNION_H +#define EIGEN_QUATERNION_H +namespace Eigen { + + +/*************************************************************************** +* Definition of QuaternionBase +* The implementation is at the end of the file +***************************************************************************/ + +namespace internal { +template +struct quaternionbase_assign_impl; +} + +/** \geometry_module \ingroup Geometry_Module + * \class QuaternionBase + * \brief Base class for quaternion expressions + * \tparam Derived derived type (CRTP) + * \sa class Quaternion + */ +template +class QuaternionBase : public RotationBase +{ + public: + typedef RotationBase Base; + + using Base::operator*; + using Base::derived; + + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename internal::traits::Coefficients Coefficients; + typedef typename Coefficients::CoeffReturnType CoeffReturnType; + typedef typename internal::conditional::Flags&LvalueBit), + Scalar&, CoeffReturnType>::type NonConstCoeffReturnType; + + + enum { + Flags = Eigen::internal::traits::Flags + }; + + // typedef typename Matrix Coefficients; + /** the type of a 3D vector */ + typedef Matrix Vector3; + /** the equivalent rotation matrix type */ + typedef Matrix Matrix3; + /** the equivalent angle-axis type */ + typedef AngleAxis AngleAxisType; + + + + /** \returns the \c x coefficient */ + EIGEN_DEVICE_FUNC inline CoeffReturnType x() const { return this->derived().coeffs().coeff(0); } + /** \returns the \c y coefficient */ + EIGEN_DEVICE_FUNC inline CoeffReturnType y() const { return this->derived().coeffs().coeff(1); } + /** \returns the \c z coefficient */ + EIGEN_DEVICE_FUNC inline CoeffReturnType z() const { return this->derived().coeffs().coeff(2); } + /** \returns the \c w coefficient */ + EIGEN_DEVICE_FUNC inline CoeffReturnType w() const { return this->derived().coeffs().coeff(3); } + + /** \returns a reference to the \c x coefficient (if Derived is a non-const lvalue) */ + EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType x() { return this->derived().coeffs().x(); } + /** \returns a reference to the \c y coefficient (if Derived is a non-const lvalue) */ + EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType y() { return this->derived().coeffs().y(); } + /** \returns a reference to the \c z coefficient (if Derived is a non-const lvalue) */ + EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType z() { return this->derived().coeffs().z(); } + /** \returns a reference to the \c w coefficient (if Derived is a non-const lvalue) */ + EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType w() { return this->derived().coeffs().w(); } + + /** \returns a read-only vector expression of the imaginary part (x,y,z) */ + EIGEN_DEVICE_FUNC inline const VectorBlock vec() const { return coeffs().template head<3>(); } + + /** \returns a vector expression of the imaginary part (x,y,z) */ + EIGEN_DEVICE_FUNC inline VectorBlock vec() { return coeffs().template head<3>(); } + + /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ + EIGEN_DEVICE_FUNC inline const typename internal::traits::Coefficients& coeffs() const { return derived().coeffs(); } + + /** \returns a vector expression 
of the coefficients (x,y,z,w) */ + EIGEN_DEVICE_FUNC inline typename internal::traits::Coefficients& coeffs() { return derived().coeffs(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase& operator=(const QuaternionBase& other); + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase& other); + +// disabled this copy operator as it is giving very strange compilation errors when compiling +// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's +// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase +// we didn't have to add, in addition to templated operator=, such a non-templated copy operator. +// Derived& operator=(const QuaternionBase& other) +// { return operator=(other); } + + EIGEN_DEVICE_FUNC Derived& operator=(const AngleAxisType& aa); + template EIGEN_DEVICE_FUNC Derived& operator=(const MatrixBase& m); + + /** \returns a quaternion representing an identity rotation + * \sa MatrixBase::Identity() + */ + EIGEN_DEVICE_FUNC static inline Quaternion Identity() { return Quaternion(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); } + + /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() + */ + EIGEN_DEVICE_FUNC inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; } + + /** \returns the squared norm of the quaternion's coefficients + * \sa QuaternionBase::norm(), MatrixBase::squaredNorm() + */ + EIGEN_DEVICE_FUNC inline Scalar squaredNorm() const { return coeffs().squaredNorm(); } + + /** \returns the norm of the quaternion's coefficients + * \sa QuaternionBase::squaredNorm(), MatrixBase::norm() + */ + EIGEN_DEVICE_FUNC inline Scalar norm() const { return coeffs().norm(); } + + /** Normalizes the quaternion \c *this + * \sa normalized(), MatrixBase::normalize() */ + EIGEN_DEVICE_FUNC inline void normalize() { coeffs().normalize(); } + /** \returns a normalized copy of \c *this + * \sa normalize(), MatrixBase::normalized() */ + EIGEN_DEVICE_FUNC inline Quaternion normalized() const { return Quaternion(coeffs().normalized()); } + + /** \returns the dot product of \c *this and \a other + * Geometrically speaking, the dot product of two unit quaternions + * corresponds to the cosine of half the angle between the two rotations. + * \sa angularDistance() + */ + template EIGEN_DEVICE_FUNC inline Scalar dot(const QuaternionBase& other) const { return coeffs().dot(other.coeffs()); } + + template EIGEN_DEVICE_FUNC Scalar angularDistance(const QuaternionBase& other) const; + + /** \returns an equivalent 3x3 rotation matrix */ + EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix() const; + + /** \returns the quaternion which transform \a a into \a b through a rotation */ + template + EIGEN_DEVICE_FUNC Derived& setFromTwoVectors(const MatrixBase& a, const MatrixBase& b); + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion operator* (const QuaternionBase& q) const; + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase& q); + + /** \returns the quaternion describing the inverse rotation */ + EIGEN_DEVICE_FUNC Quaternion inverse() const; + + /** \returns the conjugated quaternion */ + EIGEN_DEVICE_FUNC Quaternion conjugate() const; + + template EIGEN_DEVICE_FUNC Quaternion slerp(const Scalar& t, const QuaternionBase& other) const; + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
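Editor's note: a quick sketch of the QuaternionBase members introduced above, Identity(), normalize(), and dot() (editorial addition, values illustrative):

// Editorial sketch: basic QuaternionBase operations.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Quaterniond q(0.9, 0.1, 0.2, 0.3);  // arguments in w, x, y, z order
  q.normalize();                             // rotation ops assume unit norm
  double c = q.dot(Eigen::Quaterniond::Identity()); // cos of half the angle
  std::cout << q.norm() << " " << c << "\n";
  return 0;
}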
+ * + * \sa MatrixBase::isApprox() */ + template + EIGEN_DEVICE_FUNC bool isApprox(const QuaternionBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const + { return coeffs().isApprox(other.coeffs(), prec); } + + /** return the result vector of \a v through the rotation*/ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + EIGEN_DEVICE_FUNC inline typename internal::cast_return_type >::type cast() const + { + return typename internal::cast_return_type >::type(derived()); + } + +#ifdef EIGEN_QUATERNIONBASE_PLUGIN +# include EIGEN_QUATERNIONBASE_PLUGIN +#endif +protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(QuaternionBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(QuaternionBase) +}; + +/*************************************************************************** +* Definition/implementation of Quaternion +***************************************************************************/ + +/** \geometry_module \ingroup Geometry_Module + * + * \class Quaternion + * + * \brief The quaternion class used to represent 3D orientations and rotations + * + * \tparam _Scalar the scalar type, i.e., the type of the coefficients + * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. + * + * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of + * orientations and rotations of objects in three dimensions. Compared to other representations + * like Euler angles or 3x3 matrices, quaternions offer the following advantages: + * \li \b compact storage (4 scalars) + * \li \b efficient to compose (28 flops), + * \li \b stable spherical interpolation + * + * The following two typedefs are provided for convenience: + * \li \c Quaternionf for \c float + * \li \c Quaterniond for \c double + * + * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. + * + * \sa class AngleAxis, class Transform + */ + +namespace internal { +template +struct traits > +{ + typedef Quaternion<_Scalar,_Options> PlainObject; + typedef _Scalar Scalar; + typedef Matrix<_Scalar,4,1,_Options> Coefficients; + enum{ + Alignment = internal::traits::Alignment, + Flags = LvalueBit + }; +}; +} + +template +class Quaternion : public QuaternionBase > +{ +public: + typedef QuaternionBase > Base; + enum { NeedsAlignment = internal::traits::Alignment>0 }; + + typedef _Scalar Scalar; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion) + using Base::operator*=; + + typedef typename internal::traits::Coefficients Coefficients; + typedef typename Base::AngleAxisType AngleAxisType; + + /** Default constructor leaving the quaternion uninitialized. */ + EIGEN_DEVICE_FUNC inline Quaternion() {} + + /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from + * its four coefficients \a w, \a x, \a y and \a z. 
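Editor's note: a sketch of rotating a vector (which routes through _transformVector() above) and of cast(); FromTwoVectors() is declared further down in this file (editorial addition):

// Editorial sketch: applying a quaternion rotation and casting scalar types.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Quaterniond q = Eigen::Quaterniond::FromTwoVectors(
      Eigen::Vector3d::UnitX(), Eigen::Vector3d::UnitY());
  Eigen::Vector3d v = q * Eigen::Vector3d::UnitX(); // uses _transformVector()
  Eigen::Quaternionf qf = q.cast<float>();          // scalar-type conversion
  std::cout << v.transpose() << "\n";               // ~ (0, 1, 0)
  (void)qf;
  return 0;
}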
+ * + * \warning Note the order of the arguments: the real \a w coefficient first, + * while internally the coefficients are stored in the following order: + * [\c x, \c y, \c z, \c w] + */ + EIGEN_DEVICE_FUNC inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){} + + /** Constructs and initialize a quaternion from the array data */ + EIGEN_DEVICE_FUNC explicit inline Quaternion(const Scalar* data) : m_coeffs(data) {} + + /** Copy constructor */ + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion(const QuaternionBase& other) { this->Base::operator=(other); } + + /** Constructs and initializes a quaternion from the angle-axis \a aa */ + EIGEN_DEVICE_FUNC explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } + + /** Constructs and initializes a quaternion from either: + * - a rotation matrix expression, + * - a 4D vector expression representing quaternion coefficients. + */ + template + EIGEN_DEVICE_FUNC explicit inline Quaternion(const MatrixBase& other) { *this = other; } + + /** Explicit copy constructor with scalar conversion */ + template + EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion& other) + { m_coeffs = other.coeffs().template cast(); } + + EIGEN_DEVICE_FUNC static Quaternion UnitRandom(); + + template + EIGEN_DEVICE_FUNC static Quaternion FromTwoVectors(const MatrixBase& a, const MatrixBase& b); + + EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs;} + EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;} + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(NeedsAlignment)) + +#ifdef EIGEN_QUATERNION_PLUGIN +# include EIGEN_QUATERNION_PLUGIN +#endif + +protected: + Coefficients m_coeffs; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + static EIGEN_STRONG_INLINE void _check_template_params() + { + EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, + INVALID_MATRIX_TEMPLATE_PARAMETERS) + } +#endif +}; + +/** \ingroup Geometry_Module + * single precision quaternion type */ +typedef Quaternion Quaternionf; +/** \ingroup Geometry_Module + * double precision quaternion type */ +typedef Quaternion Quaterniond; + +/*************************************************************************** +* Specialization of Map> +***************************************************************************/ + +namespace internal { + template + struct traits, _Options> > : traits > + { + typedef Map, _Options> Coefficients; + }; +} + +namespace internal { + template + struct traits, _Options> > : traits > + { + typedef Map, _Options> Coefficients; + typedef traits > TraitsBase; + enum { + Flags = TraitsBase::Flags & ~LvalueBit + }; + }; +} + +/** \ingroup Geometry_Module + * \brief Quaternion expression mapping a constant memory buffer + * + * \tparam _Scalar the type of the Quaternion coefficients + * \tparam _Options see class Map + * + * This is a specialization of class Map for Quaternion. This class allows to view + * a 4 scalar memory buffer as an Eigen's Quaternion object. 
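Editor's note: the argument-order warning above trips up many users; a concrete sketch (editorial addition):

// Editorial sketch: constructor order (w,x,y,z) vs. storage order [x,y,z,w].
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Quaterniond q(1.0, 0.0, 0.0, 0.0);    // w = 1: identity rotation
  std::cout << q.coeffs().transpose() << "\n"; // prints 0 0 0 1 (x y z w)
  std::cout << q.w() << "\n";                  // prints 1
  return 0;
}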
+ * + * \sa class Map, class Quaternion, class QuaternionBase + */ +template +class Map, _Options > + : public QuaternionBase, _Options> > +{ + public: + typedef QuaternionBase, _Options> > Base; + + typedef _Scalar Scalar; + typedef typename internal::traits::Coefficients Coefficients; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) + using Base::operator*=; + + /** Constructs a Mapped Quaternion object from the pointer \a coeffs + * + * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: + * \code *coeffs == {x, y, z, w} \endcode + * + * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ + EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} + + EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;} + + protected: + const Coefficients m_coeffs; +}; + +/** \ingroup Geometry_Module + * \brief Expression of a quaternion from a memory buffer + * + * \tparam _Scalar the type of the Quaternion coefficients + * \tparam _Options see class Map + * + * This is a specialization of class Map for Quaternion. This class allows to view + * a 4 scalar memory buffer as an Eigen's Quaternion object. + * + * \sa class Map, class Quaternion, class QuaternionBase + */ +template +class Map, _Options > + : public QuaternionBase, _Options> > +{ + public: + typedef QuaternionBase, _Options> > Base; + + typedef _Scalar Scalar; + typedef typename internal::traits::Coefficients Coefficients; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) + using Base::operator*=; + + /** Constructs a Mapped Quaternion object from the pointer \a coeffs + * + * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: + * \code *coeffs == {x, y, z, w} \endcode + * + * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ + EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {} + + EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; } + EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; } + + protected: + Coefficients m_coeffs; +}; + +/** \ingroup Geometry_Module + * Map an unaligned array of single precision scalars as a quaternion */ +typedef Map, 0> QuaternionMapf; +/** \ingroup Geometry_Module + * Map an unaligned array of double precision scalars as a quaternion */ +typedef Map, 0> QuaternionMapd; +/** \ingroup Geometry_Module + * Map a 16-byte aligned array of single precision scalars as a quaternion */ +typedef Map, Aligned> QuaternionMapAlignedf; +/** \ingroup Geometry_Module + * Map a 16-byte aligned array of double precision scalars as a quaternion */ +typedef Map, Aligned> QuaternionMapAlignedd; + +/*************************************************************************** +* Implementation of QuaternionBase methods +***************************************************************************/ + +// Generic Quaternion * Quaternion product +// This product can be specialized for a given architecture via the Arch template argument. 
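Editor's note: a sketch of the Map specializations documented above, which view a raw 4-scalar buffer as a quaternion without copying (editorial addition, buffer contents illustrative):

// Editorial sketch: viewing a raw buffer as a quaternion.
#include <Eigen/Dense>
#include <iostream>

int main() {
  double buf[4] = {0.0, 0.0, 0.0, 1.0}; // storage order: x, y, z, w
  Eigen::QuaternionMapd qm(buf);        // writable view over buf, no copy
  qm = Eigen::Quaterniond(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()));
  std::cout << buf[3] << "\n";          // w was written through to buf
  return 0;
}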
+namespace internal { +template struct quat_product +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ + return Quaternion + ( + a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), + a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), + a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), + a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() + ); + } +}; +} + +/** \returns the concatenation of two rotations as a quaternion-quaternion product */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion::Scalar> +QuaternionBase::operator* (const QuaternionBase& other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + return internal::quat_product::Scalar>::run(*this, other); +} + +/** \sa operator*(Quaternion) */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase::operator*= (const QuaternionBase& other) +{ + derived() = derived() * other.derived(); + return derived(); +} + +/** Rotation of a vector by a quaternion. + * \remarks If the quaternion is used to rotate several points (>1) + * then it is much more efficient to first convert it to a 3x3 Matrix. + * Comparison of the operation cost for n transformations: + * - Quaternion2: 30n + * - Via a Matrix3: 24 + 15n + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename QuaternionBase::Vector3 +QuaternionBase::_transformVector(const Vector3& v) const +{ + // Note that this algorithm comes from the optimization by hand + // of the conversion to a Matrix followed by a Matrix/Vector product. + // It appears to be much faster than the common algorithm found + // in the literature (30 versus 39 flops). It also requires two + // Vector3 as temporaries. + Vector3 uv = this->vec().cross(v); + uv += uv; + return v + this->w() * uv + this->vec().cross(uv); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase& QuaternionBase::operator=(const QuaternionBase& other) +{ + coeffs() = other.coeffs(); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const QuaternionBase& other) +{ + coeffs() = other.coeffs(); + return derived(); +} + +/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const AngleAxisType& aa) +{ + EIGEN_USING_STD_MATH(cos) + EIGEN_USING_STD_MATH(sin) + Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings + this->w() = cos(ha); + this->vec() = sin(ha) * aa.axis(); + return derived(); +} + +/** Set \c *this from the expression \a xpr: + * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion + * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix + * and \a xpr is converted to a quaternion + */ + +template +template +EIGEN_DEVICE_FUNC inline Derived& QuaternionBase::operator=(const MatrixBase& xpr) +{ + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + internal::quaternionbase_assign_impl::run(*this, xpr.derived()); + return derived(); +} + +/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to + * be normalized, otherwise the result is undefined. 
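Editor's note: per the flop counts in the _transformVector() remark above (30n via the quaternion vs. 24 + 15n via a matrix), convert once when rotating many points; a sketch (editorial addition):

// Editorial sketch: batch rotation via an explicit rotation matrix.
#include <Eigen/Dense>
#include <vector>

int main() {
  Eigen::Quaterniond q(Eigen::AngleAxisd(1.0, Eigen::Vector3d::UnitZ()));
  std::vector<Eigen::Vector3d> pts(1000, Eigen::Vector3d::Random());
  const Eigen::Matrix3d R = q.toRotationMatrix(); // pay ~24 flops once...
  for (auto& p : pts) p = R * p;                  // ...then 15 flops per point
  return 0;
}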
+ */ +template +EIGEN_DEVICE_FUNC inline typename QuaternionBase::Matrix3 +QuaternionBase::toRotationMatrix(void) const +{ + // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) + // if not inlined then the cost of the return by value is huge ~ +35%, + // however, not inlining this function is an order of magnitude slower, so + // it has to be inlined, and so the return by value is not an issue + Matrix3 res; + + const Scalar tx = Scalar(2)*this->x(); + const Scalar ty = Scalar(2)*this->y(); + const Scalar tz = Scalar(2)*this->z(); + const Scalar twx = tx*this->w(); + const Scalar twy = ty*this->w(); + const Scalar twz = tz*this->w(); + const Scalar txx = tx*this->x(); + const Scalar txy = ty*this->x(); + const Scalar txz = tz*this->x(); + const Scalar tyy = ty*this->y(); + const Scalar tyz = tz*this->y(); + const Scalar tzz = tz*this->z(); + + res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); + res.coeffRef(0,1) = txy-twz; + res.coeffRef(0,2) = txz+twy; + res.coeffRef(1,0) = txy+twz; + res.coeffRef(1,1) = Scalar(1)-(txx+tzz); + res.coeffRef(1,2) = tyz-twx; + res.coeffRef(2,0) = txz-twy; + res.coeffRef(2,1) = tyz+twx; + res.coeffRef(2,2) = Scalar(1)-(txx+tyy); + + return res; +} + +/** Sets \c *this to be a quaternion representing a rotation between + * the two arbitrary vectors \a a and \a b. In other words, the built + * rotation represent a rotation sending the line of direction \a a + * to the line of direction \a b, both lines passing through the origin. + * + * \returns a reference to \c *this. + * + * Note that the two input vectors do \b not have to be normalized, and + * do not need to have the same norm. + */ +template +template +EIGEN_DEVICE_FUNC inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase& a, const MatrixBase& b) +{ + EIGEN_USING_STD_MATH(sqrt) + Vector3 v0 = a.normalized(); + Vector3 v1 = b.normalized(); + Scalar c = v1.dot(v0); + + // if dot == -1, vectors are nearly opposites + // => accurately compute the rotation axis by computing the + // intersection of the two planes. This is done by solving: + // x^T v0 = 0 + // x^T v1 = 0 + // under the constraint: + // ||x|| = 1 + // which yields a singular value problem + if (c < Scalar(-1)+NumTraits::dummy_precision()) + { + c = numext::maxi(c,Scalar(-1)); + Matrix m; m << v0.transpose(), v1.transpose(); + JacobiSVD > svd(m, ComputeFullV); + Vector3 axis = svd.matrixV().col(2); + + Scalar w2 = (Scalar(1)+c)*Scalar(0.5); + this->w() = sqrt(w2); + this->vec() = axis * sqrt(Scalar(1) - w2); + return derived(); + } + Vector3 axis = v0.cross(v1); + Scalar s = sqrt((Scalar(1)+c)*Scalar(2)); + Scalar invs = Scalar(1)/s; + this->vec() = axis * invs; + this->w() = s * Scalar(0.5); + + return derived(); +} + +/** \returns a random unit quaternion following a uniform distribution law on SO(3) + * + * \note The implementation is based on http://planning.cs.uiuc.edu/node198.html + */ +template +EIGEN_DEVICE_FUNC Quaternion Quaternion::UnitRandom() +{ + EIGEN_USING_STD_MATH(sqrt) + EIGEN_USING_STD_MATH(sin) + EIGEN_USING_STD_MATH(cos) + const Scalar u1 = internal::random(0, 1), + u2 = internal::random(0, 2*EIGEN_PI), + u3 = internal::random(0, 2*EIGEN_PI); + const Scalar a = sqrt(1 - u1), + b = sqrt(u1); + return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3)); +} + + +/** Returns a quaternion representing a rotation between + * the two arbitrary vectors \a a and \a b. 
In other words, the built + * rotation represent a rotation sending the line of direction \a a + * to the line of direction \a b, both lines passing through the origin. + * + * \returns resulting quaternion + * + * Note that the two input vectors do \b not have to be normalized, and + * do not need to have the same norm. + */ +template +template +EIGEN_DEVICE_FUNC Quaternion Quaternion::FromTwoVectors(const MatrixBase& a, const MatrixBase& b) +{ + Quaternion quat; + quat.setFromTwoVectors(a, b); + return quat; +} + + +/** \returns the multiplicative inverse of \c *this + * Note that in most cases, i.e., if you simply want the opposite rotation, + * and/or the quaternion is normalized, then it is enough to use the conjugate. + * + * \sa QuaternionBase::conjugate() + */ +template +EIGEN_DEVICE_FUNC inline Quaternion::Scalar> QuaternionBase::inverse() const +{ + // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? + Scalar n2 = this->squaredNorm(); + if (n2 > Scalar(0)) + return Quaternion(conjugate().coeffs() / n2); + else + { + // return an invalid result to flag the error + return Quaternion(Coefficients::Zero()); + } +} + +// Generic conjugate of a Quaternion +namespace internal { +template struct quat_conj +{ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion run(const QuaternionBase& q){ + return Quaternion(q.w(),-q.x(),-q.y(),-q.z()); + } +}; +} + +/** \returns the conjugate of the \c *this which is equal to the multiplicative inverse + * if the quaternion is normalized. + * The conjugate of a quaternion represents the opposite rotation. + * + * \sa Quaternion2::inverse() + */ +template +EIGEN_DEVICE_FUNC inline Quaternion::Scalar> +QuaternionBase::conjugate() const +{ + return internal::quat_conj::Scalar>::run(*this); + +} + +/** \returns the angle (in radian) between two rotations + * \sa dot() + */ +template +template +EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar +QuaternionBase::angularDistance(const QuaternionBase& other) const +{ + EIGEN_USING_STD_MATH(atan2) + Quaternion d = (*this) * other.conjugate(); + return Scalar(2) * atan2( d.vec().norm(), numext::abs(d.w()) ); +} + + + +/** \returns the spherical linear interpolation between the two quaternions + * \c *this and \a other at the parameter \a t in [0;1]. + * + * This represents an interpolation for a constant motion between \c *this and \a other, + * see also http://en.wikipedia.org/wiki/Slerp. 
+ */
+template<class Derived>
+template<class OtherDerived>
+EIGEN_DEVICE_FUNC Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
+{
+  EIGEN_USING_STD_MATH(acos)
+  EIGEN_USING_STD_MATH(sin)
+  const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
+  Scalar d = this->dot(other);
+  Scalar absD = numext::abs(d);
+
+  Scalar scale0;
+  Scalar scale1;
+
+  if(absD>=one)
+  {
+    scale0 = Scalar(1) - t;
+    scale1 = t;
+  }
+  else
+  {
+    // theta is the angle between the 2 quaternions
+    Scalar theta = acos(absD);
+    Scalar sinTheta = sin(theta);
+
+    scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = sin( ( t * theta) ) / sinTheta;
+  }
+  if(d<Scalar(0)) scale1 = -scale1;
+
+  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
+
+namespace internal {
+
+// set from a rotation matrix
+template<typename Other>
+struct quaternionbase_assign_impl<Other,3,3>
+{
+  typedef typename Other::Scalar Scalar;
+  template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& a_mat)
+  {
+    const typename internal::nested_eval<Other,2>::type mat(a_mat);
+    EIGEN_USING_STD_MATH(sqrt)
+    // This algorithm comes from "Quaternion Calculus and Fast Animation",
+    // Ken Shoemake, 1987 SIGGRAPH course notes
+    Scalar t = mat.trace();
+    if (t > Scalar(0))
+    {
+      t = sqrt(t + Scalar(1.0));
+      q.w() = Scalar(0.5)*t;
+      t = Scalar(0.5)/t;
+      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+    }
+    else
+    {
+      Index i = 0;
+      if (mat.coeff(1,1) > mat.coeff(0,0))
+        i = 1;
+      if (mat.coeff(2,2) > mat.coeff(i,i))
+        i = 2;
+      Index j = (i+1)%3;
+      Index k = (j+1)%3;
+
+      t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+      t = Scalar(0.5)/t;
+      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+    }
+  }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct quaternionbase_assign_impl<Other,4,1>
+{
+  typedef typename Other::Scalar Scalar;
+  template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& vec)
+  {
+    q.coeffs() = vec;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_QUATERNION_H
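Editor's note: a minimal sketch of the slerp() implementation just shown (editorial addition; angles and axis are illustrative):

// Editorial sketch: slerp interpolates rotations at constant angular speed.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Quaterniond qa(Eigen::AngleAxisd(0.0, Eigen::Vector3d::UnitZ()));
  Eigen::Quaterniond qb(Eigen::AngleAxisd(1.0, Eigen::Vector3d::UnitZ()));
  Eigen::Quaterniond qm = qa.slerp(0.5, qb);   // halfway between qa and qb
  std::cout << qa.angularDistance(qm) << "\n"; // ~0.5 radians
  return 0;
}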
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Rotation2D.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Rotation2D.h
new file mode 100644
index 000000000..884b7d0ee
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Rotation2D.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ROTATION2D_H
+#define EIGEN_ROTATION2D_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Rotation2D
+  *
+  * \brief Represents a rotation/orientation in a 2 dimensional space.
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class is equivalent to a single scalar representing a counter-clockwise rotation
+  * as a single angle in radian. It provides some additional features such as the automatic
+  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+  * interface to Quaternion in order to facilitate the writing of generic algorithms
+  * dealing with rotations.
+  *
+  * \sa class Quaternion, class Transform
+  */
+
+namespace internal {
+
+template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+} // end namespace internal
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 2 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+  Scalar m_angle;
+
+public:
+
+  /** Construct a 2D counter-clockwise rotation from the angle \a a in radian. */
+  EIGEN_DEVICE_FUNC explicit inline Rotation2D(const Scalar& a) : m_angle(a) {}
+
+  /** Default constructor without initialization. The represented rotation is undefined. */
+  EIGEN_DEVICE_FUNC Rotation2D() {}
+
+  /** Construct a 2D rotation from a 2x2 rotation matrix \a mat.
+    *
+    * \sa fromRotationMatrix()
+    */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC explicit Rotation2D(const MatrixBase<Derived>& m)
+  {
+    fromRotationMatrix(m.derived());
+  }
+
+  /** \returns the rotation angle */
+  EIGEN_DEVICE_FUNC inline Scalar angle() const { return m_angle; }
+
+  /** \returns a read-write reference to the rotation angle */
+  EIGEN_DEVICE_FUNC inline Scalar& angle() { return m_angle; }
+
+  /** \returns the rotation angle in [0,2pi] */
+  EIGEN_DEVICE_FUNC inline Scalar smallestPositiveAngle() const {
+    Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
+    return tmp<Scalar(0) ? tmp + Scalar(2*EIGEN_PI) : tmp;
+  }
+
+  /** \returns the rotation angle in [-pi,pi] */
+  EIGEN_DEVICE_FUNC inline Scalar smallestAngle() const {
+    Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
+    if(tmp>Scalar(EIGEN_PI))       tmp -= Scalar(2*EIGEN_PI);
+    else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2*EIGEN_PI);
+    return tmp;
+  }
+
+  /** \returns the inverse rotation */
+  EIGEN_DEVICE_FUNC inline Rotation2D inverse() const { return Rotation2D(-m_angle); }
+
+  /** Concatenates two rotations */
+  EIGEN_DEVICE_FUNC inline Rotation2D operator*(const Rotation2D& other) const
+  { return Rotation2D(m_angle + other.m_angle); }
+
+  /** Concatenates two rotations */
+  EIGEN_DEVICE_FUNC inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+  /** Applies the rotation to a 2D vector */
+  EIGEN_DEVICE_FUNC Vector2 operator* (const Vector2& vec) const
+  { return toRotationMatrix() * vec; }
+
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+  EIGEN_DEVICE_FUNC Matrix2 toRotationMatrix() const;
+
+  /** Set \c *this from a 2x2 rotation matrix \a mat.
+    * In other words, this function extracts the rotation angle from the rotation matrix.
+    *
+    * This method is an alias for fromRotationMatrix()
+    *
+    * \sa fromRotationMatrix()
+    */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC Rotation2D& operator=(const MatrixBase<Derived>& m)
+  { return fromRotationMatrix(m.derived()); }
+
+  /** \returns the spherical interpolation between \c *this and \a other using
+    * parameter \a t. It is in fact equivalent to a linear interpolation.
+    */
+  EIGEN_DEVICE_FUNC inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
+  {
+    Scalar dist = Rotation2D(other.m_angle-m_angle).smallestAngle();
+    return Rotation2D(m_angle + dist*t);
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  EIGEN_DEVICE_FUNC inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+  {
+    m_angle = Scalar(other.angle());
+  }
+
+  EIGEN_DEVICE_FUNC static inline Rotation2D Identity() { return Rotation2D(0); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  EIGEN_DEVICE_FUNC bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return internal::isApprox(m_angle,other.m_angle, prec); }
+
+};
+
+/** \ingroup Geometry_Module
+  * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+  * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
+
+/** Set \c *this from a 2x2 rotation matrix \a mat.
+  * In other words, this function extracts the rotation angle
+  * from the rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+EIGEN_DEVICE_FUNC Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  EIGEN_USING_STD_MATH(atan2)
+  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
+  return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+  */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+EIGEN_DEVICE_FUNC Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+  EIGEN_USING_STD_MATH(sin)
+  EIGEN_USING_STD_MATH(cos)
+  Scalar sinA = sin(m_angle);
+  Scalar cosA = cos(m_angle);
+  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ROTATION2D_H
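Editor's note: a minimal sketch of the Rotation2D API above, including the matrix round trip (editorial addition; the angle value is illustrative):

// Editorial sketch: Rotation2D round trip through a 2x2 matrix.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Rotation2Dd r(0.3);                   // 0.3 rad counter-clockwise
  Eigen::Vector2d v = r * Eigen::Vector2d::UnitX();
  Eigen::Rotation2Dd r2;
  r2.fromRotationMatrix(r.toRotationMatrix()); // angle recovered via atan2
  std::cout << v.transpose() << " | " << r2.angle() << "\n"; // ... | 0.3
  return 0;
}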
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/RotationBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/RotationBase.h
new file mode 100644
index 000000000..f0ee0bd03
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/RotationBase.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#ifndef EIGEN_ROTATIONBASE_H +#define EIGEN_ROTATIONBASE_H + +namespace Eigen { + +// forward declaration +namespace internal { +template +struct rotation_base_generic_product_selector; +} + +/** \class RotationBase + * + * \brief Common base class for compact rotation representations + * + * \tparam Derived is the derived type, i.e., a rotation type + * \tparam _Dim the dimension of the space + */ +template +class RotationBase +{ + public: + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef typename internal::traits::Scalar Scalar; + + /** corresponding linear transformation matrix type */ + typedef Matrix RotationMatrixType; + typedef Matrix VectorType; + + public: + EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast(this); } + EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast(this); } + + /** \returns an equivalent rotation matrix */ + EIGEN_DEVICE_FUNC inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } + + /** \returns an equivalent rotation matrix + * This function is added to be conform with the Transform class' naming scheme. + */ + EIGEN_DEVICE_FUNC inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); } + + /** \returns the inverse rotation */ + EIGEN_DEVICE_FUNC inline Derived inverse() const { return derived().inverse(); } + + /** \returns the concatenation of the rotation \c *this with a translation \a t */ + EIGEN_DEVICE_FUNC inline Transform operator*(const Translation& t) const + { return Transform(*this) * t; } + + /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */ + EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const UniformScaling& s) const + { return toRotationMatrix() * s.factor(); } + + /** \returns the concatenation of the rotation \c *this with a generic expression \a e + * \a e can be: + * - a DimxDim linear transformation matrix + * - a DimxDim diagonal matrix (axis aligned scaling) + * - a vector of size Dim + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector::ReturnType + operator*(const EigenBase& e) const + { return internal::rotation_base_generic_product_selector::run(derived(), e.derived()); } + + /** \returns the concatenation of a linear transformation \a l with the rotation \a r */ + template friend + EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const EigenBase& l, const Derived& r) + { return l.derived() * r.toRotationMatrix(); } + + /** \returns the concatenation of a scaling \a l with the rotation \a r */ + EIGEN_DEVICE_FUNC friend inline Transform operator*(const DiagonalMatrix& l, const Derived& r) + { + Transform res(r); + res.linear().applyOnTheLeft(l); + return res; + } + + /** \returns the concatenation of the rotation \c *this with a transformation \a t */ + template + EIGEN_DEVICE_FUNC inline Transform operator*(const Transform& t) const + { return toRotationMatrix() * t; } + + template + EIGEN_DEVICE_FUNC inline VectorType _transformVector(const OtherVectorType& v) const + { return toRotationMatrix() * v; } +}; + +namespace internal { + +// implementation of the generic product rotation * matrix +template +struct rotation_base_generic_product_selector +{ + enum { Dim = RotationDerived::Dim }; + typedef Matrix ReturnType; + EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const MatrixType& m) + { return r.toRotationMatrix() * m; } +}; + +template +struct 
rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > +{ + typedef Transform ReturnType; + EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) + { + ReturnType res(r); + res.linear() *= m; + return res; + } +}; + +template +struct rotation_base_generic_product_selector +{ + enum { Dim = RotationDerived::Dim }; + typedef Matrix ReturnType; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v) + { + return r._transformVector(v); + } +}; + +} // end namespace internal + +/** \geometry_module + * + * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::Matrix(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + *this = r.toRotationMatrix(); +} + +/** \geometry_module + * + * \brief Set a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::operator=(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + return *this = r.toRotationMatrix(); +} + +namespace internal { + +/** \internal + * + * Helper function to return an arbitrary rotation object to a rotation matrix. + * + * \tparam Scalar the numeric type of the matrix coefficients + * \tparam Dim the dimension of the current space + * + * It returns a Dim x Dim fixed size matrix. + * + * Default specializations are provided for: + * - any scalar type (2D), + * - any matrix expression, + * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) + * + * Currently toRotationMatrix is only used by Transform. + * + * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis + */ +template +EIGEN_DEVICE_FUNC static inline Matrix toRotationMatrix(const Scalar& s) +{ + EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) + return Rotation2D(s).toRotationMatrix(); +} + +template +EIGEN_DEVICE_FUNC static inline Matrix toRotationMatrix(const RotationBase& r) +{ + return r.toRotationMatrix(); +} + +template +EIGEN_DEVICE_FUNC static inline const MatrixBase& toRotationMatrix(const MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, + YOU_MADE_A_PROGRAMMING_MISTAKE) + return mat; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ROTATIONBASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Scaling.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Scaling.h new file mode 100644 index 000000000..f58ca03d9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Scaling.h @@ -0,0 +1,170 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
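Editor's note: the product selectors above let any RotationBase-derived type compose with translations, scalings, matrices, and vectors through operator*; a sketch (editorial addition, values illustrative):

// Editorial sketch: composing a rotation with other transformations.
#include <Eigen/Dense>

int main() {
  const double halfPi = 1.5707963267948966;
  Eigen::AngleAxisd aa(halfPi, Eigen::Vector3d::UnitZ());
  Eigen::Affine3d T = aa * Eigen::Translation3d(1.0, 0.0, 0.0); // a Transform
  Eigen::Matrix3d M = aa * Eigen::Scaling(2.0);                 // a plain 3x3 matrix
  Eigen::Vector3d v = aa * Eigen::Vector3d::UnitX();            // ~ (0, 1, 0)
  (void)T; (void)M; (void)v;
  return 0;
}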
+ +#ifndef EIGEN_SCALING_H +#define EIGEN_SCALING_H + +namespace Eigen { + +/** \geometry_module \ingroup Geometry_Module + * + * \class Scaling + * + * \brief Represents a generic uniform scaling transformation + * + * \tparam _Scalar the scalar type, i.e., the type of the coefficients. + * + * This class represent a uniform scaling transformation. It is the return + * type of Scaling(Scalar), and most of the time this is the only way it + * is used. In particular, this class is not aimed to be used to store a scaling transformation, + * but rather to make easier the constructions and updates of Transform objects. + * + * To represent an axis aligned scaling, use the DiagonalMatrix class. + * + * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform + */ +template +class UniformScaling +{ +public: + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + +protected: + + Scalar m_factor; + +public: + + /** Default constructor without initialization. */ + UniformScaling() {} + /** Constructs and initialize a uniform scaling transformation */ + explicit inline UniformScaling(const Scalar& s) : m_factor(s) {} + + inline const Scalar& factor() const { return m_factor; } + inline Scalar& factor() { return m_factor; } + + /** Concatenates two uniform scaling */ + inline UniformScaling operator* (const UniformScaling& other) const + { return UniformScaling(m_factor * other.factor()); } + + /** Concatenates a uniform scaling and a translation */ + template + inline Transform operator* (const Translation& t) const; + + /** Concatenates a uniform scaling and an affine transformation */ + template + inline Transform operator* (const Transform& t) const + { + Transform res = t; + res.prescale(factor()); + return res; + } + + /** Concatenates a uniform scaling and a linear transformation matrix */ + // TODO returns an expression + template + inline typename internal::plain_matrix_type::type operator* (const MatrixBase& other) const + { return other * m_factor; } + + template + inline Matrix operator*(const RotationBase& r) const + { return r.toRotationMatrix() * m_factor; } + + /** \returns the inverse scaling */ + inline UniformScaling inverse() const + { return UniformScaling(Scalar(1)/m_factor); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline UniformScaling cast() const + { return UniformScaling(NewScalarType(m_factor)); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit UniformScaling(const UniformScaling& other) + { m_factor = Scalar(other.factor()); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const UniformScaling& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const + { return internal::isApprox(m_factor, other.factor(), prec); } + +}; + +/** \addtogroup Geometry_Module */ +//@{ + +/** Concatenates a linear transformation matrix and a uniform scaling + * \relates UniformScaling + */ +// NOTE this operator is defiend in MatrixBase and not as a friend function +// of UniformScaling to fix an internal crash of Intel's ICC +template +EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product) +operator*(const MatrixBase& matrix, const UniformScaling& s) +{ return matrix.derived() * s.factor(); } + +/** Constructs a uniform scaling from scale factor \a s */ +inline UniformScaling Scaling(float s) { return UniformScaling(s); } +/** Constructs a uniform scaling from scale factor \a s */ +inline UniformScaling Scaling(double s) { return UniformScaling(s); } +/** Constructs a uniform scaling from scale factor \a s */ +template +inline UniformScaling > Scaling(const std::complex& s) +{ return UniformScaling >(s); } + +/** Constructs a 2D axis aligned scaling */ +template +inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy) +{ return DiagonalMatrix(sx, sy); } +/** Constructs a 3D axis aligned scaling */ +template +inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) +{ return DiagonalMatrix(sx, sy, sz); } + +/** Constructs an axis aligned scaling expression from vector expression \a coeffs + * This is an alias for coeffs.asDiagonal() + */ +template +inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) +{ return coeffs.asDiagonal(); } + +/** \deprecated */ +typedef DiagonalMatrix AlignedScaling2f; +/** \deprecated */ +typedef DiagonalMatrix AlignedScaling2d; +/** \deprecated */ +typedef DiagonalMatrix AlignedScaling3f; +/** \deprecated */ +typedef DiagonalMatrix AlignedScaling3d; +//@} + +template +template +inline Transform +UniformScaling::operator* (const Translation& t) const +{ + Transform res; + res.matrix().setZero(); + res.linear().diagonal().fill(factor()); + res.translation() = factor() * t.vector(); + res(Dim,Dim) = Scalar(1); + return res; +} + +} // end namespace Eigen + +#endif // EIGEN_SCALING_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h new file mode 100644 index 000000000..3f31ee45d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h @@ -0,0 +1,1542 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2010 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
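Editor's note: a short sketch contrasting the uniform scaling above with the axis-aligned DiagonalMatrix overloads (editorial addition, values illustrative):

// Editorial sketch: uniform vs. axis-aligned scaling.
#include <Eigen/Dense>

int main() {
  Eigen::Affine2d T = Eigen::Scaling(2.0) * Eigen::Translation2d(1.0, 0.0);
  Eigen::DiagonalMatrix<double, 2> S = Eigen::Scaling(1.0, 3.0); // axis aligned
  Eigen::Vector2d p = T * Eigen::Vector2d(0.0, 1.0); // 2*((0,1)+(1,0)) = (2, 2)
  (void)S; (void)p;
  return 0;
}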
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h
new file mode 100644
index 000000000..3f31ee45d
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Transform.h
@@ -0,0 +1,1542 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+// Copyright (C) 2009 Benoit Jacob
+// Copyright (C) 2010 Hauke Heibel
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSFORM_H
+#define EIGEN_TRANSFORM_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Transform>
+struct transform_traits
+{
+  enum
+  {
+    Dim = Transform::Dim,
+    HDim = Transform::HDim,
+    Mode = Transform::Mode,
+    IsProjective = (int(Mode)==int(Projective))
+  };
+};
+
+template< typename TransformType,
+          typename MatrixType,
+          int Case = transform_traits<TransformType>::IsProjective ? 0
+                   : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
+                   : 2,
+          int RhsCols = MatrixType::ColsAtCompileTime>
+struct transform_right_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_left_product_impl;
+
+template< typename Lhs,
+          typename Rhs,
+          bool AnyProjective =
+            transform_traits<Lhs>::IsProjective ||
+            transform_traits<Rhs>::IsProjective>
+struct transform_transform_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_construct_from_matrix;
+
+template<typename TransformType> struct transform_take_affine_part;
+
+template<typename _Scalar, int _Dim, int _Mode, int _Options>
+struct traits<Transform<_Scalar,_Dim,_Mode,_Options> >
+{
+  typedef _Scalar Scalar;
+  typedef Eigen::Index StorageIndex;
+  typedef Dense StorageKind;
+  enum {
+    Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1,
+    RowsAtCompileTime = _Mode==Projective ? Dim1 : _Dim,
+    ColsAtCompileTime = Dim1,
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    Flags = 0
+  };
+};
+
+template<int Mode> struct transform_make_affine;
+
+} // end namespace internal
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Transform
+  *
+  * \brief Represents a homogeneous transformation in an N dimensional space
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  * \tparam _Dim the dimension of the space
+  * \tparam _Mode the type of the transformation. Can be:
+  *              - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
+  *                         where the last row is assumed to be [0 ... 0 1].
+  *              - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
+  *              - #Projective: the transformation is stored as a (Dim+1)^2 matrix
+  *                             without any assumption.
+  * \tparam _Options has the same meaning as in class Matrix. It allows specifying DontAlign and/or RowMajor.
+  *                  These Options are passed directly to the underlying matrix type.
+  *
+  * The homography is internally represented and stored by a matrix which
+  * is available through the matrix() method. To understand the behavior of
+  * this class you have to think of a Transform object as its internal
+  * matrix representation. The chosen convention is right multiply:
+  *
+  * \code v' = T * v \endcode
+  *
+  * Therefore, an affine transformation matrix M is shaped like this:
+  *
+  * \f$ \left( \begin{array}{cc}
+  * linear & translation\\
+  * 0 ... 0 & 1
+  * \end{array} \right) \f$
+  *
+  * Note that for a projective transformation the last row can be anything,
+  * and then the interpretation of the different parts might be slightly different.
+  *
+  * However, unlike a plain matrix, the Transform class provides many features
+  * simplifying both its assembly and usage. In particular, it can be composed
+  * with any other transformation (Transform, Translation, RotationBase, DiagonalMatrix)
+  * and can be directly used to transform implicit homogeneous vectors. All these
+  * operations are handled via operator*. For the composition of transformations,
+  * the principle is to first convert the right/left hand sides of the product
+  * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
+  * Of course, internally, operator* tries to perform the minimal number of operations
+  * according to the nature of each term. Likewise, when applying the transform
+  * to points, the latter are automatically promoted to homogeneous vectors
+  * before doing the matrix product. The conversions to homogeneous representations
+  * are performed as follows:
+  *
+  * \b Translation t (Dim)x(1):
+  * \f$ \left( \begin{array}{cc}
+  * I & t \\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Rotation R (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * R & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Scaling \b DiagonalMatrix S (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * S & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Column \b point v (Dim)x(1):
+  * \f$ \left( \begin{array}{c}
+  * v\\
+  * 1
+  * \end{array} \right) \f$
+  *
+  * \b Set \b of \b column \b points V1...Vn (Dim)x(n):
+  * \f$ \left( \begin{array}{ccc}
+  * v_1 & ... & v_n\\
+  * 1 & ... & 1
+  * \end{array} \right) \f$
+  *
+  * The concatenation of a Transform object with any kind of other transformation
+  * always returns a Transform object.
+  *
+  * A little exception to the "as pure matrix product" rule is the case of the
+  * transformation of non homogeneous vectors by an affine transformation. In
+  * that case the last matrix row can be ignored, and the product returns non
+  * homogeneous vectors.
+  *
+  * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
+  * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
+  * The solution is either to use a Dim x Dynamic matrix or to explicitly request a
+  * vector transformation by making the vector homogeneous:
+  * \code
+  * m' = T * m.colwise().homogeneous();
+  * \endcode
+  * Note that there is zero overhead.
+  *
+  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+  * preprocessor token EIGEN_QT_SUPPORT is defined.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
+  *
+  * \sa class Matrix, class Quaternion
+  */
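Before the class definition itself, an illustrative sketch of the right-multiply convention documented above (an aside, not part of the patched sources):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  // Build T as translate * rotate * scale; factors apply right-to-left to points.
  Affine3f T = Translation3f(1.0f, 2.0f, 3.0f)
             * AngleAxisf(0.25f * 3.14159265f, Vector3f::UnitZ())
             * Scaling(2.0f);
  Vector3f p(1.0f, 0.0f, 0.0f);
  std::cout << (T * p).transpose() << "\n";          // point: linear part plus translation
  std::cout << (T.linear() * p).transpose() << "\n"; // vector: linear part only
}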
+template<typename _Scalar, int _Dim, int _Mode, int _Options>
+class Transform
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+  enum {
+    Mode = _Mode,
+    Options = _Options,
+    Dim = _Dim,     ///< space dimension in which the transformation holds
+    HDim = _Dim+1,  ///< size of a respective homogeneous vector
+    Rows = int(Mode)==(AffineCompact) ?
Dim : HDim + }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + typedef Eigen::Index StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + /** type of the matrix used to represent the transformation */ + typedef typename internal::make_proper_matrix_type::type MatrixType; + /** constified MatrixType */ + typedef const MatrixType ConstMatrixType; + /** type of the matrix used to represent the linear part of the transformation */ + typedef Matrix LinearMatrixType; + /** type of read/write reference to the linear part of the transformation */ + typedef Block LinearPart; + /** type of read reference to the linear part of the transformation */ + typedef const Block ConstLinearPart; + /** type of read/write reference to the affine part of the transformation */ + typedef typename internal::conditional >::type AffinePart; + /** type of read reference to the affine part of the transformation */ + typedef typename internal::conditional >::type ConstAffinePart; + /** type of a vector */ + typedef Matrix VectorType; + /** type of a read/write reference to the translation part of the rotation */ + typedef Block::Flags & RowMajorBit)> TranslationPart; + /** type of a read reference to the translation part of the rotation */ + typedef const Block::Flags & RowMajorBit)> ConstTranslationPart; + /** corresponding translation type */ + typedef Translation TranslationType; + + // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0 + enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) }; + /** The return type of the product between a diagonal matrix and a transform */ + typedef Transform TransformTimeDiagonalReturnType; + +protected: + + MatrixType m_matrix; + +public: + + /** Default constructor without initialization of the meaningful coefficients. + * If Mode==Affine, then the last row is set to [0 ... 0 1] */ + EIGEN_DEVICE_FUNC inline Transform() + { + check_template_params(); + internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix); + } + + EIGEN_DEVICE_FUNC inline Transform(const Transform& other) + { + check_template_params(); + m_matrix = other.m_matrix; + } + + EIGEN_DEVICE_FUNC inline explicit Transform(const TranslationType& t) + { + check_template_params(); + *this = t; + } + EIGEN_DEVICE_FUNC inline explicit Transform(const UniformScaling& s) + { + check_template_params(); + *this = s; + } + template + EIGEN_DEVICE_FUNC inline explicit Transform(const RotationBase& r) + { + check_template_params(); + *this = r; + } + + EIGEN_DEVICE_FUNC inline Transform& operator=(const Transform& other) + { m_matrix = other.m_matrix; return *this; } + + typedef internal::transform_take_affine_part take_affine_part; + + /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */ + template + EIGEN_DEVICE_FUNC inline explicit Transform(const EigenBase& other) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); + + check_template_params(); + internal::transform_construct_from_matrix::run(this, other.derived()); + } + + /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. 
*/ + template + EIGEN_DEVICE_FUNC inline Transform& operator=(const EigenBase& other) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); + + internal::transform_construct_from_matrix::run(this, other.derived()); + return *this; + } + + template + EIGEN_DEVICE_FUNC inline Transform(const Transform& other) + { + check_template_params(); + // only the options change, we can directly copy the matrices + m_matrix = other.matrix(); + } + + template + EIGEN_DEVICE_FUNC inline Transform(const Transform& other) + { + check_template_params(); + // prevent conversions as: + // Affine | AffineCompact | Isometry = Projective + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)), + YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) + + // prevent conversions as: + // Isometry = Affine | AffineCompact + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)), + YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) + + enum { ModeIsAffineCompact = Mode == int(AffineCompact), + OtherModeIsAffineCompact = OtherMode == int(AffineCompact) + }; + + if(ModeIsAffineCompact == OtherModeIsAffineCompact) + { + // We need the block expression because the code is compiled for all + // combinations of transformations and will trigger a compile time error + // if one tries to assign the matrices directly + m_matrix.template block(0,0) = other.matrix().template block(0,0); + makeAffine(); + } + else if(OtherModeIsAffineCompact) + { + typedef typename Transform::MatrixType OtherMatrixType; + internal::transform_construct_from_matrix::run(this, other.matrix()); + } + else + { + // here we know that Mode == AffineCompact and OtherMode != AffineCompact. + // if OtherMode were Projective, the static assert above would already have caught it. + // So the only possibility is that OtherMode == Affine + linear() = other.linear(); + translation() = other.translation(); + } + } + + template + EIGEN_DEVICE_FUNC Transform(const ReturnByValue& other) + { + check_template_params(); + other.evalTo(*this); + } + + template + EIGEN_DEVICE_FUNC Transform& operator=(const ReturnByValue& other) + { + other.evalTo(*this); + return *this; + } + + #ifdef EIGEN_QT_SUPPORT + inline Transform(const QMatrix& other); + inline Transform& operator=(const QMatrix& other); + inline QMatrix toQMatrix(void) const; + inline Transform(const QTransform& other); + inline Transform& operator=(const QTransform& other); + inline QTransform toQTransform(void) const; + #endif + + EIGEN_DEVICE_FUNC Index rows() const { return int(Mode)==int(Projective) ? 
m_matrix.cols() : (m_matrix.cols()-1); } + EIGEN_DEVICE_FUNC Index cols() const { return m_matrix.cols(); } + + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operator(Index,Index) const */ + EIGEN_DEVICE_FUNC inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); } + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operator(Index,Index) */ + EIGEN_DEVICE_FUNC inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); } + + /** \returns a read-only expression of the transformation matrix */ + EIGEN_DEVICE_FUNC inline const MatrixType& matrix() const { return m_matrix; } + /** \returns a writable expression of the transformation matrix */ + EIGEN_DEVICE_FUNC inline MatrixType& matrix() { return m_matrix; } + + /** \returns a read-only expression of the linear part of the transformation */ + EIGEN_DEVICE_FUNC inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); } + /** \returns a writable expression of the linear part of the transformation */ + EIGEN_DEVICE_FUNC inline LinearPart linear() { return LinearPart(m_matrix,0,0); } + + /** \returns a read-only expression of the Dim x HDim affine part of the transformation */ + EIGEN_DEVICE_FUNC inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } + /** \returns a writable expression of the Dim x HDim affine part of the transformation */ + EIGEN_DEVICE_FUNC inline AffinePart affine() { return take_affine_part::run(m_matrix); } + + /** \returns a read-only expression of the translation vector of the transformation */ + EIGEN_DEVICE_FUNC inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); } + /** \returns a writable expression of the translation vector of the transformation */ + EIGEN_DEVICE_FUNC inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } + + /** \returns an expression of the product between the transform \c *this and a matrix expression \a other. + * + * The right-hand-side \a other can be either: + * \li an homogeneous vector of size Dim+1, + * \li a set of homogeneous vectors of size Dim+1 x N, + * \li a transformation matrix of size Dim+1 x Dim+1. + * + * Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be: + * \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode), + * \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode), + * + * In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other. + * + * If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type, + * or do your own cooking. 
+ * + * Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only: + * \code + * Affine3f A; + * Vector3f v1, v2; + * v2 = A.linear() * v1; + * \endcode + * + */ + // note: this function is defined here because some compilers cannot find the respective declaration + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl::ResultType + operator * (const EigenBase &other) const + { return internal::transform_right_product_impl::run(*this,other.derived()); } + + /** \returns the product expression of a transformation matrix \a a times a transform \a b + * + * The left hand side \a other can be either: + * \li a linear transformation matrix of size Dim x Dim, + * \li an affine transformation matrix of size Dim x Dim+1, + * \li a general transformation matrix of size Dim+1 x Dim+1. + */ + template friend + EIGEN_DEVICE_FUNC inline const typename internal::transform_left_product_impl::ResultType + operator * (const EigenBase &a, const Transform &b) + { return internal::transform_left_product_impl::run(a.derived(),b); } + + /** \returns The product expression of a transform \a a times a diagonal matrix \a b + * + * The rhs diagonal matrix is interpreted as an affine scaling transformation. The + * product results in a Transform of the same type (mode) as the lhs only if the lhs + * mode is no isometry. In that case, the returned transform is an affinity. + */ + template + EIGEN_DEVICE_FUNC inline const TransformTimeDiagonalReturnType + operator * (const DiagonalBase &b) const + { + TransformTimeDiagonalReturnType res(*this); + res.linearExt() *= b; + return res; + } + + /** \returns The product expression of a diagonal matrix \a a times a transform \a b + * + * The lhs diagonal matrix is interpreted as an affine scaling transformation. The + * product results in a Transform of the same type (mode) as the lhs only if the lhs + * mode is no isometry. In that case, the returned transform is an affinity. 
+ */ + template + EIGEN_DEVICE_FUNC friend inline TransformTimeDiagonalReturnType + operator * (const DiagonalBase &a, const Transform &b) + { + TransformTimeDiagonalReturnType res; + res.linear().noalias() = a*b.linear(); + res.translation().noalias() = a*b.translation(); + if (Mode!=int(AffineCompact)) + res.matrix().row(Dim) = b.matrix().row(Dim); + return res; + } + + template + EIGEN_DEVICE_FUNC inline Transform& operator*=(const EigenBase& other) { return *this = *this * other; } + + /** Concatenates two transformations */ + EIGEN_DEVICE_FUNC inline const Transform operator * (const Transform& other) const + { + return internal::transform_transform_product_impl::run(*this,other); + } + + #if EIGEN_COMP_ICC +private: + // this intermediate structure permits to workaround a bug in ICC 11: + // error: template instantiation resulted in unexpected function type of "Eigen::Transform + // (const Eigen::Transform &) const" + // (the meaning of a name may have changed since the template declaration -- the type of the template is: + // "Eigen::internal::transform_transform_product_impl, + // Eigen::Transform, >::ResultType (const Eigen::Transform &) const") + // + template struct icc_11_workaround + { + typedef internal::transform_transform_product_impl > ProductType; + typedef typename ProductType::ResultType ResultType; + }; + +public: + /** Concatenates two different transformations */ + template + inline typename icc_11_workaround::ResultType + operator * (const Transform& other) const + { + typedef typename icc_11_workaround::ProductType ProductType; + return ProductType::run(*this,other); + } + #else + /** Concatenates two different transformations */ + template + EIGEN_DEVICE_FUNC inline typename internal::transform_transform_product_impl >::ResultType + operator * (const Transform& other) const + { + return internal::transform_transform_product_impl >::run(*this,other); + } + #endif + + /** \sa MatrixBase::setIdentity() */ + EIGEN_DEVICE_FUNC void setIdentity() { m_matrix.setIdentity(); } + + /** + * \brief Returns an identity transformation. + * \todo In the future this function should be returning a Transform expression. 
+ */ + EIGEN_DEVICE_FUNC static const Transform Identity() + { + return Transform(MatrixType::Identity()); + } + + template + EIGEN_DEVICE_FUNC + inline Transform& scale(const MatrixBase &other); + + template + EIGEN_DEVICE_FUNC + inline Transform& prescale(const MatrixBase &other); + + EIGEN_DEVICE_FUNC inline Transform& scale(const Scalar& s); + EIGEN_DEVICE_FUNC inline Transform& prescale(const Scalar& s); + + template + EIGEN_DEVICE_FUNC + inline Transform& translate(const MatrixBase &other); + + template + EIGEN_DEVICE_FUNC + inline Transform& pretranslate(const MatrixBase &other); + + template + EIGEN_DEVICE_FUNC + inline Transform& rotate(const RotationType& rotation); + + template + EIGEN_DEVICE_FUNC + inline Transform& prerotate(const RotationType& rotation); + + EIGEN_DEVICE_FUNC Transform& shear(const Scalar& sx, const Scalar& sy); + EIGEN_DEVICE_FUNC Transform& preshear(const Scalar& sx, const Scalar& sy); + + EIGEN_DEVICE_FUNC inline Transform& operator=(const TranslationType& t); + + EIGEN_DEVICE_FUNC + inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } + + EIGEN_DEVICE_FUNC inline Transform operator*(const TranslationType& t) const; + + EIGEN_DEVICE_FUNC + inline Transform& operator=(const UniformScaling& t); + + EIGEN_DEVICE_FUNC + inline Transform& operator*=(const UniformScaling& s) { return scale(s.factor()); } + + EIGEN_DEVICE_FUNC + inline TransformTimeDiagonalReturnType operator*(const UniformScaling& s) const + { + TransformTimeDiagonalReturnType res = *this; + res.scale(s.factor()); + return res; + } + + EIGEN_DEVICE_FUNC + inline Transform& operator*=(const DiagonalMatrix& s) { linearExt() *= s; return *this; } + + template + EIGEN_DEVICE_FUNC inline Transform& operator=(const RotationBase& r); + template + EIGEN_DEVICE_FUNC inline Transform& operator*=(const RotationBase& r) { return rotate(r.toRotationMatrix()); } + template + EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase& r) const; + + EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const; + template + EIGEN_DEVICE_FUNC + void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; + template + EIGEN_DEVICE_FUNC + void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; + + template + EIGEN_DEVICE_FUNC + Transform& fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale); + + EIGEN_DEVICE_FUNC + inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const; + + /** \returns a const pointer to the column major internal matrix */ + EIGEN_DEVICE_FUNC const Scalar* data() const { return m_matrix.data(); } + /** \returns a non-const pointer to the column major internal matrix */ + EIGEN_DEVICE_FUNC Scalar* data() { return m_matrix.data(); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
+ */ + template + EIGEN_DEVICE_FUNC inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + EIGEN_DEVICE_FUNC inline explicit Transform(const Transform& other) + { + check_template_params(); + m_matrix = other.matrix().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + EIGEN_DEVICE_FUNC bool isApprox(const Transform& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const + { return m_matrix.isApprox(other.m_matrix, prec); } + + /** Sets the last row to [0 ... 0 1] + */ + EIGEN_DEVICE_FUNC void makeAffine() + { + internal::transform_make_affine::run(m_matrix); + } + + /** \internal + * \returns the Dim x Dim linear part if the transformation is affine, + * and the HDim x Dim part for projective transformations. + */ + EIGEN_DEVICE_FUNC inline Block linearExt() + { return m_matrix.template block(0,0); } + /** \internal + * \returns the Dim x Dim linear part if the transformation is affine, + * and the HDim x Dim part for projective transformations. + */ + EIGEN_DEVICE_FUNC inline const Block linearExt() const + { return m_matrix.template block(0,0); } + + /** \internal + * \returns the translation part if the transformation is affine, + * and the last column for projective transformations. + */ + EIGEN_DEVICE_FUNC inline Block translationExt() + { return m_matrix.template block(0,Dim); } + /** \internal + * \returns the translation part if the transformation is affine, + * and the last column for projective transformations. + */ + EIGEN_DEVICE_FUNC inline const Block translationExt() const + { return m_matrix.template block(0,Dim); } + + + #ifdef EIGEN_TRANSFORM_PLUGIN + #include EIGEN_TRANSFORM_PLUGIN + #endif + +protected: + #ifndef EIGEN_PARSED_BY_DOXYGEN + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void check_template_params() + { + EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) + } + #endif + +}; + +/** \ingroup Geometry_Module */ +typedef Transform Isometry2f; +/** \ingroup Geometry_Module */ +typedef Transform Isometry3f; +/** \ingroup Geometry_Module */ +typedef Transform Isometry2d; +/** \ingroup Geometry_Module */ +typedef Transform Isometry3d; + +/** \ingroup Geometry_Module */ +typedef Transform Affine2f; +/** \ingroup Geometry_Module */ +typedef Transform Affine3f; +/** \ingroup Geometry_Module */ +typedef Transform Affine2d; +/** \ingroup Geometry_Module */ +typedef Transform Affine3d; + +/** \ingroup Geometry_Module */ +typedef Transform AffineCompact2f; +/** \ingroup Geometry_Module */ +typedef Transform AffineCompact3f; +/** \ingroup Geometry_Module */ +typedef Transform AffineCompact2d; +/** \ingroup Geometry_Module */ +typedef Transform AffineCompact3d; + +/** \ingroup Geometry_Module */ +typedef Transform Projective2f; +/** \ingroup Geometry_Module */ +typedef Transform Projective3f; +/** \ingroup Geometry_Module */ +typedef Transform Projective2d; +/** \ingroup Geometry_Module */ +typedef Transform Projective3d; + +/************************** +*** Optional QT support *** +**************************/ + +#ifdef EIGEN_QT_SUPPORT +/** Initializes \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. 
+ */ +template +Transform::Transform(const QMatrix& other) +{ + check_template_params(); + *this = other; +} + +/** Set \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QMatrix& other) +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + if (Mode == int(AffineCompact)) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(); + else + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + 0, 0, 1; + return *this; +} + +/** \returns a QMatrix from \c *this assuming the dimension is 2. + * + * \warning this conversion might loss data if \c *this is not affine + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QMatrix Transform::toQMatrix(void) const +{ + check_template_params(); + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2)); +} + +/** Initializes \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform::Transform(const QTransform& other) +{ + check_template_params(); + *this = other; +} + +/** Set \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QTransform& other) +{ + check_template_params(); + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + if (Mode == int(AffineCompact)) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(); + else + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + other.m13(), other.m23(), other.m33(); + return *this; +} + +/** \returns a QTransform from \c *this assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QTransform Transform::toQTransform(void) const +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + if (Mode == int(AffineCompact)) + return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2)); + else + return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); +} +#endif + +/********************* +*** Procedural API *** +*********************/ + +/** Applies on the right the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \sa prescale() + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::scale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + linearExt().noalias() = (linearExt() * other.asDiagonal()); + return *this; +} + +/** Applies on the right a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. 
+ * \sa prescale(Scalar) + */ +template +EIGEN_DEVICE_FUNC inline Transform& Transform::scale(const Scalar& s) +{ + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + linearExt() *= s; + return *this; +} + +/** Applies on the left the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \sa scale() + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::prescale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + affine().noalias() = (other.asDiagonal() * affine()); + return *this; +} + +/** Applies on the left a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. + * \sa scale(Scalar) + */ +template +EIGEN_DEVICE_FUNC inline Transform& Transform::prescale(const Scalar& s) +{ + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + m_matrix.template topRows() *= s; + return *this; +} + +/** Applies on the right the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa pretranslate() + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::translate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + translationExt() += linearExt() * other; + return *this; +} + +/** Applies on the left the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa translate() + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::pretranslate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + if(int(Mode)==int(Projective)) + affine() += other * m_matrix.row(Dim); + else + translation() += other; + return *this; +} + +/** Applies on the right the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * The template parameter \a RotationType is the type of the rotation which + * must be known by internal::toRotationMatrix<>. + * + * Natively supported types includes: + * - any scalar (2D), + * - a Dim x Dim matrix expression, + * - a Quaternion (3D), + * - a AngleAxis (3D) + * + * This mechanism is easily extendable to support user types such as Euler angles, + * or a pair of Quaternion for 4D rotations. + * + * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::rotate(const RotationType& rotation) +{ + linearExt() *= internal::toRotationMatrix(rotation); + return *this; +} + +/** Applies on the left the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * See rotate() for further details. + * + * \sa rotate() + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::prerotate(const RotationType& rotation) +{ + m_matrix.template block(0,0) = internal::toRotationMatrix(rotation) + * m_matrix.template block(0,0); + return *this; +} + +/** Applies on the right the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. 
+ * \sa preshear() + */ +template +EIGEN_DEVICE_FUNC Transform& +Transform::shear(const Scalar& sx, const Scalar& sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + VectorType tmp = linear().col(0)*sy + linear().col(1); + linear() << linear().col(0) + linear().col(1)*sx, tmp; + return *this; +} + +/** Applies on the left the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. + * \sa shear() + */ +template +EIGEN_DEVICE_FUNC Transform& +Transform::preshear(const Scalar& sx, const Scalar& sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) + m_matrix.template block(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block(0,0); + return *this; +} + +/****************************************************** +*** Scaling, Translation and Rotation compatibility *** +******************************************************/ + +template +EIGEN_DEVICE_FUNC inline Transform& Transform::operator=(const TranslationType& t) +{ + linear().setIdentity(); + translation() = t.vector(); + makeAffine(); + return *this; +} + +template +EIGEN_DEVICE_FUNC inline Transform Transform::operator*(const TranslationType& t) const +{ + Transform res = *this; + res.translate(t.vector()); + return res; +} + +template +EIGEN_DEVICE_FUNC inline Transform& Transform::operator=(const UniformScaling& s) +{ + m_matrix.setZero(); + linear().diagonal().fill(s.factor()); + makeAffine(); + return *this; +} + +template +template +EIGEN_DEVICE_FUNC inline Transform& Transform::operator=(const RotationBase& r) +{ + linear() = internal::toRotationMatrix(r); + translation().setZero(); + makeAffine(); + return *this; +} + +template +template +EIGEN_DEVICE_FUNC inline Transform Transform::operator*(const RotationBase& r) const +{ + Transform res = *this; + res.rotate(r.derived()); + return res; +} + +/************************ +*** Special functions *** +************************/ + +/** \returns the rotation part of the transformation + * + * + * \svd_module + * + * \sa computeRotationScaling(), computeScalingRotation(), class SVD + */ +template +EIGEN_DEVICE_FUNC const typename Transform::LinearMatrixType +Transform::rotation() const +{ + LinearMatrixType result; + computeRotationScaling(&result, (LinearMatrixType*)0); + return result; +} + + +/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. 
+ * + * + * + * \svd_module + * + * \sa computeScalingRotation(), rotation(), class SVD + */ +template +template +EIGEN_DEVICE_FUNC void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const +{ + JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); + + Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 + VectorType sv(svd.singularValues()); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint()); + if(rotation) + { + LinearMatrixType m(svd.matrixU()); + m.col(0) /= x; + rotation->lazyAssign(m * svd.matrixV().adjoint()); + } +} + +/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * + * + * \svd_module + * + * \sa computeRotationScaling(), rotation(), class SVD + */ +template +template +EIGEN_DEVICE_FUNC void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const +{ + JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); + + Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 + VectorType sv(svd.singularValues()); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint()); + if(rotation) + { + LinearMatrixType m(svd.matrixU()); + m.col(0) /= x; + rotation->lazyAssign(m * svd.matrixV().adjoint()); + } +} + +/** Convenient method to set \c *this from a position, orientation and scale + * of a 3D object. + */ +template +template +EIGEN_DEVICE_FUNC Transform& +Transform::fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale) +{ + linear() = internal::toRotationMatrix(orientation); + linear() *= scale.asDiagonal(); + translation() = position; + makeAffine(); + return *this; +} + +namespace internal { + +template +struct transform_make_affine +{ + template + EIGEN_DEVICE_FUNC static void run(MatrixType &mat) + { + static const int Dim = MatrixType::ColsAtCompileTime-1; + mat.template block<1,Dim>(Dim,0).setZero(); + mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1); + } +}; + +template<> +struct transform_make_affine +{ + template EIGEN_DEVICE_FUNC static void run(MatrixType &) { } +}; + +// selector needed to avoid taking the inverse of a 3x4 matrix +template +struct projective_transform_inverse +{ + EIGEN_DEVICE_FUNC static inline void run(const TransformType&, TransformType&) + {} +}; + +template +struct projective_transform_inverse +{ + EIGEN_DEVICE_FUNC static inline void run(const TransformType& m, TransformType& res) + { + res.matrix() = m.matrix().inverse(); + } +}; + +} // end namespace internal + + +/** + * + * \returns the inverse transformation according to some given knowledge + * on \c *this. + * + * \param hint allows to optimize the inversion process when the transformation + * is known to be not a general transformation (optional). The possible values are: + * - #Projective if the transformation is not necessarily affine, i.e., if the + * last row is not guaranteed to be [0 ... 0 1] + * - #Affine if the last row can be assumed to be [0 ... 0 1] + * - #Isometry if the transformation is only a concatenations of translations + * and rotations. + * The default is the template class parameter \c Mode. 
+ * + * \warning unless \a traits is always set to NoShear or NoScaling, this function + * requires the generic inverse method of MatrixBase defined in the LU module. If + * you forget to include this module, then you will get hard to debug linking errors. + * + * \sa MatrixBase::inverse() + */ +template +EIGEN_DEVICE_FUNC Transform +Transform::inverse(TransformTraits hint) const +{ + Transform res; + if (hint == Projective) + { + internal::projective_transform_inverse::run(*this, res); + } + else + { + if (hint == Isometry) + { + res.matrix().template topLeftCorner() = linear().transpose(); + } + else if(hint&Affine) + { + res.matrix().template topLeftCorner() = linear().inverse(); + } + else + { + eigen_assert(false && "Invalid transform traits in Transform::Inverse"); + } + // translation and remaining parts + res.matrix().template topRightCorner() + = - res.matrix().template topLeftCorner() * translation(); + res.makeAffine(); // we do need this, because in the beginning res is uninitialized + } + return res; +} + +namespace internal { + +/***************************************************** +*** Specializations of take affine part *** +*****************************************************/ + +template struct transform_take_affine_part { + typedef typename TransformType::MatrixType MatrixType; + typedef typename TransformType::AffinePart AffinePart; + typedef typename TransformType::ConstAffinePart ConstAffinePart; + static inline AffinePart run(MatrixType& m) + { return m.template block(0,0); } + static inline ConstAffinePart run(const MatrixType& m) + { return m.template block(0,0); } +}; + +template +struct transform_take_affine_part > { + typedef typename Transform::MatrixType MatrixType; + static inline MatrixType& run(MatrixType& m) { return m; } + static inline const MatrixType& run(const MatrixType& m) { return m; } +}; + +/***************************************************** +*** Specializations of construct from matrix *** +*****************************************************/ + +template +struct transform_construct_from_matrix +{ + static inline void run(Transform *transform, const Other& other) + { + transform->linear() = other; + transform->translation().setZero(); + transform->makeAffine(); + } +}; + +template +struct transform_construct_from_matrix +{ + static inline void run(Transform *transform, const Other& other) + { + transform->affine() = other; + transform->makeAffine(); + } +}; + +template +struct transform_construct_from_matrix +{ + static inline void run(Transform *transform, const Other& other) + { transform->matrix() = other; } +}; + +template +struct transform_construct_from_matrix +{ + static inline void run(Transform *transform, const Other& other) + { transform->matrix() = other.template block(0,0); } +}; + +/********************************************************** +*** Specializations of operator* with rhs EigenBase *** +**********************************************************/ + +template +struct transform_product_result +{ + enum + { + Mode = + (LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective : + (LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine : + (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact : + (LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? 
Isometry : Projective + }; +}; + +template< typename TransformType, typename MatrixType, int RhsCols> +struct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols> +{ + typedef typename MatrixType::PlainObject ResultType; + + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) + { + return T.matrix() * other; + } +}; + +template< typename TransformType, typename MatrixType, int RhsCols> +struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols> +{ + enum { + Dim = TransformType::Dim, + HDim = TransformType::HDim, + OtherRows = MatrixType::RowsAtCompileTime, + OtherCols = MatrixType::ColsAtCompileTime + }; + + typedef typename MatrixType::PlainObject ResultType; + + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) + { + EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); + + typedef Block TopLeftLhs; + + ResultType res(other.rows(),other.cols()); + TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; + res.row(OtherRows-1) = other.row(OtherRows-1); + + return res; + } +}; + +template< typename TransformType, typename MatrixType, int RhsCols> +struct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols> +{ + enum { + Dim = TransformType::Dim, + HDim = TransformType::HDim, + OtherRows = MatrixType::RowsAtCompileTime, + OtherCols = MatrixType::ColsAtCompileTime + }; + + typedef typename MatrixType::PlainObject ResultType; + + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) + { + EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); + + typedef Block TopLeftLhs; + ResultType res(Replicate(T.translation(),1,other.cols())); + TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other; + + return res; + } +}; + +template< typename TransformType, typename MatrixType > +struct transform_right_product_impl< TransformType, MatrixType, 2, 1> // rhs is a vector of size Dim +{ + typedef typename TransformType::MatrixType TransformMatrix; + enum { + Dim = TransformType::Dim, + HDim = TransformType::HDim, + OtherRows = MatrixType::RowsAtCompileTime, + WorkingRows = EIGEN_PLAIN_ENUM_MIN(TransformMatrix::RowsAtCompileTime,HDim) + }; + + typedef typename MatrixType::PlainObject ResultType; + + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) + { + EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); + + Matrix rhs; + rhs.template head() = other; rhs[Dim] = typename ResultType::Scalar(1); + Matrix res(T.matrix() * rhs); + return res.template head(); + } +}; + +/********************************************************** +*** Specializations of operator* with lhs EigenBase *** +**********************************************************/ + +// generic HDim x HDim matrix * T => Projective +template +struct transform_left_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef Transform ResultType; + static ResultType run(const Other& other,const TransformType& tr) + { return ResultType(other * tr.matrix()); } +}; + +// generic HDim x HDim matrix * AffineCompact => Projective +template +struct transform_left_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef Transform ResultType; + static ResultType run(const Other& other,const TransformType& tr) + { + ResultType res; + 
res.matrix().noalias() = other.template block(0,0) * tr.matrix(); + res.matrix().col(Dim) += other.col(Dim); + return res; + } +}; + +// affine matrix * T +template +struct transform_left_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef TransformType ResultType; + static ResultType run(const Other& other,const TransformType& tr) + { + ResultType res; + res.affine().noalias() = other * tr.matrix(); + res.matrix().row(Dim) = tr.matrix().row(Dim); + return res; + } +}; + +// affine matrix * AffineCompact +template +struct transform_left_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef TransformType ResultType; + static ResultType run(const Other& other,const TransformType& tr) + { + ResultType res; + res.matrix().noalias() = other.template block(0,0) * tr.matrix(); + res.translation() += other.col(Dim); + return res; + } +}; + +// linear matrix * T +template +struct transform_left_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef TransformType ResultType; + static ResultType run(const Other& other, const TransformType& tr) + { + TransformType res; + if(Mode!=int(AffineCompact)) + res.matrix().row(Dim) = tr.matrix().row(Dim); + res.matrix().template topRows().noalias() + = other * tr.matrix().template topRows(); + return res; + } +}; + +/********************************************************** +*** Specializations of operator* with another Transform *** +**********************************************************/ + +template +struct transform_transform_product_impl,Transform,false > +{ + enum { ResultMode = transform_product_result::Mode }; + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + ResultType res; + res.linear() = lhs.linear() * rhs.linear(); + res.translation() = lhs.linear() * rhs.translation() + lhs.translation(); + res.makeAffine(); + return res; + } +}; + +template +struct transform_transform_product_impl,Transform,true > +{ + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + return ResultType( lhs.matrix() * rhs.matrix() ); + } +}; + +template +struct transform_transform_product_impl,Transform,true > +{ + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + ResultType res; + res.matrix().template topRows() = lhs.matrix() * rhs.matrix(); + res.matrix().row(Dim) = rhs.matrix().row(Dim); + return res; + } +}; + +template +struct transform_transform_product_impl,Transform,true > +{ + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + ResultType res(lhs.matrix().template leftCols() * rhs.matrix()); + res.matrix().col(Dim) += lhs.matrix().col(Dim); + return res; + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRANSFORM_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Translation.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Translation.h new file mode 100644 index 000000000..0e99ce68e --- /dev/null +++ 
b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Translation.h
@@ -0,0 +1,202 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSLATION_H
+#define EIGEN_TRANSLATION_H
+
+namespace Eigen {
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Translation
+  *
+  * \brief Represents a translation transformation
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients.
+  * \tparam _Dim the dimension of the space, can be a compile time value or Dynamic
+  *
+  * \note This class is not aimed to be used to store a translation transformation,
+  * but rather to simplify the construction and update of Transform objects.
+  *
+  * \sa class Scaling, class Transform
+  */
+template<typename _Scalar, int _Dim>
+class Translation
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim,Affine> AffineTransformType;
+  /** corresponding isometric transformation type */
+  typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  EIGEN_DEVICE_FUNC Translation() {}
+  /** */
+  EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy)
+  {
+    eigen_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** */
+  EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    eigen_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
+  EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
+
+  /** \brief Returns the x-translation by value. **/
+  EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); }
+  /** \brief Returns the y-translation by value. **/
+  EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); }
+  /** \brief Returns the z-translation by value. **/
+  EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); }
+
+  /** \brief Returns the x-translation as a reference. **/
+  EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); }
+  /** \brief Returns the y-translation as a reference. **/
+  EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); }
+  /** \brief Returns the z-translation as a reference. **/
+  EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); }
+
+  EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; }
+  EIGEN_DEVICE_FUNC VectorType& vector() { return m_coeffs; }
+
+  EIGEN_DEVICE_FUNC const VectorType& translation() const { return m_coeffs; }
+  EIGEN_DEVICE_FUNC VectorType& translation() { return m_coeffs; }
+
+  /** Concatenates two translations */
+  EIGEN_DEVICE_FUNC inline Translation operator* (const Translation& other) const
+  { return Translation(m_coeffs + other.m_coeffs); }
+
+  /** Concatenates a translation and a uniform scaling */
+  EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;
+
+  /** Concatenates a translation and a linear transformation */
+  template<typename OtherDerived>
+  EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;
+
+  /** Concatenates a translation and a rotation */
+  template<typename Derived>
+  EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * IsometryTransformType(r); }
+
+  /** \returns the concatenation of a linear transformation \a l with the translation \a t */
+  // it's a nightmare to define a templated friend function outside its declaration
+  template<typename OtherDerived> friend
+  EIGEN_DEVICE_FUNC inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
+  {
+    AffineTransformType res;
+    res.matrix().setZero();
+    res.linear() = linear.derived();
+    res.translation() = linear.derived() * t.m_coeffs;
+    res.matrix().row(Dim).setZero();
+    res(Dim,Dim) = Scalar(1);
+    return res;
+  }
+
+  /** Concatenates a translation and a transformation */
+  template<int Mode, int Options>
+  EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const
+  {
+    Transform<Scalar,Dim,Mode> res = t;
+    res.pretranslate(m_coeffs);
+    return res;
+  }
+
+  /** Applies translation to vector */
+  template<typename Derived>
+  inline typename internal::enable_if<Derived::IsVectorAtCompileTime,VectorType>::type
+  operator* (const MatrixBase<Derived>& vec) const
+  { return m_coeffs + vec.derived(); }
+
+  /** \returns the inverse translation (opposite) */
+  Translation inverse() const { return Translation(-m_coeffs); }
+
+  static const Translation Identity() { return Translation(VectorType::Zero()); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  EIGEN_DEVICE_FUNC inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
+  { m_coeffs = other.vector().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  EIGEN_DEVICE_FUNC bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
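Illustrative usage sketch of the Translation API above (an aside, not part of the patched sources):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  Eigen::Translation3f a(1.0f, 0.0f, 0.0f), b(0.0f, 2.0f, 0.0f);
  Eigen::Translation3f c = a * b;  // concatenation: the offsets add up
  std::cout << (c * Eigen::Vector3f::Zero()).transpose() << "\n"; // 1 2 0
  std::cout << c.inverse().vector().transpose() << "\n";          // -1 -2 0
}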
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Translation<float, 2> Translation2f;
+typedef Translation<double,2> Translation2d;
+typedef Translation<float, 3> Translation3f;
+typedef Translation<double,3> Translation3d;
+//@}
+
+template<typename Scalar, int Dim>
+EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const
+{
+  AffineTransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal().fill(other.factor());
+  res.translation() = m_coeffs;
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const
+{
+  AffineTransformType res;
+  res.matrix().setZero();
+  res.linear() = linear.derived();
+  res.translation() = m_coeffs;
+  res.matrix().row(Dim).setZero();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSLATION_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Umeyama.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Umeyama.h
new file mode 100644
index 000000000..7e933fca1
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/Umeyama.h
@@ -0,0 +1,166 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_UMEYAMA_H
+#define EIGEN_UMEYAMA_H
+
+// This file requires the user to include
+// * Eigen/Core
+// * Eigen/LU
+// * Eigen/SVD
+// * Eigen/Array
+
+namespace Eigen {
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+// These helpers are required because they allow using mixed types as parameters
+// for umeyama(). The problem with mixed parameters is that the return type
+// cannot trivially be deduced when float and double types are mixed.
+namespace internal {
+
+// Compile time return type deduction for different MatrixBase types.
+// "Different" means here different alignment and parameters but the same underlying
+// real scalar type.
+template<typename MatrixType, typename OtherMatrixType>
+struct umeyama_transform_matrix_type
+{
+  enum {
+    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+
+    // When possible we want to choose some small fixed size value since the result
+    // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
+    HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
+  };
+
+  typedef Matrix<typename traits<MatrixType>::Scalar,
+    HomogeneousDimension,
+    HomogeneousDimension,
+    AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
+    HomogeneousDimension,
+    HomogeneousDimension
+  > type;
+};
+
+}
+
+#endif
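Before the estimator documented next, a sketch of the intended call pattern (illustrative only, with hypothetical random data; assumes the Geometry module):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Matrix3Xd src = Matrix3Xd::Random(3, 10);   // hypothetical 3xN source points
  Affine3d T = Translation3d(0.5, -1.0, 2.0)
             * AngleAxisd(0.3, Vector3d::UnitY())
             * Scaling(1.5);
  Matrix3Xd dst = T * src;                    // their similarity-transformed images
  Matrix4d Rt = umeyama(src, dst, true);      // recovers [cR t; 0 1]
  std::cout << (Rt - T.matrix()).norm() << "\n";  // ~0 up to numerical noise
}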
+* +* The algorithm is based on: +* "Least-squares estimation of transformation parameters between two point patterns", +* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 +* +* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that +* \f{align*} +* \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2 +* \f} +* is minimized. +* +* The algorithm is based on the analysis of the covariance matrix +* \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$ +* of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where +* \f$d\f$ corresponds to the dimension (which is typically small). +* The analysis involves the SVD, which has a complexity of \f$O(d^3)\f$, +* though the actual computational effort lies in the covariance +* matrix computation, which has an asymptotic lower bound of \f$O(dm)\f$ when +* the input point sets have dimension \f$d \times m\f$. +* +* Currently the method works only for floating-point matrices. +* +* \todo Should the return type of umeyama() become a Transform? +* +* \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$. +* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$. +* \param with_scaling Sets \f$ c=1 \f$ when false is passed. +* \return The homogeneous transformation +* \f{align*} +* T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix} +* \f} +* minimizing the residual above. This transformation is always returned as an +* Eigen::Matrix. +*/ +template<typename Derived, typename OtherDerived> +typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type +umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true) +{ + typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType; + typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar; + typedef typename NumTraits<Scalar>::Real RealScalar; + + EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) + EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) }; + + typedef Matrix<Scalar, Dimension, 1> VectorType; + typedef Matrix<Scalar, Dimension, Dimension> MatrixType; + typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType; + + const Index m = src.rows(); // dimension + const Index n = src.cols(); // number of measurements + + // required for demeaning ... + const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n); + + // computation of mean + const VectorType src_mean = src.rowwise().sum() * one_over_n; + const VectorType dst_mean = dst.rowwise().sum() * one_over_n; + + // demeaning of src and dst points + const RowMajorMatrixType src_demean = src.colwise() - src_mean; + const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean; + + // Eq. (36)-(37) + const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n; + + // Eq. (38) + const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose(); + + JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV); + + // Initialize the resulting transformation with an identity matrix... + TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1); + + // Eq. (39) + VectorType S = VectorType::Ones(m); + + if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 ) + S(m-1) = -1; + + // Eq.
(40) and (43) + Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); + + if (with_scaling) + { + // Eq. (42) + const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); + + // Eq. (41) + Rt.col(m).head(m) = dst_mean; + Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean; + Rt.block(0,0,m,m) *= c; + } + else + { + Rt.col(m).head(m) = dst_mean; + Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean; + } + + return Rt; +} + +} // end namespace Eigen + +#endif // EIGEN_UMEYAMA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h new file mode 100644 index 000000000..f68cab583 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h @@ -0,0 +1,161 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Rohit Garg +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GEOMETRY_SSE_H +#define EIGEN_GEOMETRY_SSE_H + +namespace Eigen { + +namespace internal { + +template +struct quat_product +{ + enum { + AAlignment = traits::Alignment, + BAlignment = traits::Alignment, + ResAlignment = traits >::Alignment + }; + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + { + Quaternion res; + const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f); + __m128 a = _a.coeffs().template packet(0); + __m128 b = _b.coeffs().template packet(0); + __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2)); + __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1)); + pstoret( + &res.x(), + _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), + _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), + vec4f_swizzle1(b,1,2,0,0))), + _mm_xor_ps(mask,_mm_add_ps(s1,s2)))); + + return res; + } +}; + +template +struct quat_conj +{ + enum { + ResAlignment = traits >::Alignment + }; + static inline Quaternion run(const QuaternionBase& q) + { + Quaternion res; + const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f); + pstoret(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet::Alignment>(0))); + return res; + } +}; + + +template +struct cross3_impl +{ + enum { + ResAlignment = traits::type>::Alignment + }; + static inline typename plain_matrix_type::type + run(const VectorLhs& lhs, const VectorRhs& rhs) + { + __m128 a = lhs.template packet::Alignment>(0); + __m128 b = rhs.template packet::Alignment>(0); + __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); + __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); + typename plain_matrix_type::type res; + pstoret(&res.x(),_mm_sub_ps(mul1,mul2)); + return res; + } +}; + + + + +template +struct quat_product +{ + enum { + BAlignment = traits::Alignment, + ResAlignment = traits >::Alignment + }; + + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + { + const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); + + Quaternion res; + + const double* a = _a.coeffs().data(); + 
Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0); + Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2); + Packet2d a_xx = pset1<Packet2d>(a[0]); + Packet2d a_yy = pset1<Packet2d>(a[1]); + Packet2d a_zz = pset1<Packet2d>(a[2]); + Packet2d a_ww = pset1<Packet2d>(a[3]); + + // two temporaries: + Packet2d t1, t2; + + /* + * t1 = ww*xy + yy*zw + * t2 = zz*xy - xx*zw + * res.xy = t1 +/- swap(t2) + */ + t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); + t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); +#ifdef EIGEN_VECTORIZE_SSE3 + EIGEN_UNUSED_VARIABLE(mask) + pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2))); +#else + pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2)))); +#endif + + /* + * t1 = ww*zw - yy*xy + * t2 = zz*zw + xx*xy + * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) + */ + t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); + t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); +#ifdef EIGEN_VECTORIZE_SSE3 + EIGEN_UNUSED_VARIABLE(mask) + pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2))); +#else + pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2)))); +#endif + + return res; +} +}; + +template<class Derived> +struct quat_conj<Architecture::SSE, Derived, double> +{ + enum { + ResAlignment = traits<Quaternion<double> >::Alignment + }; + static inline Quaternion<double> run(const QuaternionBase<Derived>& q) + { + Quaternion<double> res; + const __m128d mask0 = _mm_setr_pd(-0.,-0.); + const __m128d mask2 = _mm_setr_pd(-0.,0.); + pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0))); + pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2))); + return res; + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GEOMETRY_SSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/BlockHouseholder.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/BlockHouseholder.h new file mode 100644 index 000000000..01a7ed188 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/BlockHouseholder.h @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Vincent Lejeune +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLOCK_HOUSEHOLDER_H +#define EIGEN_BLOCK_HOUSEHOLDER_H + +// This file contains some helper functions to deal with block householder reflectors + +namespace Eigen { + +namespace internal { + +/** \internal */ +// template<typename TriangularFactorType,typename VectorsType,typename CoeffsType> +// void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) +// { +// typedef typename VectorsType::Scalar Scalar; +// const Index nbVecs = vectors.cols(); +// eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); +// +// for(Index i = 0; i < nbVecs; i++) +// { +// Index rs = vectors.rows() - i; +// // Warning, note that hCoeffs may alias with vectors. +// // It is then necessary to copy it before modifying vectors(i,i). +// typename CoeffsType::Scalar h = hCoeffs(i); +// // This hack permits to pass through nested Block<> and Transpose<> expressions. +// Scalar *Vii_ptr = const_cast<Scalar*>(vectors.data() + vectors.outerStride()*i + vectors.innerStride()*i); +// Scalar Vii = *Vii_ptr; +// *Vii_ptr = Scalar(1); +// triFactor.col(i).head(i).noalias() = -h * vectors.block(i, 0, rs, i).adjoint() +// * vectors.col(i).tail(rs); +// *Vii_ptr = Vii; +// // FIXME add .noalias() once the triangular product can work inplace +// triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>() +// * triFactor.col(i).head(i); +// triFactor(i,i) = hCoeffs(i); +// } +// } + +/** \internal */ +// This variant avoids modifications in vectors +template<typename TriangularFactorType,typename VectorsType,typename CoeffsType> +void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) +{ + const Index nbVecs = vectors.cols(); + eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); + + for(Index i = nbVecs-1; i >=0 ; --i) + { + Index rs = vectors.rows() - i - 1; + Index rt = nbVecs-i-1; + + if(rt>0) + { + triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint() + * vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>(); + + // FIXME add .noalias() once the triangular product can work inplace + triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>(); + + } + triFactor(i,i) = hCoeffs(i); + } +} + +/** \internal + * if forward then perform mat = H0 * H1 * H2 * mat + * otherwise perform mat = H2 * H1 * H0 * mat + */ +template<typename MatrixType,typename VectorsType,typename CoeffsType> +void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward) +{ + enum { TFactorSize = MatrixType::ColsAtCompileTime }; + Index nbVecs = vectors.cols(); + Matrix T(nbVecs,nbVecs); + + if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs); + else make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate()); + const TriangularView<const VectorsType, UnitLower> V(vectors); + + // A -= V T V^* A + Matrix tmp = V.adjoint() * mat; + // FIXME add .noalias() once the triangular product can work inplace + if(forward) tmp = T.template triangularView<Upper>() * tmp; + else tmp = T.template triangularView<Upper>().adjoint() * tmp; + mat.noalias() -= V * tmp; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BLOCK_HOUSEHOLDER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/Householder.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/Householder.h new file mode 100644 index 000000000..80de2c305 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/Householder.h @@ -0,0 +1,172 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_HOUSEHOLDER_H +#define EIGEN_HOUSEHOLDER_H + +namespace Eigen { + +namespace internal { +template<int n> struct decrement_size +{ + enum { + ret = n==Dynamic ? n : n-1 + }; +}; +} + +/** Computes the elementary reflector H such that: + * \f$ H *this = [ beta 0 ...
0]^T \f$ + * where the transformation H is: + * \f$ H = I - tau v v^*\f$ + * and the vector v is: + * \f$ v^T = [1 essential^T] \f$ + * + * The essential part of the vector \c v is stored in *this. + * + * On output: + * \param tau the scaling factor of the Householder transformation + * \param beta the result of H * \c *this + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), + * MatrixBase::applyHouseholderOnTheRight() + */ +template +void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) +{ + VectorBlock::ret> essentialPart(derived(), 1, size()-1); + makeHouseholder(essentialPart, tau, beta); +} + +/** Computes the elementary reflector H such that: + * \f$ H *this = [ beta 0 ... 0]^T \f$ + * where the transformation H is: + * \f$ H = I - tau v v^*\f$ + * and the vector v is: + * \f$ v^T = [1 essential^T] \f$ + * + * On output: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the Householder transformation + * \param beta the result of H * \c *this + * + * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), + * MatrixBase::applyHouseholderOnTheRight() + */ +template +template +void MatrixBase::makeHouseholder( + EssentialPart& essential, + Scalar& tau, + RealScalar& beta) const +{ + using std::sqrt; + using numext::conj; + + EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) + VectorBlock tail(derived(), 1, size()-1); + + RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm(); + Scalar c0 = coeff(0); + const RealScalar tol = (std::numeric_limits::min)(); + + if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol) + { + tau = RealScalar(0); + beta = numext::real(c0); + essential.setZero(); + } + else + { + beta = sqrt(numext::abs2(c0) + tailSqNorm); + if (numext::real(c0)>=RealScalar(0)) + beta = -beta; + essential = tail / (c0 - beta); + tau = conj((beta - c0) / beta); + } +} + +/** Apply the elementary reflector H given by + * \f$ H = I - tau v v^*\f$ + * with + * \f$ v^T = [1 essential^T] \f$ + * from the left to a vector or matrix. + * + * On input: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the Householder transformation + * \param workspace a pointer to working space with at least + * this->cols() * essential.size() entries + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), + * MatrixBase::applyHouseholderOnTheRight() + */ +template +template +void MatrixBase::applyHouseholderOnTheLeft( + const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace) +{ + if(rows() == 1) + { + *this *= Scalar(1)-tau; + } + else if(tau!=Scalar(0)) + { + Map::type> tmp(workspace,cols()); + Block bottom(derived(), 1, 0, rows()-1, cols()); + tmp.noalias() = essential.adjoint() * bottom; + tmp += this->row(0); + this->row(0) -= tau * tmp; + bottom.noalias() -= tau * essential * tmp; + } +} + +/** Apply the elementary reflector H given by + * \f$ H = I - tau v v^*\f$ + * with + * \f$ v^T = [1 essential^T] \f$ + * from the right to a vector or matrix. 
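For orientation, here is a minimal sketch of how these reflector primitives combine in practice. It is a hypothetical example, not part of the file above: the sizes, the workspace allocation (sized per the requirement stated in the documentation), and the use of v.tail() as the essential part are illustrative assumptions.

    #include <Eigen/Dense>
    #include <Eigen/Householder>
    using namespace Eigen;

    int main()
    {
      VectorXd v = VectorXd::Random(5);
      double tau, beta;
      v.makeHouseholderInPlace(tau, beta);   // now H*v = [beta 0 0 0 0]^T; v.tail(4) holds the essential part
      MatrixXd A = MatrixXd::Random(5, 3);
      VectorXd workspace(A.cols() * (v.size() - 1));  // scratch space, sized as the doc above requires
      A.applyHouseholderOnTheLeft(v.tail(4), tau, workspace.data());  // A = H * A
      return 0;
    }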
+ * + * On input: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the Householder transformation + * \param workspace a pointer to working space with at least + * this->cols() * essential.size() entries + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), + * MatrixBase::applyHouseholderOnTheLeft() + */ +template +template +void MatrixBase::applyHouseholderOnTheRight( + const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace) +{ + if(cols() == 1) + { + *this *= Scalar(1)-tau; + } + else if(tau!=Scalar(0)) + { + Map::type> tmp(workspace,rows()); + Block right(derived(), 0, 1, rows(), cols()-1); + tmp.noalias() = right * essential.conjugate(); + tmp += this->col(0); + this->col(0) -= tau * tmp; + right.noalias() -= tau * tmp * essential.transpose(); + } +} + +} // end namespace Eigen + +#endif // EIGEN_HOUSEHOLDER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/HouseholderSequence.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/HouseholderSequence.h new file mode 100644 index 000000000..3ce0a693d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Householder/HouseholderSequence.h @@ -0,0 +1,470 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H +#define EIGEN_HOUSEHOLDER_SEQUENCE_H + +namespace Eigen { + +/** \ingroup Householder_Module + * \householder_module + * \class HouseholderSequence + * \brief Sequence of Householder reflections acting on subspaces with decreasing size + * \tparam VectorsType type of matrix containing the Householder vectors + * \tparam CoeffsType type of vector containing the Householder coefficients + * \tparam Side either OnTheLeft (the default) or OnTheRight + * + * This class represents a product sequence of Householder reflections where the first Householder reflection + * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by + * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace + * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but + * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections + * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods + * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(), + * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence. + * + * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the + * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i + * v_i^* \f$. 
The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$ + * v_i \f$ is a vector of the form + * \f[ + * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. + * \f] + * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector. + * + * Typical usages are listed below, where H is a HouseholderSequence: + * \code + * A.applyOnTheRight(H); // A = A * H + * A.applyOnTheLeft(H); // A = H * A + * A.applyOnTheRight(H.adjoint()); // A = A * H^* + * A.applyOnTheLeft(H.adjoint()); // A = H^* * A + * MatrixXd Q = H; // conversion to a dense matrix + * \endcode + * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators. + * + * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example. + * + * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + */ + +namespace internal { + +template +struct traits > +{ + typedef typename VectorsType::Scalar Scalar; + typedef typename VectorsType::StorageIndex StorageIndex; + typedef typename VectorsType::StorageKind StorageKind; + enum { + RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime + : traits::ColsAtCompileTime, + ColsAtCompileTime = RowsAtCompileTime, + MaxRowsAtCompileTime = Side==OnTheLeft ? traits::MaxRowsAtCompileTime + : traits::MaxColsAtCompileTime, + MaxColsAtCompileTime = MaxRowsAtCompileTime, + Flags = 0 + }; +}; + +struct HouseholderSequenceShape {}; + +template +struct evaluator_traits > + : public evaluator_traits_base > +{ + typedef HouseholderSequenceShape Shape; +}; + +template +struct hseq_side_dependent_impl +{ + typedef Block EssentialVectorType; + typedef HouseholderSequence HouseholderSequenceType; + static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) + { + Index start = k+1+h.m_shift; + return Block(h.m_vectors, start, k, h.rows()-start, 1); + } +}; + +template +struct hseq_side_dependent_impl +{ + typedef Transpose > EssentialVectorType; + typedef HouseholderSequence HouseholderSequenceType; + static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) + { + Index start = k+1+h.m_shift; + return Block(h.m_vectors, k, start, 1, h.rows()-start).transpose(); + } +}; + +template struct matrix_type_times_scalar_type +{ + typedef typename ScalarBinaryOpTraits::ReturnType + ResultScalar; + typedef Matrix Type; +}; + +} // end namespace internal + +template class HouseholderSequence + : public EigenBase > +{ + typedef typename internal::hseq_side_dependent_impl::EssentialVectorType EssentialVectorType; + + public: + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime + }; + typedef typename internal::traits::Scalar Scalar; + + typedef HouseholderSequence< + typename internal::conditional::IsComplex, + typename internal::remove_all::type, + VectorsType>::type, + typename internal::conditional::IsComplex, + typename internal::remove_all::type, + CoeffsType>::type, + Side + > ConjugateReturnType; + + /** \brief Constructor. 
+ * \param[in] v %Matrix containing the essential parts of the Householder vectors + * \param[in] h Vector containing the Householder coefficients + * + * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The + * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th + * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the + * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many + * Householder reflections as there are columns. + * + * \note The %HouseholderSequence object stores \p v and \p h by reference. + * + * Example: \include HouseholderSequence_HouseholderSequence.cpp + * Output: \verbinclude HouseholderSequence_HouseholderSequence.out + * + * \sa setLength(), setShift() + */ + HouseholderSequence(const VectorsType& v, const CoeffsType& h) + : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()), + m_shift(0) + { + } + + /** \brief Copy constructor. */ + HouseholderSequence(const HouseholderSequence& other) + : m_vectors(other.m_vectors), + m_coeffs(other.m_coeffs), + m_trans(other.m_trans), + m_length(other.m_length), + m_shift(other.m_shift) + { + } + + /** \brief Number of rows of transformation viewed as a matrix. + * \returns Number of rows + * \details This equals the dimension of the space that the transformation acts on. + */ + Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } + + /** \brief Number of columns of transformation viewed as a matrix. + * \returns Number of columns + * \details This equals the dimension of the space that the transformation acts on. + */ + Index cols() const { return rows(); } + + /** \brief Essential part of a Householder vector. + * \param[in] k Index of Householder reflection + * \returns Vector containing non-trivial entries of k-th Householder vector + * + * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of + * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector + * \f[ + * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. + * \f] + * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v + * passed to the constructor. + * + * \sa setShift(), shift() + */ + const EssentialVectorType essentialVector(Index k) const + { + eigen_assert(k >= 0 && k < m_length); + return internal::hseq_side_dependent_impl::essentialVector(*this, k); + } + + /** \brief %Transpose of the Householder sequence. */ + HouseholderSequence transpose() const + { + return HouseholderSequence(*this).setTrans(!m_trans); + } + + /** \brief Complex conjugate of the Householder sequence. */ + ConjugateReturnType conjugate() const + { + return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate()) + .setTrans(m_trans) + .setLength(m_length) + .setShift(m_shift); + } + + /** \brief Adjoint (conjugate transpose) of the Householder sequence. */ + ConjugateReturnType adjoint() const + { + return conjugate().setTrans(!m_trans); + } + + /** \brief Inverse of the Householder sequence (equals the adjoint). 
*/ + ConjugateReturnType inverse() const { return adjoint(); } + + /** \internal */ + template<typename DestType> inline void evalTo(DestType& dst) const + { + Matrix<Scalar, DestType::RowsAtCompileTime, 1, + AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows()); + evalTo(dst, workspace); + } + + /** \internal */ + template<typename Dest, typename Workspace> + void evalTo(Dest& dst, Workspace& workspace) const + { + workspace.resize(rows()); + Index vecs = m_length; + if(internal::is_same_dense(dst,m_vectors)) + { + // in-place + dst.diagonal().setOnes(); + dst.template triangularView<StrictlyUpper>().setZero(); + for(Index k = vecs-1; k >= 0; --k) + { + Index cornerSize = rows() - k - m_shift; + if(m_trans) + dst.bottomRightCorner(cornerSize, cornerSize) + .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data()); + else + dst.bottomRightCorner(cornerSize, cornerSize) + .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data()); + + // clear the off diagonal vector + dst.col(k).tail(rows()-k-1).setZero(); + } + // clear the remaining columns if needed + for(Index k = 0; k<cols()-vecs ; ++k) + dst.col(k).tail(rows()-k-1).setZero(); + } + else + { + dst.setIdentity(rows(), rows()); + for(Index k = vecs-1; k >= 0; --k) + { + Index cornerSize = rows() - k - m_shift; + if(m_trans) + dst.bottomRightCorner(cornerSize, cornerSize) + .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); + else + dst.bottomRightCorner(cornerSize, cornerSize) + .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); + } + } + } + + /** \internal */ + template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const + { + Matrix<Scalar,1,Dest::RowsAtCompileTime,RowMajor,1,Dest::MaxRowsAtCompileTime> workspace(dst.rows()); + applyThisOnTheRight(dst, workspace); + } + + /** \internal */ + template<typename Dest, typename Workspace> + inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const + { + workspace.resize(dst.rows()); + for(Index k = 0; k < m_length; ++k) + { + Index actual_k = m_trans ? m_length-k-1 : k; + dst.rightCols(rows()-m_shift-actual_k) + .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); + } + } + + /** \internal */ + template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const + { + Matrix<Scalar,1,Dynamic> workspace; + applyThisOnTheLeft(dst, workspace); + } + + /** \internal */ + template<typename Dest, typename Workspace> + inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const + { + const Index BlockSize = 48; + // if the entries are large enough, then apply the reflectors by block + if(m_length>=BlockSize && dst.cols()>1) + { + for(Index i = 0; i < m_length; i+=BlockSize) + { + Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i; + Index k = m_trans ? i : (std::max)(Index(0),end-BlockSize); + Index bs = end-k; + Index start = k + m_shift; + + typedef Block<typename internal::remove_all<typename VectorsType::Nested>::type,Dynamic,Dynamic> SubVectorsType; + SubVectorsType sub_vecs1(m_vectors.const_cast_derived(), Side==OnTheRight ? k : start, + Side==OnTheRight ? start : k, + Side==OnTheRight ? bs : m_vectors.rows()-start, + Side==OnTheRight ? m_vectors.cols()-start : bs); + typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1); + Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols()); + apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans); + } + } + else + { + workspace.resize(dst.cols()); + for(Index k = 0; k < m_length; ++k) + { + Index actual_k = m_trans ? k : m_length-k-1; + dst.bottomRows(rows()-m_shift-actual_k) + .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); + } + } + } + + /** \brief Computes the product of a Householder sequence with a matrix. + * \param[in] other %Matrix being multiplied. + * \returns Expression object representing the product.
+ * + * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this + * and \f$ M \f$ is the matrix \p other. + */ + template + typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other) const + { + typename internal::matrix_type_times_scalar_type::Type + res(other.template cast::ResultScalar>()); + applyThisOnTheLeft(res); + return res; + } + + template friend struct internal::hseq_side_dependent_impl; + + /** \brief Sets the length of the Householder sequence. + * \param [in] length New value for the length. + * + * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set + * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that + * is smaller. After this function is called, the length equals \p length. + * + * \sa length() + */ + HouseholderSequence& setLength(Index length) + { + m_length = length; + return *this; + } + + /** \brief Sets the shift of the Householder sequence. + * \param [in] shift New value for the shift. + * + * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th + * column of the matrix \p v passed to the constructor corresponds to the i-th Householder + * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}} + * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th + * Householder reflection. + * + * \sa shift() + */ + HouseholderSequence& setShift(Index shift) + { + m_shift = shift; + return *this; + } + + Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */ + Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */ + + /* Necessary for .adjoint() and .conjugate() */ + template friend class HouseholderSequence; + + protected: + + /** \brief Sets the transpose flag. + * \param [in] trans New value of the transpose flag. + * + * By default, the transpose flag is not set. If the transpose flag is set, then this object represents + * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$. + * + * \sa trans() + */ + HouseholderSequence& setTrans(bool trans) + { + m_trans = trans; + return *this; + } + + bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */ + + typename VectorsType::Nested m_vectors; + typename CoeffsType::Nested m_coeffs; + bool m_trans; + Index m_length; + Index m_shift; +}; + +/** \brief Computes the product of a matrix with a Householder sequence. + * \param[in] other %Matrix being multiplied. + * \param[in] h %HouseholderSequence being multiplied. + * \returns Expression object representing the product. + * + * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the + * Householder sequence represented by \p h. + */ +template +typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other, const HouseholderSequence& h) +{ + typename internal::matrix_type_times_scalar_type::Type + res(other.template cast::ResultScalar>()); + h.applyThisOnTheRight(res); + return res; +} + +/** \ingroup Householder_Module \householder_module + * \brief Convenience function for constructing a Householder sequence. + * \returns A HouseholderSequence constructed from the specified arguments. 
+ */ +template<typename VectorsType, typename CoeffsType> +HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h) +{ + return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h); +} + +/** \ingroup Householder_Module \householder_module + * \brief Convenience function for constructing a Householder sequence. + * \returns A HouseholderSequence constructed from the specified arguments. + * \details This function differs from householderSequence() in that the template argument \p OnTheSide of + * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft. + */ +template<typename VectorsType, typename CoeffsType> +HouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h) +{ + return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h); +} + +} // end namespace Eigen + +#endif // EIGEN_HOUSEHOLDER_SEQUENCE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h new file mode 100644 index 000000000..f66c846ef --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BASIC_PRECONDITIONERS_H +#define EIGEN_BASIC_PRECONDITIONERS_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \brief A preconditioner based on the diagonal entries + * + * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. + * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: + \code + A.diagonal().asDiagonal() . x = b + \endcode + * + * \tparam _Scalar the type of the scalar. + * + * \implsparsesolverconcept + * + * This preconditioner is suitable for both selfadjoint and general problems. + * The diagonal entries are pre-inverted and stored into a dense vector. + * + * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
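A minimal usage sketch (hypothetical names and an assumed pre-filled system; DiagonalPreconditioner is already the default preconditioner of ConjugateGradient and is only spelled out here for clarity):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    using namespace Eigen;

    SparseMatrix<double> A;  // assumed square, filled elsewhere, with a non-zero diagonal
    VectorXd b;              // assumed filled elsewhere
    ConjugateGradient<SparseMatrix<double>, Lower|Upper,
                      DiagonalPreconditioner<double> > cg(A);
    VectorXd x = cg.solve(b);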
+ * + * \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient */ +template<typename _Scalar> +class DiagonalPreconditioner +{ + typedef _Scalar Scalar; + typedef Matrix<Scalar,Dynamic,1> Vector; + public: + typedef typename Vector::StorageIndex StorageIndex; + enum { + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + + DiagonalPreconditioner() : m_isInitialized(false) {} + + template<typename MatType> + explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) + { + compute(mat); + } + + Index rows() const { return m_invdiag.size(); } + Index cols() const { return m_invdiag.size(); } + + template<typename MatType> + DiagonalPreconditioner& analyzePattern(const MatType& ) + { + return *this; + } + + template<typename MatType> + DiagonalPreconditioner& factorize(const MatType& mat) + { + m_invdiag.resize(mat.cols()); + for(int j=0; j<mat.outerSize(); ++j) + { + typename MatType::InnerIterator it(mat,j); + while(it && it.index()!=j) ++it; + if(it && it.index()==j && it.value()!=Scalar(0)) + m_invdiag(j) = Scalar(1)/it.value(); + else + m_invdiag(j) = Scalar(1); + } + m_isInitialized = true; + return *this; + } + + template<typename MatType> + DiagonalPreconditioner& compute(const MatType& mat) + { + return factorize(mat); + } + + /** \internal */ + template<typename Rhs, typename Dest> + void _solve_impl(const Rhs& b, Dest& x) const + { + x = m_invdiag.array() * b.array() ; + } + + template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs> + solve(const MatrixBase<Rhs>& b) const + { + eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); + eigen_assert(m_invdiag.size()==b.rows() + && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); + return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived()); + } + + ComputationInfo info() { return Success; } + + protected: + Vector m_invdiag; + bool m_isInitialized; +}; + +/** \ingroup IterativeLinearSolvers_Module + * \brief Jacobi preconditioner for LeastSquaresConjugateGradient + * + * This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix. + * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: + \code + (A.adjoint() * A).diagonal().asDiagonal() * x = b + \endcode + * + * \tparam _Scalar the type of the scalar. + * + * \implsparsesolverconcept + * + * The diagonal entries are pre-inverted and stored into a dense vector.
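A sketch of where this preconditioner is typically used — it is the default of LeastSquaresConjugateGradient, so it rarely needs to be named explicitly; the matrix and vector here are illustrative assumptions:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    using namespace Eigen;

    SparseMatrix<double> A;  // m x n, assumed filled; we solve min_x |Ax - b|^2
    VectorXd b;              // assumed filled
    LeastSquaresConjugateGradient<SparseMatrix<double> > lscg(A);  // uses LeastSquareDiagonalPreconditioner by default
    VectorXd x = lscg.solve(b);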
+ * + * \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner + */ +template<typename _Scalar> +class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar> +{ + typedef _Scalar Scalar; + typedef typename NumTraits<Scalar>::Real RealScalar; + typedef DiagonalPreconditioner<_Scalar> Base; + using Base::m_invdiag; + public: + + LeastSquareDiagonalPreconditioner() : Base() {} + + template<typename MatType> + explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base() + { + compute(mat); + } + + template<typename MatType> + LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& ) + { + return *this; + } + + template<typename MatType> + LeastSquareDiagonalPreconditioner& factorize(const MatType& mat) + { + // Compute the inverse squared-norm of each column of mat + m_invdiag.resize(mat.cols()); + if(MatType::IsRowMajor) + { + m_invdiag.setZero(); + for(Index j=0; j<mat.outerSize(); ++j) + { + for(typename MatType::InnerIterator it(mat,j); it; ++it) + m_invdiag(it.index()) += numext::abs2(it.value()); + } + for(Index j=0; j<mat.cols(); ++j) + if(numext::real(m_invdiag(j))>RealScalar(0)) + m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j)); + } + else + { + for(Index j=0; j<mat.outerSize(); ++j) + { + RealScalar sum = mat.col(j).squaredNorm(); + if(sum>RealScalar(0)) + m_invdiag(j) = RealScalar(1)/sum; + else + m_invdiag(j) = RealScalar(1); + } + } + Base::m_isInitialized = true; + return *this; + } + + template<typename MatType> + LeastSquareDiagonalPreconditioner& compute(const MatType& mat) + { + return factorize(mat); + } + + ComputationInfo info() { return Success; } + + protected: +}; + +/** \ingroup IterativeLinearSolvers_Module + * \brief A naive preconditioner which approximates any matrix as the identity matrix + * + * \implsparsesolverconcept + * + * \sa class DiagonalPreconditioner + */ +class IdentityPreconditioner +{ + public: + + IdentityPreconditioner() {} + + template<typename MatrixType> + explicit IdentityPreconditioner(const MatrixType& ) {} + + template<typename MatrixType> + IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } + + template<typename MatrixType> + IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } + + template<typename MatrixType> + IdentityPreconditioner& compute(const MatrixType& ) { return *this; } + + template<typename Rhs> + inline const Rhs& solve(const Rhs& b) const { return b; } + + ComputationInfo info() { return Success; } +}; + +} // end namespace Eigen + +#endif // EIGEN_BASIC_PRECONDITIONERS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h new file mode 100644 index 000000000..454f46814 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BICGSTAB_H +#define EIGEN_BICGSTAB_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level bi conjugate gradient stabilized algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input an initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iterations, on output the number of performed iterations.
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error. + * \return false in the case of numerical issue, for example a breakdown of BiCGSTAB. + */ +template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> +bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix<Scalar,Dynamic,1> VectorType; + RealScalar tol = tol_error; + Index maxIters = iters; + + Index n = mat.cols(); + VectorType r = rhs - mat * x; + VectorType r0 = r; + + RealScalar r0_sqnorm = r0.squaredNorm(); + RealScalar rhs_sqnorm = rhs.squaredNorm(); + if(rhs_sqnorm == 0) + { + x.setZero(); + return true; + } + Scalar rho = 1; + Scalar alpha = 1; + Scalar w = 1; + + VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); + VectorType y(n), z(n); + VectorType kt(n), ks(n); + + VectorType s(n), t(n); + + RealScalar tol2 = tol*tol*rhs_sqnorm; + RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon(); + Index i = 0; + Index restarts = 0; + + while ( r.squaredNorm() > tol2 && i<maxIters ) + { + Scalar rho_old = rho; + + rho = r0.dot(r); + if (abs(rho) < eps2*r0_sqnorm) + { + // The new residual vector became too orthogonal to the arbitrarily chosen direction r0 + // Let's restart with a new r0: + r = rhs - mat * x; + r0 = r; + rho = r0_sqnorm = r.squaredNorm(); + if(restarts++ == 0) + i = 0; + } + Scalar beta = (rho/rho_old) * (alpha / w); + p = r + beta * (p - w * v); + + y = precond.solve(p); + + v.noalias() = mat * y; + + alpha = rho / r0.dot(v); + s = r - alpha * v; + + z = precond.solve(s); + t.noalias() = mat * z; + + RealScalar tmp = t.squaredNorm(); + if(tmp>RealScalar(0)) + w = t.dot(s) / tmp; + else + w = Scalar(0); + x += alpha * y + w * z; + r = s - w * t; + ++i; + } + tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); + iters = i; + return true; +} + +} + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > +class BiCGSTAB; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits<BiCGSTAB<_MatrixType,_Preconditioner> > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A bi conjugate gradient stabilized solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient + * stabilized algorithm. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits<Scalar>::epsilon() for the tolerance. + * + * The tolerance corresponds to the relative residual error: |Ax-b|/|b| + * + * \b Performance: when using sparse matrices, best performance is achieved for a row-major sparse matrix format. + * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. + * See \ref TopicMultiThreading for details. + * + * This class can be used in the same way as the direct solver classes. Here is a typical usage example: + * \include BiCGSTAB_simple.cpp + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
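The referenced BiCGSTAB_simple.cpp is not reproduced in this diff, so here is a sketch of presumably equivalent usage; the size and fill of the system are illustrative assumptions:

    #include <iostream>
    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    using namespace Eigen;

    int n = 100;
    SparseMatrix<double> A(n, n);  // assumed filled with a square (possibly non-symmetric) system
    VectorXd b(n);                 // assumed filled
    BiCGSTAB<SparseMatrix<double> > solver;
    solver.compute(A);
    VectorXd x = solver.solve(b);
    std::cout << "#iterations: " << solver.iterations()
              << ", estimated error: " << solver.error() << std::endl;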
+ * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> > +{ + typedef IterativeSolverBase<BiCGSTAB> Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + BiCGSTAB() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template<typename MatrixDerived> + explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} + + ~BiCGSTAB() {} + + /** \internal */ + template<typename Rhs,typename Dest> + void _solve_with_guess_impl(const Rhs& b, Dest& x) const + { + bool failed = false; + for(Index j=0; j<b.cols(); ++j) + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + typename Dest::ColXpr xj(x,j); + if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error)) + failed = true; + } + m_info = failed ? NumericalIssue + : m_error <= Base::m_tolerance ? Success + : NoConvergence; + m_isInitialized = true; + } + + /** \internal */ + using Base::_solve_impl; + template<typename Rhs,typename Dest> + void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const + { + x.resize(this->rows(),b.cols()); + x.setZero(); + _solve_with_guess_impl(b,x); + } + +protected: + +}; + +} // end namespace Eigen + +#endif // EIGEN_BICGSTAB_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h new file mode 100644 index 000000000..f7ce47134 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -0,0 +1,246 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONJUGATE_GRADIENT_H +#define EIGEN_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input an initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iterations, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+ */ +template +EIGEN_DONT_INLINE +void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + Index maxIters = iters; + + Index n = mat.cols(); + + VectorType residual = rhs - mat * x; //initial residual + + RealScalar rhsNorm2 = rhs.squaredNorm(); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + const RealScalar considerAsZero = (std::numeric_limits::min)(); + RealScalar threshold = numext::maxi(tol*tol*rhsNorm2,considerAsZero); + RealScalar residualNorm2 = residual.squaredNorm(); + if (residualNorm2 < threshold) + { + iters = 0; + tol_error = sqrt(residualNorm2 / rhsNorm2); + return; + } + + VectorType p(n); + p = precond.solve(residual); // initial search direction + + VectorType z(n), tmp(n); + RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM + Index i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; // the bottleneck of the algorithm + + Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residual + + residualNorm2 = residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(residual); // approximately solve for "A z = residual" + + RealScalar absOld = absNew; + absNew = numext::real(residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + +template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = DiagonalPreconditioner > +class ConjugateGradient; + +namespace internal { + +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems + * + * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm. + * The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, + * \c Upper, or \c Lower|Upper in which the full matrix entries will be considered. + * Default is \c Lower, best performance is \c Lower|Upper. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. 
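Concretely, the two knobs look like this — a sketch only, assuming a SparseMatrix<double> A and vectors b, x set up as in the usage example further below:

    ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
    cg.setMaxIterations(500);  // cap the iteration count (default: the problem size, per the doc above)
    cg.setTolerance(1e-8);     // relative residual |Ax-b|/|b|
    cg.compute(A);
    x = cg.solve(b);
    if (cg.info() != Success) { /* handle NoConvergence / NumericalIssue */ }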
+ * + * The tolerance corresponds to the relative residual error: |Ax-b|/|b| + * + * \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is + * achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this + * case multi-threading can be exploited if the user code is compiled with OpenMP enabled. + * See \ref TopicMultiThreading for details. + * + * This class can be used in the same way as the direct solver classes. Here is a typical usage example: + \code + int n = 10000; + VectorXd x(n), b(n); + SparseMatrix<double> A(n,n); + // fill A and b + ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg; + cg.compute(A); + x = cg.solve(b); + std::cout << "#iterations: " << cg.iterations() << std::endl; + std::cout << "estimated error: " << cg.error() << std::endl; + // update b, and solve again + x = cg.solve(b); + \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> > +{ + typedef IterativeSolverBase<ConjugateGradient> Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum { + UpLo = _UpLo + }; + +public: + + /** Default constructor. */ + ConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A.
+ */ + template<typename MatrixDerived> + explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} + + ~ConjugateGradient() {} + + /** \internal */ + template<typename Rhs,typename Dest> + void _solve_with_guess_impl(const Rhs& b, Dest& x) const + { + typedef typename Base::MatrixWrapper MatrixWrapper; + typedef typename Base::ActualMatrixType ActualMatrixType; + enum { + TransposeInput = (!MatrixWrapper::MatrixFree) + && (UpLo==(Lower|Upper)) + && (!MatrixType::IsRowMajor) + && (!NumTraits<Scalar>::IsComplex) + }; + typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper; + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); + typedef typename internal::conditional<UpLo==(Lower|Upper), + RowMajorWrapper, + typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type + >::type SelfAdjointWrapper; + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + for(Index j=0; j<b.cols(); ++j) + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + typename Dest::ColXpr xj(x,j); + RowMajorWrapper row_mat(matrix()); + internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); + } + + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + m_isInitialized = true; + } + + /** \internal */ + template<typename Rhs,typename Dest> + void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const + { + x.setZero(); + _solve_with_guess_impl(b.derived(),x); + } + +protected: + +}; + +} // end namespace Eigen + +#endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h new file mode 100644 index 000000000..e45c272b4 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h @@ -0,0 +1,400 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INCOMPLETE_CHOLESKY_H +#define EIGEN_INCOMPLETE_CHOLESKY_H + +#include <vector> +#include <list> + +namespace Eigen { +/** + * \brief Modified Incomplete Cholesky with dual threshold + * + * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with + * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 + * + * \tparam Scalar the scalar type of the input matrices + * \tparam _UpLo The triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering, + * unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering. + * + * \implsparsesolverconcept + * + * It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$ + * where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a + * fill-in reducing permutation as computed by the ordering method. + * + * \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out, + * and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed + * on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta|) I \f$ where + * \f$ \sigma \f$ is the initial shift value as returned and set by the setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$.
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType =
+#ifndef EIGEN_MPL2_ONLY
+AMDOrdering<int>
+#else
+NaturalOrdering<int>
+#endif
+>
+class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
+{
+  protected:
+    typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;
+    using Base::m_isInitialized;
+  public:
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef _OrderingType OrderingType;
+    typedef typename OrderingType::PermutationType PermutationType;
+    typedef typename PermutationType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType;
+    typedef Matrix<Scalar,Dynamic,1> VectorSx;
+    typedef Matrix<RealScalar,Dynamic,1> VectorRx;
+    typedef Matrix<StorageIndex,Dynamic,1> VectorIx;
+    typedef std::vector<std::list<StorageIndex> > VectorList;
+    enum { UpLo = _UpLo };
+    enum {
+      ColsAtCompileTime = Dynamic,
+      MaxColsAtCompileTime = Dynamic
+    };
+  public:
+
+    /** Default constructor leaving the object in a partly non-initialized stage.
+      *
+      * You must call compute() or the pair analyzePattern()/factorize() to make it valid.
+      *
+      * \sa IncompleteCholesky(const MatrixType&)
+      */
+    IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {}
+
+    /** Constructor computing the incomplete factorization for the given matrix \a matrix.
+      */
+    template<typename MatrixType>
+    IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false)
+    {
+      compute(matrix);
+    }
+
+    /** \returns number of rows of the factored matrix */
+    Index rows() const { return m_L.rows(); }
+
+    /** \returns number of columns of the factored matrix */
+    Index cols() const { return m_L.cols(); }
+
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * It triggers an assertion if \c *this has not been initialized through the respective constructor,
+      * or a call to compute() or analyzePattern().
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Set the initial shift parameter \f$ \sigma \f$.
+      */
+    void setInitialShift(RealScalar shift) { m_initialShift = shift; }
+
+    /** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat
+      */
+    template<typename MatrixType>
+    void analyzePattern(const MatrixType& mat)
+    {
+      OrderingType ord;
+      PermutationType pinv;
+      ord(mat.template selfadjointView<UpLo>(), pinv);
+      if(pinv.size()>0) m_perm = pinv.inverse();
+      else              m_perm.resize(0);
+      m_L.resize(mat.rows(), mat.cols());
+      m_analysisIsOk = true;
+      m_isInitialized = true;
+      m_info = Success;
+    }
+
+    /** \brief Performs the numerical factorization of the input matrix \a mat
+      *
+      * The method analyzePattern() or compute() must have been called beforehand
+      * with a matrix having the same pattern.
+      *
+      * \sa compute(), analyzePattern()
+      */
+    template<typename MatrixType>
+    void factorize(const MatrixType& mat);
+
+    /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat
+      *
+      * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods.
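The analyzePattern()/factorize() split lets the symbolic work be reused across matrices that share one sparsity pattern; a hedged sketch (the matrix sequence and `refactorize` helper are illustrative):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <vector>

    void refactorize(const std::vector<Eigen::SparseMatrix<double> >& mats)
    {
      Eigen::IncompleteCholesky<double> ic;
      ic.analyzePattern(mats.front());   // ordering + symbolic analysis, done once
      for (const auto& A : mats)
        ic.factorize(A);                 // numeric factorization only
    }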
+ * + * \sa analyzePattern(), factorize() + */ + template + void compute(const MatrixType& mat) + { + analyzePattern(mat); + factorize(mat); + } + + // internal + template + void _solve_impl(const Rhs& b, Dest& x) const + { + eigen_assert(m_factorizationIsOk && "factorize() should be called first"); + if (m_perm.rows() == b.rows()) x = m_perm * b; + else x = b; + x = m_scale.asDiagonal() * x; + x = m_L.template triangularView().solve(x); + x = m_L.adjoint().template triangularView().solve(x); + x = m_scale.asDiagonal() * x; + if (m_perm.rows() == b.rows()) + x = m_perm.inverse() * x; + } + + /** \returns the sparse lower triangular factor L */ + const FactorType& matrixL() const { eigen_assert("m_factorizationIsOk"); return m_L; } + + /** \returns a vector representing the scaling factor S */ + const VectorRx& scalingS() const { eigen_assert("m_factorizationIsOk"); return m_scale; } + + /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */ + const PermutationType& permutationP() const { eigen_assert("m_analysisIsOk"); return m_perm; } + + protected: + FactorType m_L; // The lower part stored in CSC + VectorRx m_scale; // The vector for scaling the matrix + RealScalar m_initialShift; // The initial shift parameter + bool m_analysisIsOk; + bool m_factorizationIsOk; + ComputationInfo m_info; + PermutationType m_perm; + + private: + inline void updateList(Ref colPtr, Ref rowIdx, Ref vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol); +}; + +// Based on the following paper: +// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with +// Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 +// http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf +template +template +void IncompleteCholesky::factorize(const _MatrixType& mat) +{ + using std::sqrt; + eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); + + // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. 
Other strategies will be added + + // Apply the fill-reducing permutation computed in analyzePattern() + if (m_perm.rows() == mat.rows() ) // To detect the null permutation + { + // The temporary is needed to make sure that the diagonal entry is properly sorted + FactorType tmp(mat.rows(), mat.cols()); + tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm); + m_L.template selfadjointView() = tmp.template selfadjointView(); + } + else + { + m_L.template selfadjointView() = mat.template selfadjointView<_UpLo>(); + } + + Index n = m_L.cols(); + Index nnz = m_L.nonZeros(); + Map vals(m_L.valuePtr(), nnz); //values + Map rowIdx(m_L.innerIndexPtr(), nnz); //Row indices + Map colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row + VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization + VectorList listCol(n); // listCol(j) is a linked list of columns to update column j + VectorSx col_vals(n); // Store a nonzero values in each column + VectorIx col_irow(n); // Row indices of nonzero elements in each column + VectorIx col_pattern(n); + col_pattern.fill(-1); + StorageIndex col_nnz; + + + // Computes the scaling factors + m_scale.resize(n); + m_scale.setZero(); + for (Index j = 0; j < n; j++) + for (Index k = colPtr[j]; k < colPtr[j+1]; k++) + { + m_scale(j) += numext::abs2(vals(k)); + if(rowIdx[k]!=j) + m_scale(rowIdx[k]) += numext::abs2(vals(k)); + } + + m_scale = m_scale.cwiseSqrt().cwiseSqrt(); + + for (Index j = 0; j < n; ++j) + if(m_scale(j)>(std::numeric_limits::min)()) + m_scale(j) = RealScalar(1)/m_scale(j); + else + m_scale(j) = 1; + + // TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster) + + // Scale and compute the shift for the matrix + RealScalar mindiag = NumTraits::highest(); + for (Index j = 0; j < n; j++) + { + for (Index k = colPtr[j]; k < colPtr[j+1]; k++) + vals[k] *= (m_scale(j)*m_scale(rowIdx[k])); + eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored"); + mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag); + } + + FactorType L_save = m_L; + + RealScalar shift = 0; + if(mindiag <= RealScalar(0.)) + shift = m_initialShift - mindiag; + + m_info = NumericalIssue; + + // Try to perform the incomplete factorization using the current shift + int iter = 0; + do + { + // Apply the shift to the diagonal elements of the matrix + for (Index j = 0; j < n; j++) + vals[colPtr[j]] += shift; + + // jki version of the Cholesky factorization + Index j=0; + for (; j < n; ++j) + { + // Left-looking factorization of the j-th column + // First, load the j-th column into col_vals + Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored + col_nnz = 0; + for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++) + { + StorageIndex l = rowIdx[i]; + col_vals(col_nnz) = vals[i]; + col_irow(col_nnz) = l; + col_pattern(l) = col_nnz; + col_nnz++; + } + { + typename std::list::iterator k; + // Browse all previous columns that will update column j + for(k = listCol[j].begin(); k != listCol[j].end(); k++) + { + Index jk = firstElt(*k); // First element to use in the column + eigen_internal_assert(rowIdx[jk]==j); + Scalar v_j_jk = numext::conj(vals[jk]); + + jk += 1; + for (Index i = jk; i < colPtr[*k+1]; i++) + { + StorageIndex l = rowIdx[i]; + if(col_pattern[l]<0) + { + col_vals(col_nnz) = vals[i] * v_j_jk; + col_irow[col_nnz] = l; + col_pattern(l) = col_nnz; + col_nnz++; + } + else + 
col_vals(col_pattern[l]) -= vals[i] * v_j_jk; + } + updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol); + } + } + + // Scale the current column + if(numext::real(diag) <= 0) + { + if(++iter>=10) + return; + + // increase shift + shift = numext::maxi(m_initialShift,RealScalar(2)*shift); + // restore m_L, col_pattern, and listCol + vals = Map(L_save.valuePtr(), nnz); + rowIdx = Map(L_save.innerIndexPtr(), nnz); + colPtr = Map(L_save.outerIndexPtr(), n+1); + col_pattern.fill(-1); + for(Index i=0; i cvals = col_vals.head(col_nnz); + Ref cirow = col_irow.head(col_nnz); + internal::QuickSplit(cvals,cirow, p); + // Insert the largest p elements in the matrix + Index cpt = 0; + for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++) + { + vals[i] = col_vals(cpt); + rowIdx[i] = col_irow(cpt); + // restore col_pattern: + col_pattern(col_irow(cpt)) = -1; + cpt++; + } + // Get the first smallest row index and put it after the diagonal element + Index jk = colPtr(j)+1; + updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); + } + + if(j==n) + { + m_factorizationIsOk = true; + m_info = Success; + } + } while(m_info!=Success); +} + +template +inline void IncompleteCholesky::updateList(Ref colPtr, Ref rowIdx, Ref vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol) +{ + if (jk < colPtr(col+1) ) + { + Index p = colPtr(col+1) - jk; + Index minpos; + rowIdx.segment(jk,p).minCoeff(&minpos); + minpos += jk; + if (rowIdx(minpos) != rowIdx(jk)) + { + //Swap + std::swap(rowIdx(jk),rowIdx(minpos)); + std::swap(vals(jk),vals(minpos)); + } + firstElt(col) = internal::convert_index(jk); + listCol[rowIdx(jk)].push_back(internal::convert_index(col)); + } +} + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h new file mode 100644 index 000000000..338e6f10a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -0,0 +1,462 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
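After a successful compute(), the pieces of the IncompleteCholesky factorization above are reachable through matrixL(), scalingS() and permutationP(); a brief hedged sketch, including the raise-the-initial-shift fallback suggested in the comments (the `inspect` helper is illustrative):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>

    void inspect(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::IncompleteCholesky<double> ic(A);
      if (ic.info() == Eigen::Success) {
        std::cout << "nnz(L) = " << ic.matrixL().nonZeros() << std::endl;
      } else {
        ic.setInitialShift(1e-1);  // raise sigma and retry, per the shifting notes
        ic.compute(A);
      }
    }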
+ +#ifndef EIGEN_INCOMPLETE_LUT_H +#define EIGEN_INCOMPLETE_LUT_H + + +namespace Eigen { + +namespace internal { + +/** \internal + * Compute a quick-sort split of a vector + * On output, the vector row is permuted such that its elements satisfy + * abs(row(i)) >= abs(row(ncut)) if incut + * \param row The vector of values + * \param ind The array of index for the elements in @p row + * \param ncut The number of largest elements to keep + **/ +template +Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) +{ + typedef typename VectorV::RealScalar RealScalar; + using std::swap; + using std::abs; + Index mid; + Index n = row.size(); /* length of the vector */ + Index first, last ; + + ncut--; /* to fit the zero-based indices */ + first = 0; + last = n-1; + if (ncut < first || ncut > last ) return 0; + + do { + mid = first; + RealScalar abskey = abs(row(mid)); + for (Index j = first + 1; j <= last; j++) { + if ( abs(row(j)) > abskey) { + ++mid; + swap(row(mid), row(j)); + swap(ind(mid), ind(j)); + } + } + /* Interchange for the pivot element */ + swap(row(mid), row(first)); + swap(ind(mid), ind(first)); + + if (mid > ncut) last = mid - 1; + else if (mid < ncut ) first = mid + 1; + } while (mid != ncut ); + + return 0; /* mid is equal to ncut */ +} + +}// end namespace internal + +/** \ingroup IterativeLinearSolvers_Module + * \class IncompleteLUT + * \brief Incomplete LU factorization with dual-threshold strategy + * + * \implsparsesolverconcept + * + * During the numerical factorization, two dropping rules are used : + * 1) any element whose magnitude is less than some tolerance is dropped. + * This tolerance is obtained by multiplying the input tolerance @p droptol + * by the average magnitude of all the original elements in the current row. + * 2) After the elimination of the row, only the @p fill largest elements in + * the L part and the @p fill largest elements in the U part are kept + * (in addition to the diagonal element ). Note that @p fill is computed from + * the input parameter @p fillfactor which is used the ratio to control the fill_in + * relatively to the initial number of nonzero elements. + * + * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) + * and when @p fill=n/2 with @p droptol being different to zero. + * + * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, + * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. + * + * NOTE : The following implementation is derived from the ILUT implementation + * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota + * released under the terms of the GNU LGPL: + * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README + * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. 
+ * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: + * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html + * alternatively, on GMANE: + * http://comments.gmane.org/gmane.comp.lib.eigen/3302 + */ +template +class IncompleteLUT : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase Base; + using Base::m_isInitialized; + public: + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef typename NumTraits::Real RealScalar; + typedef Matrix Vector; + typedef Matrix VectorI; + typedef SparseMatrix FactorType; + + enum { + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + + public: + + IncompleteLUT() + : m_droptol(NumTraits::dummy_precision()), m_fillfactor(10), + m_analysisIsOk(false), m_factorizationIsOk(false) + {} + + template + explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits::dummy_precision(), int fillfactor = 10) + : m_droptol(droptol),m_fillfactor(fillfactor), + m_analysisIsOk(false),m_factorizationIsOk(false) + { + eigen_assert(fillfactor != 0); + compute(mat); + } + + Index rows() const { return m_lu.rows(); } + + Index cols() const { return m_lu.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + return m_info; + } + + template + void analyzePattern(const MatrixType& amat); + + template + void factorize(const MatrixType& amat); + + /** + * Compute an incomplete LU factorization with dual threshold on the matrix mat + * No pivoting is done in this version + * + **/ + template + IncompleteLUT& compute(const MatrixType& amat) + { + analyzePattern(amat); + factorize(amat); + return *this; + } + + void setDroptol(const RealScalar& droptol); + void setFillfactor(int fillfactor); + + template + void _solve_impl(const Rhs& b, Dest& x) const + { + x = m_Pinv * b; + x = m_lu.template triangularView().solve(x); + x = m_lu.template triangularView().solve(x); + x = m_P * x; + } + +protected: + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + +protected: + + FactorType m_lu; + RealScalar m_droptol; + int m_fillfactor; + bool m_analysisIsOk; + bool m_factorizationIsOk; + ComputationInfo m_info; + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation +}; + +/** + * Set control parameter droptol + * \param droptol Drop any element whose magnitude is less than this tolerance + **/ +template +void IncompleteLUT::setDroptol(const RealScalar& droptol) +{ + this->m_droptol = droptol; +} + +/** + * Set control parameter fillfactor + * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. + **/ +template +void IncompleteLUT::setFillfactor(int fillfactor) +{ + this->m_fillfactor = fillfactor; +} + +template +template +void IncompleteLUT::analyzePattern(const _MatrixType& amat) +{ + // Compute the Fill-reducing permutation + // Since ILUT does not perform any numerical pivoting, + // it is highly preferable to keep the diagonal through symmetric permutations. +#ifndef EIGEN_MPL2_ONLY + // To this end, let's symmetrize the pattern and perform AMD on it. 
+ SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); + // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. + // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... + SparseMatrix AtA = mat2 + mat1; + AMDOrdering ordering; + ordering(AtA,m_P); + m_Pinv = m_P.inverse(); // cache the inverse permutation +#else + // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine. + SparseMatrix mat1 = amat; + COLAMDOrdering ordering; + ordering(mat1,m_Pinv); + m_P = m_Pinv.inverse(); +#endif + + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_isInitialized = true; +} + +template +template +void IncompleteLUT::factorize(const _MatrixType& amat) +{ + using std::sqrt; + using std::swap; + using std::abs; + using internal::convert_index; + + eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); + Index n = amat.cols(); // Size of the matrix + m_lu.resize(n,n); + // Declare Working vectors and variables + Vector u(n) ; // real values of the row -- maximum size is n -- + VectorI ju(n); // column position of the values in u -- maximum size is n + VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 + + // Apply the fill-reducing permutation + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + SparseMatrix mat; + mat = amat.twistedBy(m_Pinv); + + // Initialization + jr.fill(-1); + ju.fill(0); + u.fill(0); + + // number of largest elements to keep in each row: + Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; + if (fill_in > n) fill_in = n; + + // number of largest nonzero elements to keep in the L and the U part of the current row: + Index nnzL = fill_in/2; + Index nnzU = nnzL; + m_lu.reserve(n * (nnzL + nnzU + 1)); + + // global loop over the rows of the sparse matrix + for (Index ii = 0; ii < n; ii++) + { + // 1 - copy the lower and the upper part of the row i of mat in the working vector u + + Index sizeu = 1; // number of nonzero elements in the upper part of the current row + Index sizel = 0; // number of nonzero elements in the lower part of the current row + ju(ii) = convert_index(ii); + u(ii) = 0; + jr(ii) = convert_index(ii); + RealScalar rownorm = 0; + + typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii + for (; j_it; ++j_it) + { + Index k = j_it.index(); + if (k < ii) + { + // copy the lower part + ju(sizel) = convert_index(k); + u(sizel) = j_it.value(); + jr(k) = convert_index(sizel); + ++sizel; + } + else if (k == ii) + { + u(ii) = j_it.value(); + } + else + { + // copy the upper part + Index jpos = ii + sizeu; + ju(jpos) = convert_index(k); + u(jpos) = j_it.value(); + jr(k) = convert_index(jpos); + ++sizeu; + } + rownorm += numext::abs2(j_it.value()); + } + + // 2 - detect possible zero row + if(rownorm==0) + { + m_info = NumericalIssue; + return; + } + // Take the 2-norm of the current row as a relative tolerance + rownorm = sqrt(rownorm); + + // 3 - eliminate the previous nonzero rows + Index jj = 0; + Index len = 0; + while (jj < sizel) + { + // In order to eliminate in the correct order, + // we must select first the smallest column index among ju(jj:sizel) + Index k; + Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + k += jj; + if (minrow != ju(jj)) + { + // swap the two locations + Index j = ju(jj); + swap(ju(jj), ju(k)); + jr(minrow) = convert_index(jj); + 
jr(j) = convert_index(k); + swap(u(jj), u(k)); + } + // Reset this location + jr(minrow) = -1; + + // Start elimination + typename FactorType::InnerIterator ki_it(m_lu, minrow); + while (ki_it && ki_it.index() < minrow) ++ki_it; + eigen_internal_assert(ki_it && ki_it.col()==minrow); + Scalar fact = u(jj) / ki_it.value(); + + // drop too small elements + if(abs(fact) <= m_droptol) + { + jj++; + continue; + } + + // linear combination of the current row ii and the row minrow + ++ki_it; + for (; ki_it; ++ki_it) + { + Scalar prod = fact * ki_it.value(); + Index j = ki_it.index(); + Index jpos = jr(j); + if (jpos == -1) // fill-in element + { + Index newpos; + if (j >= ii) // dealing with the upper part + { + newpos = ii + sizeu; + sizeu++; + eigen_internal_assert(sizeu<=n); + } + else // dealing with the lower part + { + newpos = sizel; + sizel++; + eigen_internal_assert(sizel<=ii); + } + ju(newpos) = convert_index(j); + u(newpos) = -prod; + jr(j) = convert_index(newpos); + } + else + u(jpos) -= prod; + } + // store the pivot element + u(len) = fact; + ju(len) = convert_index(minrow); + ++len; + + jj++; + } // end of the elimination on the row ii + + // reset the upper part of the pointer jr to zero + for(Index k = 0; k m_droptol * rownorm ) + { + ++len; + u(ii + len) = u(ii + k); + ju(ii + len) = ju(ii + k); + } + } + sizeu = len + 1; // +1 to take into account the diagonal element + len = (std::min)(sizeu, nnzU); + typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); + typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); + internal::QuickSplit(uu, juu, len); + + // store the largest elements of the U part + for(Index k = ii + 1; k < ii + len; k++) + m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); + } + m_lu.finalize(); + m_lu.makeCompressed(); + + m_factorizationIsOk = true; + m_info = Success; +} + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LUT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h new file mode 100644 index 000000000..7c2326eb7 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -0,0 +1,394 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
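The IncompleteLUT factorization above is typically driven through BiCGSTAB for nonsymmetric systems; a minimal sketch assuming the vendored IterativeLinearSolvers module also ships BiCGSTAB (as upstream Eigen 3.3 does), with illustrative parameter values:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    Eigen::VectorXd solve_nonsym(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
    {
      Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
                      Eigen::IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-4);   // dropping rule 1 above
      solver.preconditioner().setFillfactor(10);  // dropping rule 2 above
      solver.compute(A);
      return solver.solve(b);
    }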
+ +#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H +#define EIGEN_ITERATIVE_SOLVER_BASE_H + +namespace Eigen { + +namespace internal { + +template +struct is_ref_compatible_impl +{ +private: + template + struct any_conversion + { + template any_conversion(const volatile T&); + template any_conversion(T&); + }; + struct yes {int a[1];}; + struct no {int a[2];}; + + template + static yes test(const Ref&, int); + template + static no test(any_conversion, ...); + +public: + static MatrixType ms_from; + enum { value = sizeof(test(ms_from, 0))==sizeof(yes) }; +}; + +template +struct is_ref_compatible +{ + enum { value = is_ref_compatible_impl::type>::value }; +}; + +template::value> +class generic_matrix_wrapper; + +// We have an explicit matrix at hand, compatible with Ref<> +template +class generic_matrix_wrapper +{ +public: + typedef Ref ActualMatrixType; + template struct ConstSelfAdjointViewReturnType { + typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType::Type Type; + }; + + enum { + MatrixFree = false + }; + + generic_matrix_wrapper() + : m_dummy(0,0), m_matrix(m_dummy) + {} + + template + generic_matrix_wrapper(const InputType &mat) + : m_matrix(mat) + {} + + const ActualMatrixType& matrix() const + { + return m_matrix; + } + + template + void grab(const EigenBase &mat) + { + m_matrix.~Ref(); + ::new (&m_matrix) Ref(mat.derived()); + } + + void grab(const Ref &mat) + { + if(&(mat.derived()) != &m_matrix) + { + m_matrix.~Ref(); + ::new (&m_matrix) Ref(mat); + } + } + +protected: + MatrixType m_dummy; // used to default initialize the Ref<> object + ActualMatrixType m_matrix; +}; + +// MatrixType is not compatible with Ref<> -> matrix-free wrapper +template +class generic_matrix_wrapper +{ +public: + typedef MatrixType ActualMatrixType; + template struct ConstSelfAdjointViewReturnType + { + typedef ActualMatrixType Type; + }; + + enum { + MatrixFree = true + }; + + generic_matrix_wrapper() + : mp_matrix(0) + {} + + generic_matrix_wrapper(const MatrixType &mat) + : mp_matrix(&mat) + {} + + const ActualMatrixType& matrix() const + { + return *mp_matrix; + } + + void grab(const MatrixType &mat) + { + mp_matrix = &mat; + } + +protected: + const ActualMatrixType *mp_matrix; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief Base class for linear iterative solvers + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename Derived> +class IterativeSolverBase : public SparseSolverBase +{ +protected: + typedef SparseSolverBase Base; + using Base::m_isInitialized; + +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename internal::traits::Preconditioner Preconditioner; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename MatrixType::RealScalar RealScalar; + + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + +public: + + using Base::derived; + + /** Default constructor. */ + IterativeSolverBase() + { + init(); + } + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. 
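The \warning above matters in update loops: the solver stores only a reference to A, so after modifying A one must call compute() (or factorize()) again before solving; solveWithGuess(), documented further below, then lets the previous solution seed the iterations. A hedged sketch with an illustrative `update_and_resolve` helper:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    void update_and_resolve(Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
    {
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                               Eigen::Lower | Eigen::Upper> cg(A);
      Eigen::VectorXd x = cg.solve(b);

      A.coeffRef(0, 0) += 1.0;       // the stored reference now sees new values...
      cg.compute(A);                 // ...but the preconditioner must be refreshed
      x = cg.solveWithGuess(b, x);   // warm start from the previous solution
    }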
+ */ + template + explicit IterativeSolverBase(const EigenBase& A) + : m_matrixWrapper(A.derived()) + { + init(); + compute(matrix()); + } + + ~IterativeSolverBase() {} + + /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly calls analyzePattern on the preconditioner. In the future + * we might, for instance, implement column reordering for faster matrix vector products. + */ + template + Derived& analyzePattern(const EigenBase& A) + { + grab(A.derived()); + m_preconditioner.analyzePattern(matrix()); + m_isInitialized = true; + m_analysisIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly calls factorize on the preconditioner. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + Derived& factorize(const EigenBase& A) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + grab(A.derived()); + m_preconditioner.factorize(matrix()); + m_factorizationIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly initializes/computes the preconditioner. In the future + * we might, for instance, implement column reordering for faster matrix vector products. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + Derived& compute(const EigenBase& A) + { + grab(A.derived()); + m_preconditioner.compute(matrix()); + m_isInitialized = true; + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** \internal */ + Index rows() const { return matrix().rows(); } + + /** \internal */ + Index cols() const { return matrix().cols(); } + + /** \returns the tolerance threshold used by the stopping criteria. + * \sa setTolerance() + */ + RealScalar tolerance() const { return m_tolerance; } + + /** Sets the tolerance threshold used by the stopping criteria. + * + * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|. + * The default value is the machine precision given by NumTraits::epsilon() + */ + Derived& setTolerance(const RealScalar& tolerance) + { + m_tolerance = tolerance; + return derived(); + } + + /** \returns a read-write reference to the preconditioner for custom configuration. */ + Preconditioner& preconditioner() { return m_preconditioner; } + + /** \returns a read-only reference to the preconditioner. */ + const Preconditioner& preconditioner() const { return m_preconditioner; } + + /** \returns the max number of iterations. + * It is either the value setted by setMaxIterations or, by default, + * twice the number of columns of the matrix. + */ + Index maxIterations() const + { + return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations; + } + + /** Sets the max number of iterations. 
+ * Default is twice the number of columns of the matrix. + */ + Derived& setMaxIterations(Index maxIters) + { + m_maxIterations = maxIters; + return derived(); + } + + /** \returns the number of iterations performed during the last solve */ + Index iterations() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_iterations; + } + + /** \returns the tolerance error reached during the last solve. + * It is a close approximation of the true relative residual error |Ax-b|/|b|. + */ + RealScalar error() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_error; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * and \a x0 as an initial solution. + * + * \sa solve(), compute() + */ + template + inline const SolveWithGuess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "Solver is not initialized."); + eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); + return SolveWithGuess(derived(), b.derived(), x0); + } + + /** \returns Success if the iterations converged, and NoConvergence otherwise. */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + return m_info; + } + + /** \internal */ + template + void _solve_impl(const Rhs& b, SparseMatrixBase &aDest) const + { + eigen_assert(rows()==b.rows()); + + Index rhsCols = b.cols(); + Index size = b.rows(); + DestDerived& dest(aDest.derived()); + typedef typename DestDerived::Scalar DestScalar; + Eigen::Matrix tb(size); + Eigen::Matrix tx(cols()); + // We do not directly fill dest because sparse expressions have to be free of aliasing issue. + // For non square least-square problems, b and dest might not have the same size whereas they might alias each-other. + typename DestDerived::PlainObject tmp(cols(),rhsCols); + for(Index k=0; k::epsilon(); + } + + typedef internal::generic_matrix_wrapper MatrixWrapper; + typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType; + + const ActualMatrixType& matrix() const + { + return m_matrixWrapper.matrix(); + } + + template + void grab(const InputType &A) + { + m_matrixWrapper.grab(A); + } + + MatrixWrapper m_matrixWrapper; + Preconditioner m_preconditioner; + + Index m_maxIterations; + RealScalar m_tolerance; + + mutable RealScalar m_error; + mutable Index m_iterations; + mutable ComputationInfo m_info; + mutable bool m_analysisIsOk, m_factorizationIsOk; +}; + +} // end namespace Eigen + +#endif // EIGEN_ITERATIVE_SOLVER_BASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h new file mode 100644 index 000000000..0aea0e099 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h @@ -0,0 +1,216 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H +#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm for least-square problems + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of A'Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ +template +EIGEN_DONT_INLINE +void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + Index maxIters = iters; + + Index m = mat.rows(), n = mat.cols(); + + VectorType residual = rhs - mat * x; + VectorType normal_residual = mat.adjoint() * residual; + + RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm(); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + RealScalar threshold = tol*tol*rhsNorm2; + RealScalar residualNorm2 = normal_residual.squaredNorm(); + if (residualNorm2 < threshold) + { + iters = 0; + tol_error = sqrt(residualNorm2 / rhsNorm2); + return; + } + + VectorType p(n); + p = precond.solve(normal_residual); // initial search direction + + VectorType z(n), tmp(m); + RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM + Index i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; + + Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residual + normal_residual = mat.adjoint() * residual; // update residual of the normal equation + + residualNorm2 = normal_residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual" + + RealScalar absOld = absNew; + absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + +template< typename _MatrixType, + typename _Preconditioner = LeastSquareDiagonalPreconditioner > +class LeastSquaresConjugateGradient; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse (or dense) least-square problems + * + * This class allows to solve for A x = b linear problems using an iterative conjugate gradient algorithm. + * The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability. 
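A self-contained variant of the usage sample given in the class documentation below, assuming the vendored Eigen 3.3 headers (the random dense-to-sparse setup merely stands in for "fill A and b"):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>

    int main()
    {
      const int m = 2000, n = 100;
      Eigen::SparseMatrix<double> A = Eigen::MatrixXd::Random(m, n).sparseView();
      Eigen::VectorXd b = Eigen::VectorXd::Random(m), x(n);

      Eigen::LeastSquaresConjugateGradient<Eigen::SparseMatrix<double> > lscg;
      lscg.compute(A);
      x = lscg.solve(b);  // minimizes |Ax - b| via the normal equation A'Ax = A'b
      std::cout << "#iterations:     " << lscg.iterations() << "\n"
                << "estimated error: " << lscg.error() << std::endl;
      return 0;
    }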
+ * Otherwise, the SparseLU or SparseQR classes might be preferable. + * The matrix A and the vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + \code + int m=1000000, n = 10000; + VectorXd x(n), b(m); + SparseMatrix A(m,n); + // fill A and b + LeastSquaresConjugateGradient > lscg; + lscg.compute(A); + x = lscg.solve(b); + std::cout << "#iterations: " << lscg.iterations() << std::endl; + std::cout << "estimated error: " << lscg.error() << std::endl; + // update b, and solve again + x = lscg.solve(b); + \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * \sa class ConjugateGradient, SparseLU, SparseQR + */ +template< typename _MatrixType, typename _Preconditioner> +class LeastSquaresConjugateGradient : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + LeastSquaresConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit LeastSquaresConjugateGradient(const EigenBase& A) : Base(A.derived()) {} + + ~LeastSquaresConjugateGradient() {} + + /** \internal */ + template + void _solve_with_guess_impl(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + for(Index j=0; j + void _solve_impl(const MatrixBase& b, Dest& x) const + { + x.setZero(); + _solve_with_guess_impl(b.derived(),x); + } + +}; + +} // end namespace Eigen + +#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h new file mode 100644 index 000000000..0ace45177 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h @@ -0,0 +1,115 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SOLVEWITHGUESS_H +#define EIGEN_SOLVEWITHGUESS_H + +namespace Eigen { + +template class SolveWithGuess; + +/** \class SolveWithGuess + * \ingroup IterativeLinearSolvers_Module + * + * \brief Pseudo expression representing a solving operation + * + * \tparam Decomposition the type of the matrix or decomposion object + * \tparam Rhstype the type of the right-hand side + * + * This class represents an expression of A.solve(B) + * and most of the time this is the only way it is used. + * + */ +namespace internal { + + +template +struct traits > + : traits > +{}; + +} + + +template +class SolveWithGuess : public internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type +{ +public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::PlainObject PlainObject; + typedef typename internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type Base; + typedef typename internal::ref_selector::type Nested; + + SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess) + : m_dec(dec), m_rhs(rhs), m_guess(guess) + {} + + EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } + EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } + + EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } + EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } + EIGEN_DEVICE_FUNC const GuessType& guess() const { return m_guess; } + +protected: + const Decomposition &m_dec; + const RhsType &m_rhs; + const GuessType &m_guess; + +private: + Scalar coeff(Index row, Index col) const; + Scalar coeff(Index i) const; +}; + +namespace internal { + +// Evaluator of SolveWithGuess -> eval into a temporary +template +struct evaluator > + : public evaluator::PlainObject> +{ + typedef SolveWithGuess SolveType; + typedef typename SolveType::PlainObject PlainObject; + typedef evaluator Base; + + evaluator(const SolveType& solve) + : m_result(solve.rows(), solve.cols()) + { + ::new (static_cast(this)) Base(m_result); + m_result = solve.guess(); + solve.dec()._solve_with_guess_impl(solve.rhs(), m_result); + } + +protected: + PlainObject m_result; +}; + +// Specialization for "dst = dec.solveWithGuess(rhs)" +// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere +template +struct Assignment, internal::assign_op, Dense2Dense> +{ + typedef SolveWithGuess SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + dst = src.guess(); + src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/); + } +}; + +} // end namepsace internal + +} // end namespace Eigen + +#endif // EIGEN_SOLVEWITHGUESS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Jacobi/Jacobi.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Jacobi/Jacobi.h new file mode 100644 index 000000000..1998c6322 --- /dev/null +++ 
b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/Jacobi/Jacobi.h @@ -0,0 +1,462 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_JACOBI_H +#define EIGEN_JACOBI_H + +namespace Eigen { + +/** \ingroup Jacobi_Module + * \jacobi_module + * \class JacobiRotation + * \brief Rotation given by a cosine-sine pair. + * + * This class represents a Jacobi or Givens rotation. + * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by + * its cosine \c c and sine \c s as follow: + * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s & \overline c \end{array} \right ) \f$ + * + * You can apply the respective counter-clockwise rotation to a column vector \c v by + * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code: + * \code + * v.applyOnTheLeft(J.adjoint()); + * \endcode + * + * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + */ +template class JacobiRotation +{ + public: + typedef typename NumTraits::Real RealScalar; + + /** Default constructor without any initialization. */ + JacobiRotation() {} + + /** Construct a planar rotation from a cosine-sine pair (\a c, \c s). */ + JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {} + + Scalar& c() { return m_c; } + Scalar c() const { return m_c; } + Scalar& s() { return m_s; } + Scalar s() const { return m_s; } + + /** Concatenates two planar rotation */ + JacobiRotation operator*(const JacobiRotation& other) + { + using numext::conj; + return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s, + conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c))); + } + + /** Returns the transposed transformation */ + JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); } + + /** Returns the adjoint transformation */ + JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); } + + template + bool makeJacobi(const MatrixBase&, Index p, Index q); + bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z); + + void makeGivens(const Scalar& p, const Scalar& q, Scalar* r=0); + + protected: + void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type); + void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type); + + Scalar m_c, m_s; +}; + +/** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix + * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ + * + * \sa MatrixBase::makeJacobi(const MatrixBase&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + */ +template +bool JacobiRotation::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z) +{ + using std::sqrt; + using std::abs; + RealScalar deno = RealScalar(2)*abs(y); + if(deno < (std::numeric_limits::min)()) + { + m_c = Scalar(1); + m_s = Scalar(0); + return false; + } + else + { + RealScalar tau = (x-z)/deno; + RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1)); + RealScalar t; + 
if(tau>RealScalar(0)) + { + t = RealScalar(1) / (tau + w); + } + else + { + t = RealScalar(1) / (tau - w); + } + RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1); + RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1)); + m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n; + m_c = n; + return true; + } +} + +/** Makes \c *this as a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix + * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields + * a diagonal matrix \f$ A = J^* B J \f$ + * + * Example: \include Jacobi_makeJacobi.cpp + * Output: \verbinclude Jacobi_makeJacobi.out + * + * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + */ +template +template +inline bool JacobiRotation::makeJacobi(const MatrixBase& m, Index p, Index q) +{ + return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q))); +} + +/** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector + * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields: + * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$. + * + * The value of \a r is returned if \a r is not null (the default is null). + * Also note that G is built such that the cosine is always real. + * + * Example: \include Jacobi_makeGivens.cpp + * Output: \verbinclude Jacobi_makeGivens.out + * + * This function implements the continuous Givens rotation generation algorithm + * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem. + * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000. + * + * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + */ +template +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r) +{ + makeGivens(p, q, r, typename internal::conditional::IsComplex, internal::true_type, internal::false_type>::type()); +} + + +// specialization for complexes +template +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type) +{ + using std::sqrt; + using std::abs; + using numext::conj; + + if(q==Scalar(0)) + { + m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1); + m_s = 0; + if(r) *r = m_c * p; + } + else if(p==Scalar(0)) + { + m_c = 0; + m_s = -q/abs(q); + if(r) *r = abs(q); + } + else + { + RealScalar p1 = numext::norm1(p); + RealScalar q1 = numext::norm1(q); + if(p1>=q1) + { + Scalar ps = p / p1; + RealScalar p2 = numext::abs2(ps); + Scalar qs = q / p1; + RealScalar q2 = numext::abs2(qs); + + RealScalar u = sqrt(RealScalar(1) + q2/p2); + if(numext::real(p) +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type) +{ + using std::sqrt; + using std::abs; + if(q==Scalar(0)) + { + m_c = p abs(q)) + { + Scalar t = q/p; + Scalar u = sqrt(Scalar(1) + numext::abs2(t)); + if(p +void apply_rotation_in_the_plane(DenseBase& xpr_x, DenseBase& xpr_y, const JacobiRotation& j); +} + +/** \jacobi_module + * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B, + * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$. 
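A hedged usage sketch of the rotation API documented above: build a Givens rotation that annihilates the second entry of a 2-vector, then apply its adjoint on the left, matching \f$ G^* V = (r, 0)^T \f$ (values chosen so r = 5):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector2d v(3.0, 4.0);
      Eigen::JacobiRotation<double> G;
      double r;
      G.makeGivens(v.x(), v.y(), &r);        // G^* [3 4]^T = [r 0]^T
      v.applyOnTheLeft(0, 1, G.adjoint());   // rotate rows 0 and 1 of v
      std::cout << v.transpose() << "  r = " << r << std::endl;  // ~ "5 0  r = 5"
      return 0;
    }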
+ * + * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane() + */ +template +template +inline void MatrixBase::applyOnTheLeft(Index p, Index q, const JacobiRotation& j) +{ + RowXpr x(this->row(p)); + RowXpr y(this->row(q)); + internal::apply_rotation_in_the_plane(x, y, j); +} + +/** \ingroup Jacobi_Module + * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J + * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$. + * + * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane() + */ +template +template +inline void MatrixBase::applyOnTheRight(Index p, Index q, const JacobiRotation& j) +{ + ColXpr x(this->col(p)); + ColXpr y(this->col(q)); + internal::apply_rotation_in_the_plane(x, y, j.transpose()); +} + +namespace internal { + +template +struct apply_rotation_in_the_plane_selector +{ + static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s) + { + for(Index i=0; i +struct apply_rotation_in_the_plane_selector +{ + static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s) + { + enum { + PacketSize = packet_traits::size, + OtherPacketSize = packet_traits::size + }; + typedef typename packet_traits::type Packet; + typedef typename packet_traits::type OtherPacket; + + /*** dynamic-size vectorized paths ***/ + if(SizeAtCompileTime == Dynamic && ((incrx==1 && incry==1) || PacketSize == 1)) + { + // both vectors are sequentially stored in memory => vectorization + enum { Peeling = 2 }; + + Index alignedStart = internal::first_default_aligned(y, size); + Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; + + const OtherPacket pc = pset1(c); + const OtherPacket ps = pset1(s); + conj_helper::IsComplex,false> pcj; + conj_helper pm; + + for(Index i=0; i(px); + Packet yi = pload(py); + pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); + pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); + px += PacketSize; + py += PacketSize; + } + } + else + { + Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize); + for(Index i=alignedStart; i(px); + Packet xi1 = ploadu(px+PacketSize); + Packet yi = pload (py); + Packet yi1 = pload (py+PacketSize); + pstoreu(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); + pstoreu(px+PacketSize, padd(pm.pmul(pc,xi1),pcj.pmul(ps,yi1))); + pstore (py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); + pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pm.pmul(ps,xi1))); + px += Peeling*PacketSize; + py += Peeling*PacketSize; + } + if(alignedEnd!=peelingEnd) + { + Packet xi = ploadu(x+peelingEnd); + Packet yi = pload (y+peelingEnd); + pstoreu(x+peelingEnd, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); + pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); + } + } + + for(Index i=alignedEnd; i0) // FIXME should be compared to the required alignment + { + const OtherPacket pc = pset1(c); + const OtherPacket ps = pset1(s); + conj_helper::IsComplex,false> pcj; + conj_helper pm; + Scalar* EIGEN_RESTRICT px = x; + Scalar* EIGEN_RESTRICT py = y; + for(Index i=0; i(px); + Packet yi = pload(py); + pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); + pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); + px += PacketSize; + py += PacketSize; + } + } + + /*** non-vectorized path ***/ + else + { + 
apply_rotation_in_the_plane_selector::run(x,incrx,y,incry,size,c,s); + } + } +}; + +template +void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase& xpr_x, DenseBase& xpr_y, const JacobiRotation& j) +{ + typedef typename VectorX::Scalar Scalar; + const bool Vectorizable = (VectorX::Flags & VectorY::Flags & PacketAccessBit) + && (int(packet_traits::size) == int(packet_traits::size)); + + eigen_assert(xpr_x.size() == xpr_y.size()); + Index size = xpr_x.size(); + Index incrx = xpr_x.derived().innerStride(); + Index incry = xpr_y.derived().innerStride(); + + Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0); + Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0); + + OtherScalar c = j.c(); + OtherScalar s = j.s(); + if (c==OtherScalar(1) && s==OtherScalar(0)) + return; + + apply_rotation_in_the_plane_selector< + Scalar,OtherScalar, + VectorX::SizeAtCompileTime, + EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment), + Vectorizable>::run(x,incrx,y,incry,size,c,s); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_JACOBI_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/Determinant.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/Determinant.h new file mode 100644 index 000000000..d6a3c1e5a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/Determinant.h @@ -0,0 +1,101 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+#ifndef EIGEN_DETERMINANT_H
+#define EIGEN_DETERMINANT_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Derived>
+inline const typename Derived::Scalar bruteforce_det3_helper
+(const MatrixBase<Derived>& matrix, int a, int b, int c)
+{
+  return matrix.coeff(0,a)
+         * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
+}
+
+template<typename Derived>
+const typename Derived::Scalar bruteforce_det4_helper
+(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
+{
+  return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
+       * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
+}
+
+template<typename Derived,
+         int DeterminantType = Derived::RowsAtCompileTime
+> struct determinant_impl
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
+      return typename traits<Derived>::Scalar(1);
+    return m.partialPivLu().determinant();
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 1>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return m.coeff(0,0);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 2>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 3>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return bruteforce_det3_helper(m,0,1,2)
+          - bruteforce_det3_helper(m,1,0,2)
+          + bruteforce_det3_helper(m,2,0,1);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 4>
+{
+  static typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    // trick by Martin Costabel to compute 4x4 det with only 30 muls
+    return bruteforce_det4_helper(m,0,1,2,3)
+          - bruteforce_det4_helper(m,0,2,1,3)
+          + bruteforce_det4_helper(m,0,3,1,2)
+          + bruteforce_det4_helper(m,1,2,0,3)
+          - bruteforce_det4_helper(m,1,3,0,2)
+          + bruteforce_det4_helper(m,2,3,0,1);
+  }
+};
+
+} // end namespace internal
+
+/** \lu_module
+  *
+  * \returns the determinant of this matrix
+  */
+template<typename Derived>
+inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested;
+  return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DETERMINANT_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/FullPivLU.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/FullPivLU.h
new file mode 100644
index 000000000..03b6af706
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/FullPivLU.h
@@ -0,0 +1,891 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
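Editorial aside, not part of the diff: a quick sketch of the determinant() size dispatch implemented in Determinant.h above; the concrete values are illustrative.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;                 // fixed 3x3: takes the brute-force cofactor path
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  std::cout << m.determinant() << "\n";                  // prints -3

  Eigen::MatrixXd d = Eigen::MatrixXd::Random(20, 20);   // dynamic size: partialPivLu() path
  std::cout << d.determinant() << "\n";
}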
+ +#ifndef EIGEN_LU_H +#define EIGEN_LU_H + +namespace Eigen { + +namespace internal { +template struct traits > + : traits<_MatrixType> +{ + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + enum { Flags = 0 }; +}; + +} // end namespace internal + +/** \ingroup LU_Module + * + * \class FullPivLU + * + * \brief LU decomposition of a matrix with complete pivoting, and related features + * + * \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition + * + * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is + * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is + * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU + * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any + * zeros are at the end. + * + * This decomposition provides the generic approach to solving systems of linear equations, computing + * the rank, invertibility, inverse, kernel, and determinant. + * + * This LU decomposition is very stable and well tested with large matrices. However there are use cases where the SVD + * decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix, + * working with the SVD allows to select the smallest singular values of the matrix, something that + * the LU decomposition doesn't see. + * + * The data of the LU decomposition can be directly accessed through the methods matrixLU(), + * permutationP(), permutationQ(). + * + * As an exemple, here is how the original matrix can be retrieved: + * \include class_FullPivLU.cpp + * Output: \verbinclude class_FullPivLU.out + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse() + */ +template class FullPivLU + : public SolverBase > +{ + public: + typedef _MatrixType MatrixType; + typedef SolverBase Base; + + EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU) + // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int + enum { + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename internal::plain_row_type::type IntRowVectorType; + typedef typename internal::plain_col_type::type IntColVectorType; + typedef PermutationMatrix PermutationQType; + typedef PermutationMatrix PermutationPType; + typedef typename MatrixType::PlainObject PlainObject; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LU::compute(const MatrixType&). + */ + FullPivLU(); + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa FullPivLU() + */ + FullPivLU(Index rows, Index cols); + + /** Constructor. + * + * \param matrix the matrix of which to compute the LU decomposition. + * It is required to be nonzero. + */ + template + explicit FullPivLU(const EigenBase& matrix); + + /** \brief Constructs a LU factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. 
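+    *
+    * An end-to-end sketch of the class (editorial addition, not upstream Eigen doc;
+    * values illustrative):
+    * \code
+    * Eigen::MatrixXd A(3,3);
+    * A << 1, 2, 3,
+    *      2, 4, 6,   // row 1 = 2 * row 0  =>  rank(A) == 2
+    *      1, 0, 1;
+    * Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
+    * std::cout << lu.rank() << "\n";                        // 2
+    * std::cout << lu.kernel() << "\n";                      // null-space basis
+    * std::cout << A.isApprox(lu.reconstructedMatrix());     // P^-1 L U Q^-1 == A
+    * \endcode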
+ * + * \sa FullPivLU(const EigenBase&) + */ + template + explicit FullPivLU(EigenBase& matrix); + + /** Computes the LU decomposition of the given matrix. + * + * \param matrix the matrix of which to compute the LU decomposition. + * It is required to be nonzero. + * + * \returns a reference to *this + */ + template + FullPivLU& compute(const EigenBase& matrix) { + m_lu = matrix.derived(); + computeInPlace(); + return *this; + } + + /** \returns the LU decomposition matrix: the upper-triangular part is U, the + * unit-lower-triangular part is L (at least for square matrices; in the non-square + * case, special care is needed, see the documentation of class FullPivLU). + * + * \sa matrixL(), matrixU() + */ + inline const MatrixType& matrixLU() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_lu; + } + + /** \returns the number of nonzero pivots in the LU decomposition. + * Here nonzero is meant in the exact sense, not in a fuzzy sense. + * So that notion isn't really intrinsically interesting, but it is + * still useful when implementing algorithms. + * + * \sa rank() + */ + inline Index nonzeroPivots() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_nonzero_pivots; + } + + /** \returns the absolute value of the biggest pivot, i.e. the biggest + * diagonal coefficient of U. + */ + RealScalar maxPivot() const { return m_maxpivot; } + + /** \returns the permutation matrix P + * + * \sa permutationQ() + */ + EIGEN_DEVICE_FUNC inline const PermutationPType& permutationP() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_p; + } + + /** \returns the permutation matrix Q + * + * \sa permutationP() + */ + inline const PermutationQType& permutationQ() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_q; + } + + /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix + * will form a basis of the kernel. + * + * \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + * + * Example: \include FullPivLU_kernel.cpp + * Output: \verbinclude FullPivLU_kernel.out + * + * \sa image() + */ + inline const internal::kernel_retval kernel() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return internal::kernel_retval(*this); + } + + /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix + * will form a basis of the image (column-space). + * + * \param originalMatrix the original matrix, of which *this is the LU decomposition. + * The reason why it is needed to pass it here, is that this allows + * a large optimization, as otherwise this method would need to reconstruct it + * from the LU decomposition. + * + * \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). 
+ * + * Example: \include FullPivLU_image.cpp + * Output: \verbinclude FullPivLU_image.out + * + * \sa kernel() + */ + inline const internal::image_retval + image(const MatrixType& originalMatrix) const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return internal::image_retval(*this, originalMatrix); + } + + /** \return a solution x to the equation Ax=b, where A is the matrix of which + * *this is the LU decomposition. + * + * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix, + * the only requirement in order for the equation to make sense is that + * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition. + * + * \returns a solution. + * + * \note_about_checking_solutions + * + * \note_about_arbitrary_choice_of_solution + * \note_about_using_kernel_to_study_multiple_solutions + * + * Example: \include FullPivLU_solve.cpp + * Output: \verbinclude FullPivLU_solve.out + * + * \sa TriangularView::solve(), kernel(), inverse() + */ + // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion. + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return Solve(*this, b.derived()); + } + + /** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is + the LU decomposition. + */ + inline RealScalar rcond() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return internal::rcond_estimate_helper(m_l1_norm, *this); + } + + /** \returns the determinant of the matrix of which + * *this is the LU decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the LU decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers + * optimized paths. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * + * \sa MatrixBase::determinant() + */ + typename internal::traits::Scalar determinant() const; + + /** Allows to prescribe a threshold to be used by certain methods, such as rank(), + * who need to determine when pivots are to be considered nonzero. This is not used for the + * LU decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). By default, this + * uses a formula to automatically determine a reasonable threshold. + * Once you have called the present method setThreshold(const RealScalar&), + * your value is used instead. + * + * \param threshold The new value to use as the threshold. + * + * A pivot will be considered nonzero if its absolute value is strictly greater than + * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ + * where maxpivot is the biggest pivot. + * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + FullPivLU& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return *this; + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. + * \code lu.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). 
+ */ + FullPivLU& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return *this; + } + + /** Returns the threshold that will be used by certain methods such as rank(). + * + * See the documentation of setThreshold(const RealScalar&). + */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + return m_usePrescribedThreshold ? m_prescribedThreshold + // this formula comes from experimenting (see "LU precision tuning" thread on the list) + // and turns out to be identical to Higham's formula used already in LDLt. + : NumTraits::epsilon() * m_lu.diagonalSize(); + } + + /** \returns the rank of the matrix of which *this is the LU decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index rank() const + { + using std::abs; + eigen_assert(m_isInitialized && "LU is not initialized."); + RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) + result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold); + return result; + } + + /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index dimensionOfKernel() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return cols() - rank(); + } + + /** \returns true if the matrix of which *this is the LU decomposition represents an injective + * linear map, i.e. has trivial kernel; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInjective() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return rank() == cols(); + } + + /** \returns true if the matrix of which *this is the LU decomposition represents a surjective + * linear map; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isSurjective() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return rank() == rows(); + } + + /** \returns true if the matrix of which *this is the LU decomposition is invertible. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInvertible() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return isInjective() && (m_lu.rows() == m_lu.cols()); + } + + /** \returns the inverse of the matrix of which *this is the LU decomposition. + * + * \note If this matrix is not invertible, the returned matrix has undefined coefficients. + * Use isInvertible() to first determine whether this matrix is invertible. 
+ * + * \sa MatrixBase::inverse() + */ + inline const Inverse inverse() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); + return Inverse(*this); + } + + MatrixType reconstructedMatrix() const; + + EIGEN_DEVICE_FUNC inline Index rows() const { return m_lu.rows(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_lu.cols(); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const; + + template + EIGEN_DEVICE_FUNC + void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void computeInPlace(); + + MatrixType m_lu; + PermutationPType m_p; + PermutationQType m_q; + IntColVectorType m_rowsTranspositions; + IntRowVectorType m_colsTranspositions; + Index m_nonzero_pivots; + RealScalar m_l1_norm; + RealScalar m_maxpivot, m_prescribedThreshold; + signed char m_det_pq; + bool m_isInitialized, m_usePrescribedThreshold; +}; + +template +FullPivLU::FullPivLU() + : m_isInitialized(false), m_usePrescribedThreshold(false) +{ +} + +template +FullPivLU::FullPivLU(Index rows, Index cols) + : m_lu(rows, cols), + m_p(rows), + m_q(cols), + m_rowsTranspositions(rows), + m_colsTranspositions(cols), + m_isInitialized(false), + m_usePrescribedThreshold(false) +{ +} + +template +template +FullPivLU::FullPivLU(const EigenBase& matrix) + : m_lu(matrix.rows(), matrix.cols()), + m_p(matrix.rows()), + m_q(matrix.cols()), + m_rowsTranspositions(matrix.rows()), + m_colsTranspositions(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) +{ + compute(matrix.derived()); +} + +template +template +FullPivLU::FullPivLU(EigenBase& matrix) + : m_lu(matrix.derived()), + m_p(matrix.rows()), + m_q(matrix.cols()), + m_rowsTranspositions(matrix.rows()), + m_colsTranspositions(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) +{ + computeInPlace(); +} + +template +void FullPivLU::computeInPlace() +{ + check_template_parameters(); + + // the permutations are stored as int indices, so just to be sure: + eigen_assert(m_lu.rows()<=NumTraits::highest() && m_lu.cols()<=NumTraits::highest()); + + m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff(); + + const Index size = m_lu.diagonalSize(); + const Index rows = m_lu.rows(); + const Index cols = m_lu.cols(); + + // will store the transpositions, before we accumulate them at the end. + // can't accumulate on-the-fly because that will be done in reverse order for the rows. + m_rowsTranspositions.resize(m_lu.rows()); + m_colsTranspositions.resize(m_lu.cols()); + Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i + + m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) + m_maxpivot = RealScalar(0); + + for(Index k = 0; k < size; ++k) + { + // First, we need to find the pivot. 
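+      // Editorial note: full pivoting searches the whole remaining bottom-right
+      // block for the largest-score entry, unlike partial pivoting (PartialPivLU),
+      // which only scans the current column; that extra work is what makes this
+      // decomposition rank-revealing.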
+ + // biggest coefficient in the remaining bottom-right corner (starting at row k, col k) + Index row_of_biggest_in_corner, col_of_biggest_in_corner; + typedef internal::scalar_score_coeff_op Scoring; + typedef typename Scoring::result_type Score; + Score biggest_in_corner; + biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k) + .unaryExpr(Scoring()) + .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner); + row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner, + col_of_biggest_in_corner += k; // need to add k to them. + + if(biggest_in_corner==Score(0)) + { + // before exiting, make sure to initialize the still uninitialized transpositions + // in a sane state without destroying what we already have. + m_nonzero_pivots = k; + for(Index i = k; i < size; ++i) + { + m_rowsTranspositions.coeffRef(i) = i; + m_colsTranspositions.coeffRef(i) = i; + } + break; + } + + RealScalar abs_pivot = internal::abs_knowing_score()(m_lu(row_of_biggest_in_corner, col_of_biggest_in_corner), biggest_in_corner); + if(abs_pivot > m_maxpivot) m_maxpivot = abs_pivot; + + // Now that we've found the pivot, we need to apply the row/col swaps to + // bring it to the location (k,k). + + m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner; + m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner; + if(k != row_of_biggest_in_corner) { + m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner)); + ++number_of_transpositions; + } + if(k != col_of_biggest_in_corner) { + m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner)); + ++number_of_transpositions; + } + + // Now that the pivot is at the right location, we update the remaining + // bottom-right corner by Gaussian elimination. + + if(k= 0; --k) + m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); + + m_q.setIdentity(cols); + for(Index k = 0; k < size; ++k) + m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); + + m_det_pq = (number_of_transpositions%2) ? -1 : 1; + + m_isInitialized = true; +} + +template +typename internal::traits::Scalar FullPivLU::determinant() const +{ + eigen_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!"); + return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod()); +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$. + * This function is provided for debug purposes. */ +template +MatrixType FullPivLU::reconstructedMatrix() const +{ + eigen_assert(m_isInitialized && "LU is not initialized."); + const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols()); + // LU + MatrixType res(m_lu.rows(),m_lu.cols()); + // FIXME the .toDenseMatrix() should not be needed... 
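+  // Editorial note: L and U are stored packed in m_lu; the product below takes the
+  // unit-lower triangle of the first smalldim columns (L) times the upper triangle
+  // of the first smalldim rows (U).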
+  res = m_lu.leftCols(smalldim)
+            .template triangularView<UnitLower>().toDenseMatrix()
+      * m_lu.topRows(smalldim)
+            .template triangularView<Upper>().toDenseMatrix();
+
+  // P^{-1}(LU)
+  res = m_p.inverse() * res;
+
+  // (P^{-1}LU)Q^{-1}
+  res = res * m_q.inverse();
+
+  return res;
+}
+
+/********* Implementation of kernel() **************************************************/
+
+namespace internal {
+template<typename _MatrixType>
+struct kernel_retval<FullPivLU<_MatrixType> >
+  : kernel_retval_base<FullPivLU<_MatrixType> >
+{
+  EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
+
+  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
+            MatrixType::MaxColsAtCompileTime,
+            MatrixType::MaxRowsAtCompileTime)
+  };
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    using std::abs;
+    const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
+    if(dimker == 0)
+    {
+      // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
+      // avoid crashing/asserting as that depends on floating point calculations. Let's
+      // just return a single column vector filled with zeros.
+      dst.setZero();
+      return;
+    }
+
+    /* Let us use the following lemma:
+      *
+      * Lemma: If the matrix A has the LU decomposition PAQ = LU,
+      * then Ker A = Q(Ker U).
+      *
+      * Proof: trivial: just keep in mind that P, Q, L are invertible.
+      */
+
+    /* Thus, all we need to do is to compute Ker U, and then apply Q.
+      *
+      * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.
+      * Thus, the diagonal of U ends with exactly
+      * dimKer zero's. Let us use that to construct dimKer linearly
+      * independent vectors in Ker U.
+      */
+
+    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
+    Index p = 0;
+    for(Index i = 0; i < dec().nonzeroPivots(); ++i)
+      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+        pivots.coeffRef(p++) = i;
+    eigen_internal_assert(p == rank());
+
+    // we construct a temporary trapezoid matrix m, by taking the U matrix and
+    // permuting the rows and cols to bring the nonnegligible pivots to the top of
+    // the main diagonal. We need that to be able to apply our triangular solvers.
+    // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
+    Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
+           MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
+      m(dec().matrixLU().block(0, 0, rank(), cols));
+    for(Index i = 0; i < rank(); ++i)
+    {
+      if(i) m.row(i).head(i).setZero();
+      m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
+    }
+    m.block(0, 0, rank(), rank());
+    m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
+    for(Index i = 0; i < rank(); ++i)
+      m.col(i).swap(m.col(pivots.coeff(i)));
+
+    // ok, we have our trapezoid matrix, we can apply the triangular solver.
+    // notice that the math behind this suggests that we should apply this to the
+    // negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
+    m.topLeftCorner(rank(), rank())
+      .template triangularView<Upper>().solveInPlace(
+        m.topRightCorner(rank(), dimker)
+      );
+
+    // now we must undo the column permutation that we had applied!
+    for(Index i = rank()-1; i >= 0; --i)
+      m.col(i).swap(m.col(pivots.coeff(i)));
+
+    // see the negative sign in the next line, that's what we were talking about above.
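+    // Editorial note: the loops below scatter the solved block back through the
+    // column permutation Q: pivot rows receive -m, the remaining rows are zeroed,
+    // and the unit entries mark one free variable per kernel basis vector.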
+ for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker); + for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); + for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1); + } +}; + +/***** Implementation of image() *****************************************************/ + +template +struct image_retval > + : image_retval_base > +{ + EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>) + + enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED( + MatrixType::MaxColsAtCompileTime, + MatrixType::MaxRowsAtCompileTime) + }; + + template void evalTo(Dest& dst) const + { + using std::abs; + if(rank() == 0) + { + // The Image is just {0}, so it doesn't have a basis properly speaking, but let's + // avoid crashing/asserting as that depends on floating point calculations. Let's + // just return a single column vector filled with zeros. + dst.setZero(); + return; + } + + Matrix pivots(rank()); + RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); + Index p = 0; + for(Index i = 0; i < dec().nonzeroPivots(); ++i) + if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) + pivots.coeffRef(p++) = i; + eigen_internal_assert(p == rank()); + + for(Index i = 0; i < rank(); ++i) + dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i))); + } +}; + +/***** Implementation of solve() *****************************************************/ + +} // end namespace internal + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}. + * So we proceed as follows: + * Step 1: compute c = P * rhs. + * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible. + * Step 3: replace c by the solution x to Ux = c. May or may not exist. + * Step 4: result = Q * c; + */ + + const Index rows = this->rows(), + cols = this->cols(), + nonzero_pivots = this->rank(); + eigen_assert(rhs.rows() == rows); + const Index smalldim = (std::min)(rows, cols); + + if(nonzero_pivots == 0) + { + dst.setZero(); + return; + } + + typename RhsType::PlainObject c(rhs.rows(), rhs.cols()); + + // Step 1 + c = permutationP() * rhs; + + // Step 2 + m_lu.topLeftCorner(smalldim,smalldim) + .template triangularView() + .solveInPlace(c.topRows(smalldim)); + if(rows>cols) + c.bottomRows(rows-cols) -= m_lu.bottomRows(rows-cols) * c.topRows(cols); + + // Step 3 + m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots) + .template triangularView() + .solveInPlace(c.topRows(nonzero_pivots)); + + // Step 4 + for(Index i = 0; i < nonzero_pivots; ++i) + dst.row(permutationQ().indices().coeff(i)) = c.row(i); + for(Index i = nonzero_pivots; i < m_lu.cols(); ++i) + dst.row(permutationQ().indices().coeff(i)).setZero(); +} + +template +template +void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const +{ + /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}, + * and since permutations are real and unitary, we can write this + * as A^T = Q U^T L^T P, + * So we proceed as follows: + * Step 1: compute c = Q^T rhs. + * Step 2: replace c by the solution x to U^T x = c. May or may not exist. + * Step 3: replace c by the solution x to L^T x = c. + * Step 4: result = P^T c. + * If Conjugate is true, replace "^T" by "^*" above. 
+ */ + + const Index rows = this->rows(), cols = this->cols(), + nonzero_pivots = this->rank(); + eigen_assert(rhs.rows() == cols); + const Index smalldim = (std::min)(rows, cols); + + if(nonzero_pivots == 0) + { + dst.setZero(); + return; + } + + typename RhsType::PlainObject c(rhs.rows(), rhs.cols()); + + // Step 1 + c = permutationQ().inverse() * rhs; + + if (Conjugate) { + // Step 2 + m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots) + .template triangularView() + .adjoint() + .solveInPlace(c.topRows(nonzero_pivots)); + // Step 3 + m_lu.topLeftCorner(smalldim, smalldim) + .template triangularView() + .adjoint() + .solveInPlace(c.topRows(smalldim)); + } else { + // Step 2 + m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots) + .template triangularView() + .transpose() + .solveInPlace(c.topRows(nonzero_pivots)); + // Step 3 + m_lu.topLeftCorner(smalldim, smalldim) + .template triangularView() + .transpose() + .solveInPlace(c.topRows(smalldim)); + } + + // Step 4 + PermutationPType invp = permutationP().inverse().eval(); + for(Index i = 0; i < smalldim; ++i) + dst.row(invp.indices().coeff(i)) = c.row(i); + for(Index i = smalldim; i < rows; ++i) + dst.row(invp.indices().coeff(i)).setZero(); +} + +#endif + +namespace internal { + + +/***** Implementation of inverse() *****************************************************/ +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> +{ + typedef FullPivLU LuType; + typedef Inverse SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); + } +}; +} // end namespace internal + +/******* MatrixBase methods *****************************************************************/ + +/** \lu_module + * + * \return the full-pivoting LU decomposition of \c *this. + * + * \sa class FullPivLU + */ +template +inline const FullPivLU::PlainObject> +MatrixBase::fullPivLu() const +{ + return FullPivLU(eval()); +} + +} // end namespace Eigen + +#endif // EIGEN_LU_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/InverseImpl.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/InverseImpl.h new file mode 100644 index 000000000..f49f23360 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/InverseImpl.h @@ -0,0 +1,415 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Benoit Jacob +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INVERSE_IMPL_H +#define EIGEN_INVERSE_IMPL_H + +namespace Eigen { + +namespace internal { + +/********************************** +*** General case implementation *** +**********************************/ + +template +struct compute_inverse +{ + EIGEN_DEVICE_FUNC + static inline void run(const MatrixType& matrix, ResultType& result) + { + result = matrix.partialPivLu().inverse(); + } +}; + +template +struct compute_inverse_and_det_with_check { /* nothing! general case not supported. 
*/ }; + +/**************************** +*** Size 1 implementation *** +****************************/ + +template +struct compute_inverse +{ + EIGEN_DEVICE_FUNC + static inline void run(const MatrixType& matrix, ResultType& result) + { + typedef typename MatrixType::Scalar Scalar; + internal::evaluator matrixEval(matrix); + result.coeffRef(0,0) = Scalar(1) / matrixEval.coeff(0,0); + } +}; + +template +struct compute_inverse_and_det_with_check +{ + EIGEN_DEVICE_FUNC + static inline void run( + const MatrixType& matrix, + const typename MatrixType::RealScalar& absDeterminantThreshold, + ResultType& result, + typename ResultType::Scalar& determinant, + bool& invertible + ) + { + using std::abs; + determinant = matrix.coeff(0,0); + invertible = abs(determinant) > absDeterminantThreshold; + if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant; + } +}; + +/**************************** +*** Size 2 implementation *** +****************************/ + +template +EIGEN_DEVICE_FUNC +inline void compute_inverse_size2_helper( + const MatrixType& matrix, const typename ResultType::Scalar& invdet, + ResultType& result) +{ + result.coeffRef(0,0) = matrix.coeff(1,1) * invdet; + result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet; + result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet; + result.coeffRef(1,1) = matrix.coeff(0,0) * invdet; +} + +template +struct compute_inverse +{ + EIGEN_DEVICE_FUNC + static inline void run(const MatrixType& matrix, ResultType& result) + { + typedef typename ResultType::Scalar Scalar; + const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant(); + compute_inverse_size2_helper(matrix, invdet, result); + } +}; + +template +struct compute_inverse_and_det_with_check +{ + EIGEN_DEVICE_FUNC + static inline void run( + const MatrixType& matrix, + const typename MatrixType::RealScalar& absDeterminantThreshold, + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible + ) + { + using std::abs; + typedef typename ResultType::Scalar Scalar; + determinant = matrix.determinant(); + invertible = abs(determinant) > absDeterminantThreshold; + if(!invertible) return; + const Scalar invdet = Scalar(1) / determinant; + compute_inverse_size2_helper(matrix, invdet, inverse); + } +}; + +/**************************** +*** Size 3 implementation *** +****************************/ + +template +EIGEN_DEVICE_FUNC +inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m) +{ + enum { + i1 = (i+1) % 3, + i2 = (i+2) % 3, + j1 = (j+1) % 3, + j2 = (j+2) % 3 + }; + return m.coeff(i1, j1) * m.coeff(i2, j2) + - m.coeff(i1, j2) * m.coeff(i2, j1); +} + +template +EIGEN_DEVICE_FUNC +inline void compute_inverse_size3_helper( + const MatrixType& matrix, + const typename ResultType::Scalar& invdet, + const Matrix& cofactors_col0, + ResultType& result) +{ + result.row(0) = cofactors_col0 * invdet; + result.coeffRef(1,0) = cofactor_3x3(matrix) * invdet; + result.coeffRef(1,1) = cofactor_3x3(matrix) * invdet; + result.coeffRef(1,2) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,0) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,1) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,2) = cofactor_3x3(matrix) * invdet; +} + +template +struct compute_inverse +{ + EIGEN_DEVICE_FUNC + static inline void run(const MatrixType& matrix, ResultType& result) + { + typedef typename ResultType::Scalar Scalar; + Matrix cofactors_col0; + cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); + 
cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); + const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); + const Scalar invdet = Scalar(1) / det; + compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result); + } +}; + +template +struct compute_inverse_and_det_with_check +{ + EIGEN_DEVICE_FUNC + static inline void run( + const MatrixType& matrix, + const typename MatrixType::RealScalar& absDeterminantThreshold, + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible + ) + { + using std::abs; + typedef typename ResultType::Scalar Scalar; + Matrix cofactors_col0; + cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); + determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); + invertible = abs(determinant) > absDeterminantThreshold; + if(!invertible) return; + const Scalar invdet = Scalar(1) / determinant; + compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse); + } +}; + +/**************************** +*** Size 4 implementation *** +****************************/ + +template +EIGEN_DEVICE_FUNC +inline const typename Derived::Scalar general_det3_helper +(const MatrixBase& matrix, int i1, int i2, int i3, int j1, int j2, int j3) +{ + return matrix.coeff(i1,j1) + * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2)); +} + +template +EIGEN_DEVICE_FUNC +inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix) +{ + enum { + i1 = (i+1) % 4, + i2 = (i+2) % 4, + i3 = (i+3) % 4, + j1 = (j+1) % 4, + j2 = (j+2) % 4, + j3 = (j+3) % 4 + }; + return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3) + + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3) + + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3); +} + +template +struct compute_inverse_size4 +{ + EIGEN_DEVICE_FUNC + static void run(const MatrixType& matrix, ResultType& result) + { + result.coeffRef(0,0) = cofactor_4x4(matrix); + result.coeffRef(1,0) = -cofactor_4x4(matrix); + result.coeffRef(2,0) = cofactor_4x4(matrix); + result.coeffRef(3,0) = -cofactor_4x4(matrix); + result.coeffRef(0,2) = cofactor_4x4(matrix); + result.coeffRef(1,2) = -cofactor_4x4(matrix); + result.coeffRef(2,2) = cofactor_4x4(matrix); + result.coeffRef(3,2) = -cofactor_4x4(matrix); + result.coeffRef(0,1) = -cofactor_4x4(matrix); + result.coeffRef(1,1) = cofactor_4x4(matrix); + result.coeffRef(2,1) = -cofactor_4x4(matrix); + result.coeffRef(3,1) = cofactor_4x4(matrix); + result.coeffRef(0,3) = -cofactor_4x4(matrix); + result.coeffRef(1,3) = cofactor_4x4(matrix); + result.coeffRef(2,3) = -cofactor_4x4(matrix); + result.coeffRef(3,3) = cofactor_4x4(matrix); + result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum(); + } +}; + +template +struct compute_inverse + : compute_inverse_size4 +{ +}; + +template +struct compute_inverse_and_det_with_check +{ + EIGEN_DEVICE_FUNC + static inline void run( + const MatrixType& matrix, + const typename MatrixType::RealScalar& absDeterminantThreshold, + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible + ) + { + using std::abs; + determinant = matrix.determinant(); + invertible = abs(determinant) > absDeterminantThreshold; + if(invertible) compute_inverse::run(matrix, inverse); + } +}; + +/************************* +*** MatrixBase methods *** +*************************/ + +} // end namespace internal + +namespace internal { + +// Specialization for "dense = 
dense_xpr.inverse()" +template +struct Assignment, internal::assign_op, Dense2Dense> +{ + typedef Inverse SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime); + EIGEN_ONLY_USED_FOR_DEBUG(Size); + eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst))) + && "Aliasing problem detected in inverse(), you need to do inverse().eval() here."); + + typedef typename internal::nested_eval::type ActualXprType; + typedef typename internal::remove_all::type ActualXprTypeCleanded; + + ActualXprType actual_xpr(src.nestedExpression()); + + compute_inverse::run(actual_xpr, dst); + } +}; + + +} // end namespace internal + +/** \lu_module + * + * \returns the matrix inverse of this matrix. + * + * For small fixed sizes up to 4x4, this method uses cofactors. + * In the general case, this method uses class PartialPivLU. + * + * \note This matrix must be invertible, otherwise the result is undefined. If you need an + * invertibility check, do the following: + * \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck(). + * \li for the general case, use class FullPivLU. + * + * Example: \include MatrixBase_inverse.cpp + * Output: \verbinclude MatrixBase_inverse.out + * + * \sa computeInverseAndDetWithCheck() + */ +template +inline const Inverse MatrixBase::inverse() const +{ + EIGEN_STATIC_ASSERT(!NumTraits::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) + eigen_assert(rows() == cols()); + return Inverse(derived()); +} + +/** \lu_module + * + * Computation of matrix inverse and determinant, with invertibility check. + * + * This is only for fixed-size square matrices of size up to 4x4. + * + * \param inverse Reference to the matrix in which to store the inverse. + * \param determinant Reference to the variable in which to store the determinant. + * \param invertible Reference to the bool variable in which to store whether the matrix is invertible. + * \param absDeterminantThreshold Optional parameter controlling the invertibility check. + * The matrix will be declared invertible if the absolute value of its + * determinant is greater than this threshold. + * + * Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp + * Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out + * + * \sa inverse(), computeInverseWithCheck() + */ +template +template +inline void MatrixBase::computeInverseAndDetWithCheck( + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible, + const RealScalar& absDeterminantThreshold + ) const +{ + // i'd love to put some static assertions there, but SFINAE means that they have no effect... + eigen_assert(rows() == cols()); + // for 2x2, it's worth giving a chance to avoid evaluating. + // for larger sizes, evaluating has negligible cost and limits code size. + typedef typename internal::conditional< + RowsAtCompileTime == 2, + typename internal::remove_all::type>::type, + PlainObject + >::type MatrixType; + internal::compute_inverse_and_det_with_check::run + (derived(), absDeterminantThreshold, inverse, determinant, invertible); +} + +/** \lu_module + * + * Computation of matrix inverse, with invertibility check. + * + * This is only for fixed-size square matrices of size up to 4x4. 
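+  *
+  * Illustrative sketch (editorial addition, not upstream Eigen doc), using the
+  * computeInverseAndDetWithCheck() variant documented above:
+  * \code
+  * Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+  * Eigen::Matrix3d inv;
+  * double det;
+  * bool invertible;
+  * m.computeInverseAndDetWithCheck(inv, det, invertible);
+  * if(invertible) std::cout << inv << "\n";
+  * \endcode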
+ * + * \param inverse Reference to the matrix in which to store the inverse. + * \param invertible Reference to the bool variable in which to store whether the matrix is invertible. + * \param absDeterminantThreshold Optional parameter controlling the invertibility check. + * The matrix will be declared invertible if the absolute value of its + * determinant is greater than this threshold. + * + * Example: \include MatrixBase_computeInverseWithCheck.cpp + * Output: \verbinclude MatrixBase_computeInverseWithCheck.out + * + * \sa inverse(), computeInverseAndDetWithCheck() + */ +template +template +inline void MatrixBase::computeInverseWithCheck( + ResultType& inverse, + bool& invertible, + const RealScalar& absDeterminantThreshold + ) const +{ + Scalar determinant; + // i'd love to put some static assertions there, but SFINAE means that they have no effect... + eigen_assert(rows() == cols()); + computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold); +} + +} // end namespace Eigen + +#endif // EIGEN_INVERSE_IMPL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU.h new file mode 100644 index 000000000..d43961887 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU.h @@ -0,0 +1,611 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARTIALLU_H +#define EIGEN_PARTIALLU_H + +namespace Eigen { + +namespace internal { +template struct traits > + : traits<_MatrixType> +{ + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + typedef traits<_MatrixType> BaseTraits; + enum { + Flags = BaseTraits::Flags & RowMajorBit, + CoeffReadCost = Dynamic + }; +}; + +template +struct enable_if_ref; +// { +// typedef Derived type; +// }; + +template +struct enable_if_ref,Derived> { + typedef Derived type; +}; + +} // end namespace internal + +/** \ingroup LU_Module + * + * \class PartialPivLU + * + * \brief LU decomposition of a matrix with partial pivoting, and related features + * + * \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition + * + * This class represents a LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A + * is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P + * is a permutation matrix. + * + * Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible + * matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class + * does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the + * matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices. + * + * The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided + * by class FullPivLU. 
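+  *
+  * Typical use (illustrative sketch, editorial addition, not upstream Eigen doc):
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);   // assumed invertible
+  * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+  * Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);         // equivalently A.lu() or A.partialPivLu()
+  * Eigen::VectorXd x = lu.solve(b);                    // (A*x - b).norm() ~ 0
+  * std::cout << lu.determinant() << "\n";              // O(n) once factored
+  * \endcode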
+ * + * This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class, + * such as rank computation. If you need these features, use class FullPivLU. + * + * This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses + * in the general case. + * On the other hand, it is \b not suitable to determine whether a given matrix is invertible. + * + * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP(). + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU + */ +template class PartialPivLU + : public SolverBase > +{ + public: + + typedef _MatrixType MatrixType; + typedef SolverBase Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU) + // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int + enum { + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef PermutationMatrix PermutationType; + typedef Transpositions TranspositionType; + typedef typename MatrixType::PlainObject PlainObject; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via PartialPivLU::compute(const MatrixType&). + */ + PartialPivLU(); + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa PartialPivLU() + */ + explicit PartialPivLU(Index size); + + /** Constructor. + * + * \param matrix the matrix of which to compute the LU decomposition. + * + * \warning The matrix should have full rank (e.g. if it's square, it should be invertible). + * If you need to deal with non-full rank, use class FullPivLU instead. + */ + template + explicit PartialPivLU(const EigenBase& matrix); + + /** Constructor for \link InplaceDecomposition inplace decomposition \endlink + * + * \param matrix the matrix of which to compute the LU decomposition. + * + * \warning The matrix should have full rank (e.g. if it's square, it should be invertible). + * If you need to deal with non-full rank, use class FullPivLU instead. + */ + template + explicit PartialPivLU(EigenBase& matrix); + + template + PartialPivLU& compute(const EigenBase& matrix) { + m_lu = matrix.derived(); + compute(); + return *this; + } + + /** \returns the LU decomposition matrix: the upper-triangular part is U, the + * unit-lower-triangular part is L (at least for square matrices; in the non-square + * case, special care is needed, see the documentation of class FullPivLU). + * + * \sa matrixL(), matrixU() + */ + inline const MatrixType& matrixLU() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return m_lu; + } + + /** \returns the permutation matrix P. + */ + inline const PermutationType& permutationP() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return m_p; + } + + /** This method returns the solution x to the equation Ax=b, where A is the matrix of which + * *this is the LU decomposition. + * + * \param b the right-hand-side of the equation to solve. 
Can be a vector or a matrix, + * the only requirement in order for the equation to make sense is that + * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition. + * + * \returns the solution. + * + * Example: \include PartialPivLU_solve.cpp + * Output: \verbinclude PartialPivLU_solve.out + * + * Since this PartialPivLU class assumes anyway that the matrix A is invertible, the solution + * theoretically exists and is unique regardless of b. + * + * \sa TriangularView::solve(), inverse(), computeInverse() + */ + // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion. + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return Solve(*this, b.derived()); + } + + /** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is + the LU decomposition. + */ + inline RealScalar rcond() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return internal::rcond_estimate_helper(m_l1_norm, *this); + } + + /** \returns the inverse of the matrix of which *this is the LU decomposition. + * + * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for + * invertibility, use class FullPivLU instead. + * + * \sa MatrixBase::inverse(), LU::inverse() + */ + inline const Inverse inverse() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return Inverse(*this); + } + + /** \returns the determinant of the matrix of which + * *this is the LU decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the LU decomposition has already been computed. + * + * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers + * optimized paths. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * + * \sa MatrixBase::determinant() + */ + Scalar determinant() const; + + MatrixType reconstructedMatrix() const; + + inline Index rows() const { return m_lu.rows(); } + inline Index cols() const { return m_lu.cols(); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const { + /* The decomposition PA = LU can be rewritten as A = P^{-1} L U. + * So we proceed as follows: + * Step 1: compute c = Pb. + * Step 2: replace c by the solution x to Lx = c. + * Step 3: replace c by the solution x to Ux = c. + */ + + eigen_assert(rhs.rows() == m_lu.rows()); + + // Step 1 + dst = permutationP() * rhs; + + // Step 2 + m_lu.template triangularView().solveInPlace(dst); + + // Step 3 + m_lu.template triangularView().solveInPlace(dst); + } + + template + EIGEN_DEVICE_FUNC + void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const { + /* The decomposition PA = LU can be rewritten as A = P^{-1} L U. + * So we proceed as follows: + * Step 1: compute c = Pb. + * Step 2: replace c by the solution x to Lx = c. + * Step 3: replace c by the solution x to Ux = c. 
+ */ + + eigen_assert(rhs.rows() == m_lu.cols()); + + if (Conjugate) { + // Step 1 + dst = m_lu.template triangularView().adjoint().solve(rhs); + // Step 2 + m_lu.template triangularView().adjoint().solveInPlace(dst); + } else { + // Step 1 + dst = m_lu.template triangularView().transpose().solve(rhs); + // Step 2 + m_lu.template triangularView().transpose().solveInPlace(dst); + } + // Step 3 + dst = permutationP().transpose() * dst; + } + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void compute(); + + MatrixType m_lu; + PermutationType m_p; + TranspositionType m_rowsTranspositions; + RealScalar m_l1_norm; + signed char m_det_p; + bool m_isInitialized; +}; + +template +PartialPivLU::PartialPivLU() + : m_lu(), + m_p(), + m_rowsTranspositions(), + m_l1_norm(0), + m_det_p(0), + m_isInitialized(false) +{ +} + +template +PartialPivLU::PartialPivLU(Index size) + : m_lu(size, size), + m_p(size), + m_rowsTranspositions(size), + m_l1_norm(0), + m_det_p(0), + m_isInitialized(false) +{ +} + +template +template +PartialPivLU::PartialPivLU(const EigenBase& matrix) + : m_lu(matrix.rows(),matrix.cols()), + m_p(matrix.rows()), + m_rowsTranspositions(matrix.rows()), + m_l1_norm(0), + m_det_p(0), + m_isInitialized(false) +{ + compute(matrix.derived()); +} + +template +template +PartialPivLU::PartialPivLU(EigenBase& matrix) + : m_lu(matrix.derived()), + m_p(matrix.rows()), + m_rowsTranspositions(matrix.rows()), + m_l1_norm(0), + m_det_p(0), + m_isInitialized(false) +{ + compute(); +} + +namespace internal { + +/** \internal This is the blocked version of fullpivlu_unblocked() */ +template +struct partial_lu_impl +{ + // FIXME add a stride to Map, so that the following mapping becomes easier, + // another option would be to create an expression being able to automatically + // warp any Map, Matrix, and Block expressions as a unique type, but since that's exactly + // a Map + stride, why not adding a stride to Map, and convenient ctors from a Matrix, + // and Block. + typedef Map > MapLU; + typedef Block MatrixType; + typedef Block BlockType; + typedef typename MatrixType::RealScalar RealScalar; + + /** \internal performs the LU decomposition in-place of the matrix \a lu + * using an unblocked algorithm. + * + * In addition, this function returns the row transpositions in the + * vector \a row_transpositions which must have a size equal to the number + * of columns of the matrix \a lu, and an integer \a nb_transpositions + * which returns the actual number of transpositions. + * + * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise. 
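+    * (Editorial note: blocked_lu() below shares this return convention, and
+    * partial_lu_inplace() ignores the returned index; only nb_transpositions is
+    * used, to set the determinant sign m_det_p in compute().)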
+ */ + static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions) + { + typedef scalar_score_coeff_op Scoring; + typedef typename Scoring::result_type Score; + const Index rows = lu.rows(); + const Index cols = lu.cols(); + const Index size = (std::min)(rows,cols); + nb_transpositions = 0; + Index first_zero_pivot = -1; + for(Index k = 0; k < size; ++k) + { + Index rrows = rows-k-1; + Index rcols = cols-k-1; + + Index row_of_biggest_in_col; + Score biggest_in_corner + = lu.col(k).tail(rows-k).unaryExpr(Scoring()).maxCoeff(&row_of_biggest_in_col); + row_of_biggest_in_col += k; + + row_transpositions[k] = PivIndex(row_of_biggest_in_col); + + if(biggest_in_corner != Score(0)) + { + if(k != row_of_biggest_in_col) + { + lu.row(k).swap(lu.row(row_of_biggest_in_col)); + ++nb_transpositions; + } + + // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k) + // overflow but not the actual quotient? + lu.col(k).tail(rrows) /= lu.coeff(k,k); + } + else if(first_zero_pivot==-1) + { + // the pivot is exactly zero, we record the index of the first pivot which is exactly 0, + // and continue the factorization such we still have A = PLU + first_zero_pivot = k; + } + + if(k > > + */ + static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256) + { + MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); + MatrixType lu(lu1,0,0,rows,cols); + + const Index size = (std::min)(rows,cols); + + // if the matrix is too small, no blocking: + if(size<=16) + { + return unblocked_lu(lu, row_transpositions, nb_transpositions); + } + + // automatically adjust the number of subdivisions to the size + // of the matrix so that there is enough sub blocks: + Index blockSize; + { + blockSize = size/8; + blockSize = (blockSize/16)*16; + blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize); + } + + nb_transpositions = 0; + Index first_zero_pivot = -1; + for(Index k = 0; k < size; k+=blockSize) + { + Index bs = (std::min)(size-k,blockSize); // actual size of the block + Index trows = rows - k - bs; // trailing rows + Index tsize = size - k - bs; // trailing size + + // partition the matrix: + // A00 | A01 | A02 + // lu = A_0 | A_1 | A_2 = A10 | A11 | A12 + // A20 | A21 | A22 + BlockType A_0(lu,0,0,rows,k); + BlockType A_2(lu,0,k+bs,rows,tsize); + BlockType A11(lu,k,k,bs,bs); + BlockType A12(lu,k,k+bs,bs,tsize); + BlockType A21(lu,k+bs,k,trows,bs); + BlockType A22(lu,k+bs,k+bs,trows,tsize); + + PivIndex nb_transpositions_in_panel; + // recursively call the blocked LU algorithm on [A11^T A21^T]^T + // with a very small blocking size: + Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride, + row_transpositions+k, nb_transpositions_in_panel, 16); + if(ret>=0 && first_zero_pivot==-1) + first_zero_pivot = k+ret; + + nb_transpositions += nb_transpositions_in_panel; + // update permutations and apply them to A_0 + for(Index i=k; i(k)); + A_0.row(i).swap(A_0.row(piv)); + } + + if(trows) + { + // apply permutations to A_2 + for(Index i=k;i().solveInPlace(A12); + + A22.noalias() -= A21 * A12; + } + } + return first_zero_pivot; + } +}; + +/** \internal performs the LU decomposition with partial pivoting in-place. 
+
+/** \internal performs the LU decomposition with partial pivoting in-place.
+  */
+template<typename MatrixType, typename TranspositionType>
+void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)
+{
+  eigen_assert(lu.cols() == row_transpositions.size());
+  eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
+
+  partial_lu_impl
+    <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
+    ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
+}
+
+} // end namespace internal
+
+template<typename MatrixType>
+void PartialPivLU<MatrixType>::compute()
+{
+  check_template_parameters();
+
+  // the row permutation is stored as int indices, so just to be sure:
+  eigen_assert(m_lu.rows()<NumTraits<int>::highest());
+
+  m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
+
+  eigen_assert(m_lu.rows() == m_lu.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
+  const Index size = m_lu.rows();
+
+  m_rowsTranspositions.resize(size);
+
+  typename TranspositionType::StorageIndex nb_transpositions;
+  internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
+  m_det_p = (nb_transpositions%2) ? -1 : 1;
+
+  m_p = m_rowsTranspositions;
+
+  m_isInitialized = true;
+}
+
+template<typename MatrixType>
+typename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
+{
+  eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+  return Scalar(m_det_p) * m_lu.diagonal().prod();
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^{-1} L U.
+ * This function is provided for debug purpose. */
+template<typename MatrixType>
+MatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LU is not initialized.");
+  // LU
+  MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()
+                 * m_lu.template triangularView<Upper>();
+
+  // P^{-1}(LU)
+  res = m_p.inverse() * res;
+
+  return res;
+}
+
+/***** Implementation details *****************************************************/
+
+namespace internal {
+
+/***** Implementation of inverse() *****************************************************/
+template<typename DstXprType, typename MatrixType>
+struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename PartialPivLU<MatrixType>::Scalar>, Dense2Dense>
+{
+  typedef PartialPivLU<MatrixType> LuType;
+  typedef Inverse<LuType> SrcXprType;
+  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)
+  {
+    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
+  }
+};
+} // end namespace internal
+
+/******** MatrixBase methods *******/
+
+/** \lu_module
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::partialPivLu() const
+{
+  return PartialPivLU<PlainObject>(eval());
+}
+
+/** \lu_module
+  *
+  * Synonym of partialPivLu().
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::lu() const
+{
+  return PartialPivLU<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALLU_H
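Before the LAPACKE bindings, a short numerical sketch of how the pieces defined above fit together: `matrixLU()` packs the unit-lower L and upper U factors into one matrix, and `reconstructedMatrix()` undoes the row permutation. Standard Eigen API, made-up data:

```cpp
#include <iostream>
#include <Eigen/Dense>

int main()
{
    Eigen::Matrix3d A = Eigen::Matrix3d::Random();
    Eigen::PartialPivLU<Eigen::Matrix3d> lu(A);

    // L is unit-lower, U is upper; both live packed inside matrixLU().
    Eigen::Matrix3d L = Eigen::Matrix3d::Identity();
    L.triangularView<Eigen::StrictlyLower>() = lu.matrixLU();
    Eigen::Matrix3d U = lu.matrixLU().triangularView<Eigen::Upper>();

    // PA = LU  <=>  A = P^{-1} L U, which is what reconstructedMatrix() returns.
    std::cout << (lu.permutationP() * A - L * U).norm() << "\n";   // ~1e-16
    std::cout << (lu.reconstructedMatrix() - A).norm() << "\n";    // ~1e-16
    return 0;
}
```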
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU_LAPACKE.h new file mode 100644 index 000000000..755168a94 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/PartialPivLU_LAPACKE.h @@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to LAPACKe
+ *     LU decomposition with partial pivoting based on LAPACKE_?getrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARTIALLU_LAPACK_H
+#define EIGEN_PARTIALLU_LAPACK_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Specialization for the data types supported by LAPACKe */
+
+#define EIGEN_LAPACKE_LU_PARTPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
+template<int StorageOrder> \
+struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
+{ \
+  /* \internal performs the LU decomposition in-place of the matrix represented */ \
+  static lapack_int blocked_lu(Index rows, Index cols, EIGTYPE* lu_data, Index luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
+  { \
+    EIGEN_UNUSED_VARIABLE(maxBlockSize);\
+    lapack_int matrix_order, first_zero_pivot; \
+    lapack_int m, n, lda, *ipiv, info; \
+    EIGTYPE* a; \
+/* Set up parameters for ?getrf */ \
+    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+    lda = convert_index<lapack_int>(luStride); \
+    a = lu_data; \
+    ipiv = row_transpositions; \
+    m = convert_index<lapack_int>(rows); \
+    n = convert_index<lapack_int>(cols); \
+    nb_transpositions = 0; \
+\
+    info = LAPACKE_##LAPACKE_PREFIX##getrf( matrix_order, m, n, (LAPACKE_TYPE*)a, lda, ipiv ); \
+\
+    for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
+\
+    eigen_assert(info >= 0); \
+/* something should be done with nb_transpositions */ \
+\
+    first_zero_pivot = info; \
+    return first_zero_pivot; \
+  } \
+};
+
+EIGEN_LAPACKE_LU_PARTPIV(double, double, d)
+EIGEN_LAPACKE_LU_PARTPIV(float, float, s)
+EIGEN_LAPACKE_LU_PARTPIV(dcomplex, lapack_complex_double, z)
+EIGEN_LAPACKE_LU_PARTPIV(scomplex, lapack_complex_float, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALLU_LAPACK_H
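The only subtlety in this binding is the pivot convention: LAPACK's `?getrf` returns 1-based pivot indices, while Eigen's transposition vector is 0-based, hence the decrement-and-count loop in the macro above. A toy illustration with hypothetical pivot values:

```cpp
#include <cstdio>

int main()
{
    // Hypothetical 1-based pivot vector as ?getrf would return it for n = 4:
    // row i (1-based) was exchanged with row ipiv[i-1].
    int ipiv[4] = { 3, 2, 4, 4 };
    int nb_transpositions = 0;
    for (int i = 0; i < 4; ++i) {
        ipiv[i]--;                  // 0-based, Eigen's convention: {2, 1, 3, 3}
        if (ipiv[i] != i) nb_transpositions++;
    }
    // Two effective transpositions here -> determinant sign +1 (even count).
    std::printf("transpositions: %d\n", nb_transpositions);
    return 0;
}
```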
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/arch/Inverse_SSE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/arch/Inverse_SSE.h new file mode 100644 index 000000000..ebb64a62b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/LU/arch/Inverse_SSE.h @@ -0,0 +1,338 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2001 Intel Corporation
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// The SSE code for the 4x4 float and double matrix inverse in this file
+// comes from the following Intel's library:
+// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
+//
+// Here is the respective copyright and license statement:
+//
+// Copyright (c) 2001 Intel Corporation.
+//
+// Permition is granted to use, copy, distribute and prepare derivative works
+// of this library for any purpose and without fee, provided, that the above
+// copyright notice and this statement appear in all copies.
+// Intel makes no representations about the suitability of this software for
+// any purpose, and specifically disclaims all warranties.
+// See LEGAL.TXT for all the legal information.
+
+#ifndef EIGEN_INVERSE_SSE_H
+#define EIGEN_INVERSE_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
+{
+  enum {
+    MatrixAlignment     = traits<MatrixType>::Alignment,
+    ResultAlignment     = traits<ResultType>::Alignment,
+    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
+  };
+  typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
+
+  static void run(const MatrixType& mat, ResultType& result)
+  {
+    ActualMatrixType matrix(mat);
+    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
+
+    // Load the full matrix into registers
+    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
+    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
+    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
+    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);
+
+    // The inverse is calculated using "Divide and Conquer" technique. The
+    // original matrix is divide into four 2x2 sub-matrices.
Since each + // register holds four matrix element, the smaller matrices are + // represented as a registers. Hence we get a better locality of the + // calculations. + + __m128 A, B, C, D; // the four sub-matrices + if(!StorageOrdersMatch) + { + A = _mm_unpacklo_ps(_L1, _L2); + B = _mm_unpacklo_ps(_L3, _L4); + C = _mm_unpackhi_ps(_L1, _L2); + D = _mm_unpackhi_ps(_L3, _L4); + } + else + { + A = _mm_movelh_ps(_L1, _L2); + B = _mm_movehl_ps(_L2, _L1); + C = _mm_movelh_ps(_L3, _L4); + D = _mm_movehl_ps(_L4, _L3); + } + + __m128 iA, iB, iC, iD, // partial inverse of the sub-matrices + DC, AB; + __m128 dA, dB, dC, dD; // determinant of the sub-matrices + __m128 det, d, d1, d2; + __m128 rd; // reciprocal of the determinant + + // AB = A# * B + AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B); + AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E))); + // DC = D# * C + DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C); + DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E))); + + // dA = |A| + dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A); + dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA)); + // dB = |B| + dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B); + dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB)); + + // dC = |C| + dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C); + dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC)); + // dD = |D| + dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D); + dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD)); + + // d = trace(AB*DC) = trace(A#*B*D#*C) + d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB); + + // iD = C*A#*B + iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB)); + iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB))); + // iA = B*D#*C + iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC)); + iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC))); + + // d = trace(AB*DC) = trace(A#*B*D#*C) [continue] + d = _mm_add_ps(d, _mm_movehl_ps(d, d)); + d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1)); + d1 = _mm_mul_ss(dA,dD); + d2 = _mm_mul_ss(dB,dC); + + // iD = D*|A| - C*A#*B + iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD); + + // iA = A*|D| - B*D#*C; + iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA); + + // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C) + det = _mm_sub_ss(_mm_add_ss(d1,d2),d); + rd = _mm_div_ss(_mm_set_ss(1.0f), det); + +// #ifdef ZERO_SINGULAR +// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd); +// #endif + + // iB = D * (A#B)# = D*B#*A + iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33)); + iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66))); + // iC = A * (D#C)# = A*C#*D + iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33)); + iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66))); + + rd = _mm_shuffle_ps(rd,rd,0); + rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP)); + + // iB = C*|B| - D*B#*A + iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB); + + // iC = B*|C| - A*C#*D; + iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC); + + // iX = iX / det + iA = _mm_mul_ps(rd,iA); + iB = _mm_mul_ps(rd,iB); + iC = _mm_mul_ps(rd,iC); + iD = _mm_mul_ps(rd,iD); + + Index res_stride = result.outerStride(); + float* res = result.data(); + pstoret(res+0, _mm_shuffle_ps(iA,iB,0x77)); + pstoret(res+res_stride, _mm_shuffle_ps(iA,iB,0x22)); + pstoret(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77)); + pstoret(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22)); + } + +}; + +template +struct 
compute_inverse_size4 +{ + enum { + MatrixAlignment = traits::Alignment, + ResultAlignment = traits::Alignment, + StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit) + }; + typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType; + + static void run(const MatrixType& mat, ResultType& result) + { + ActualMatrixType matrix(mat); + const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); + const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + + // The inverse is calculated using "Divide and Conquer" technique. The + // original matrix is divide into four 2x2 sub-matrices. Since each + // register of the matrix holds two elements, the smaller matrices are + // consisted of two registers. Hence we get a better locality of the + // calculations. + + // the four sub-matrices + __m128d A1, A2, B1, B2, C1, C2, D1, D2; + + if(StorageOrdersMatch) + { + A1 = matrix.template packet( 0); B1 = matrix.template packet( 2); + A2 = matrix.template packet( 4); B2 = matrix.template packet( 6); + C1 = matrix.template packet( 8); D1 = matrix.template packet(10); + C2 = matrix.template packet(12); D2 = matrix.template packet(14); + } + else + { + __m128d tmp; + A1 = matrix.template packet( 0); C1 = matrix.template packet( 2); + A2 = matrix.template packet( 4); C2 = matrix.template packet( 6); + tmp = A1; + A1 = _mm_unpacklo_pd(A1,A2); + A2 = _mm_unpackhi_pd(tmp,A2); + tmp = C1; + C1 = _mm_unpacklo_pd(C1,C2); + C2 = _mm_unpackhi_pd(tmp,C2); + + B1 = matrix.template packet( 8); D1 = matrix.template packet(10); + B2 = matrix.template packet(12); D2 = matrix.template packet(14); + tmp = B1; + B1 = _mm_unpacklo_pd(B1,B2); + B2 = _mm_unpackhi_pd(tmp,B2); + tmp = D1; + D1 = _mm_unpacklo_pd(D1,D2); + D2 = _mm_unpackhi_pd(tmp,D2); + } + + __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial invese of the sub-matrices + DC1, DC2, AB1, AB2; + __m128d dA, dB, dC, dD; // determinant of the sub-matrices + __m128d det, d1, d2, rd; + + // dA = |A| + dA = _mm_shuffle_pd(A2, A2, 1); + dA = _mm_mul_pd(A1, dA); + dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3)); + // dB = |B| + dB = _mm_shuffle_pd(B2, B2, 1); + dB = _mm_mul_pd(B1, dB); + dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3)); + + // AB = A# * B + AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3)); + AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0)); + AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3))); + AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0))); + + // dC = |C| + dC = _mm_shuffle_pd(C2, C2, 1); + dC = _mm_mul_pd(C1, dC); + dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3)); + // dD = |D| + dD = _mm_shuffle_pd(D2, D2, 1); + dD = _mm_mul_pd(D1, dD); + dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3)); + + // DC = D# * C + DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3)); + DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0)); + DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3))); + DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0))); + + // rd = trace(AB*DC) = trace(A#*B*D#*C) + d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0)); + d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3)); + rd = _mm_add_pd(d1, d2); + rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3)); + + // iD = C*A#*B + iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0)); + iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0)); + iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3))); + iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, 
_mm_shuffle_pd(C2,C2,3))); + + // iA = B*D#*C + iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0)); + iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0)); + iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3))); + iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3))); + + // iD = D*|A| - C*A#*B + dA = _mm_shuffle_pd(dA,dA,0); + iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1); + iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2); + + // iA = A*|D| - B*D#*C; + dD = _mm_shuffle_pd(dD,dD,0); + iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1); + iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2); + + d1 = _mm_mul_sd(dA, dD); + d2 = _mm_mul_sd(dB, dC); + + // iB = D * (A#B)# = D*B#*A + iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1)); + iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1)); + iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2))); + iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2))); + + // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C) + det = _mm_add_sd(d1, d2); + det = _mm_sub_sd(det, rd); + + // iC = A * (D#C)# = A*C#*D + iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1)); + iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1)); + iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2))); + iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2))); + + rd = _mm_div_sd(_mm_set_sd(1.0), det); +// #ifdef ZERO_SINGULAR +// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd); +// #endif + rd = _mm_shuffle_pd(rd,rd,0); + + // iB = C*|B| - D*B#*A + dB = _mm_shuffle_pd(dB,dB,0); + iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1); + iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2); + + d1 = _mm_xor_pd(rd, _Sign_PN); + d2 = _mm_xor_pd(rd, _Sign_NP); + + // iC = B*|C| - A*C#*D; + dC = _mm_shuffle_pd(dC,dC,0); + iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1); + iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2); + + Index res_stride = result.outerStride(); + double* res = result.data(); + pstoret(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); + pstoret(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2)); + pstoret(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); + pstoret(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2)); + pstoret(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); + pstoret(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2)); + pstoret(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); + pstoret(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2)); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INVERSE_SSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/MetisSupport/MetisSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/MetisSupport/MetisSupport.h new file mode 100644 index 000000000..4c15304ad --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/MetisSupport/MetisSupport.h @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
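A quick cross-check of the two SSE kernels above: for fixed-size 4x4 matrices Eigen dispatches `mat.inverse()` to `compute_inverse_size4`, so with SSE enabled the blockwise cofactor result can be compared against a plain LU-based solve. A minimal sketch, assuming a well-conditioned random draw:

```cpp
#include <iostream>
#include <Eigen/Dense>

int main()
{
    // mat.inverse() on a fixed-size 4x4 goes through compute_inverse_size4;
    // with SSE enabled that is the blockwise cofactor kernel above, not LU.
    Eigen::Matrix4f M = Eigen::Matrix4f::Random();
    Eigen::Matrix4f inv_fast = M.inverse();

    // Cross-check against a partial-pivot LU solve of M X = I.
    Eigen::Matrix4f inv_lu =
        Eigen::PartialPivLU<Eigen::Matrix4f>(M).solve(Eigen::Matrix4f::Identity());

    std::cout << (inv_fast - inv_lu).norm() << "\n";  // small, ~1e-6 in float
    return 0;
}
```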
+#ifndef METIS_SUPPORT_H +#define METIS_SUPPORT_H + +namespace Eigen { +/** + * Get the fill-reducing ordering from the METIS package + * + * If A is the original matrix and Ap is the permuted matrix, + * the fill-reducing permutation is defined as follows : + * Row (column) i of A is the matperm(i) row (column) of Ap. + * WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm) + */ +template +class MetisOrdering +{ +public: + typedef PermutationMatrix PermutationType; + typedef Matrix IndexVector; + + template + void get_symmetrized_graph(const MatrixType& A) + { + Index m = A.cols(); + eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARED MATRICES"); + // Get the transpose of the input matrix + MatrixType At = A.transpose(); + // Get the number of nonzeros elements in each row/col of At+A + Index TotNz = 0; + IndexVector visited(m); + visited.setConstant(-1); + for (StorageIndex j = 0; j < m; j++) + { + // Compute the union structure of of A(j,:) and At(j,:) + visited(j) = j; // Do not include the diagonal element + // Get the nonzeros in row/column j of A + for (typename MatrixType::InnerIterator it(A, j); it; ++it) + { + Index idx = it.index(); // Get the row index (for column major) or column index (for row major) + if (visited(idx) != j ) + { + visited(idx) = j; + ++TotNz; + } + } + //Get the nonzeros in row/column j of At + for (typename MatrixType::InnerIterator it(At, j); it; ++it) + { + Index idx = it.index(); + if(visited(idx) != j) + { + visited(idx) = j; + ++TotNz; + } + } + } + // Reserve place for A + At + m_indexPtr.resize(m+1); + m_innerIndices.resize(TotNz); + + // Now compute the real adjacency list of each column/row + visited.setConstant(-1); + StorageIndex CurNz = 0; + for (StorageIndex j = 0; j < m; j++) + { + m_indexPtr(j) = CurNz; + + visited(j) = j; // Do not include the diagonal element + // Add the pattern of row/column j of A to A+At + for (typename MatrixType::InnerIterator it(A,j); it; ++it) + { + StorageIndex idx = it.index(); // Get the row index (for column major) or column index (for row major) + if (visited(idx) != j ) + { + visited(idx) = j; + m_innerIndices(CurNz) = idx; + CurNz++; + } + } + //Add the pattern of row/column j of At to A+At + for (typename MatrixType::InnerIterator it(At, j); it; ++it) + { + StorageIndex idx = it.index(); + if(visited(idx) != j) + { + visited(idx) = j; + m_innerIndices(CurNz) = idx; + ++CurNz; + } + } + } + m_indexPtr(m) = CurNz; + } + + template + void operator() (const MatrixType& A, PermutationType& matperm) + { + StorageIndex m = internal::convert_index(A.cols()); // must be StorageIndex, because it is passed by address to METIS + IndexVector perm(m),iperm(m); + // First, symmetrize the matrix graph. 
+ get_symmetrized_graph(A); + int output_error; + + // Call the fill-reducing routine from METIS + output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data()); + + if(output_error != METIS_OK) + { + //FIXME The ordering interface should define a class of possible errors + std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n"; + return; + } + + // Get the fill-reducing permutation + //NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows + // Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap + + matperm.resize(m); + for (int j = 0; j < m; j++) + matperm.indices()(iperm(j)) = j; + + } + + protected: + IndexVector m_indexPtr; // Pointer to the adjacenccy list of each row/column + IndexVector m_innerIndices; // Adjacency list +}; + +}// end namespace eigen +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Amd.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Amd.h new file mode 100644 index 000000000..f91ecb24e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Amd.h @@ -0,0 +1,445 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud + +/* + +NOTE: this routine has been adapted from the CSparse library: + +Copyright (c) 2006, Timothy A. Davis. +http://www.suitesparse.com + +CSparse is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +CSparse is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this Module; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +*/ + +#include "../Core/util/NonMPL2.h" + +#ifndef EIGEN_SPARSE_AMD_H +#define EIGEN_SPARSE_AMD_H + +namespace Eigen { + +namespace internal { + +template inline T amd_flip(const T& i) { return -i-2; } +template inline T amd_unflip(const T& i) { return i<0 ? 
amd_flip(i) : i; } +template inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; } +template inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); } + +/* clear w */ +template +static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n) +{ + StorageIndex k; + if(mark < 2 || (mark + lemax < 0)) + { + for(k = 0; k < n; k++) + if(w[k] != 0) + w[k] = 1; + mark = 2; + } + return (mark); /* at this point, w[0..n-1] < mark holds */ +} + +/* depth-first search and postorder of a tree rooted at node j */ +template +StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack) +{ + StorageIndex i, p, top = 0; + if(!head || !next || !post || !stack) return (-1); /* check inputs */ + stack[0] = j; /* place j on the stack */ + while (top >= 0) /* while (stack is not empty) */ + { + p = stack[top]; /* p = top of stack */ + i = head[p]; /* i = youngest child of p */ + if(i == -1) + { + top--; /* p has no unordered children left */ + post[k++] = p; /* node p is the kth postordered node */ + } + else + { + head[p] = next[i]; /* remove i from children of p */ + stack[++top] = i; /* start dfs on child node i */ + } + } + return k; +} + + +/** \internal + * \ingroup OrderingMethods_Module + * Approximate minimum degree ordering algorithm. + * + * \param[in] C the input selfadjoint matrix stored in compressed column major format. + * \param[out] perm the permutation P reducing the fill-in of the input matrix \a C + * + * Note that the input matrix \a C must be complete, that is both the upper and lower parts have to be stored, as well as the diagonal entries. + * On exit the values of C are destroyed */ +template +void minimum_degree_ordering(SparseMatrix& C, PermutationMatrix& perm) +{ + using std::sqrt; + + StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, + k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, + ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h; + + StorageIndex n = StorageIndex(C.cols()); + dense = std::max (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */ + dense = (std::min)(n-2, dense); + + StorageIndex cnz = StorageIndex(C.nonZeros()); + perm.resize(n+1); + t = cnz + cnz/5 + 2*n; /* add elbow room to C */ + C.resizeNonZeros(t); + + // get workspace + ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0); + StorageIndex* len = W; + StorageIndex* nv = W + (n+1); + StorageIndex* next = W + 2*(n+1); + StorageIndex* head = W + 3*(n+1); + StorageIndex* elen = W + 4*(n+1); + StorageIndex* degree = W + 5*(n+1); + StorageIndex* w = W + 6*(n+1); + StorageIndex* hhead = W + 7*(n+1); + StorageIndex* last = perm.indices().data(); /* use P as workspace for last */ + + /* --- Initialize quotient graph ---------------------------------------- */ + StorageIndex* Cp = C.outerIndexPtr(); + StorageIndex* Ci = C.innerIndexPtr(); + for(k = 0; k < n; k++) + len[k] = Cp[k+1] - Cp[k]; + len[n] = 0; + nzmax = t; + + for(i = 0; i <= n; i++) + { + head[i] = -1; // degree list i is empty + last[i] = -1; + next[i] = -1; + hhead[i] = -1; // hash list i is empty + nv[i] = 1; // node i is just one node + w[i] = 1; // node i is alive + elen[i] = 0; // Ek of node i is empty + degree[i] = len[i]; // degree of node i + } + mark = internal::cs_wclear(0, 0, w, n); /* clear w */ + + /* --- Initialize degree lists ------------------------------------------ */ + for(i = 0; i < n; i++) + { 
+ bool has_diag = false; + for(p = Cp[i]; p dense || !has_diag) /* node i is dense or has no structural diagonal element */ + { + nv[i] = 0; /* absorb i into element n */ + elen[i] = -1; /* node i is dead */ + nel++; + Cp[i] = amd_flip (n); + nv[n]++; + } + else + { + if(head[d] != -1) last[head[d]] = i; + next[i] = head[d]; /* put node i in degree list d */ + head[d] = i; + } + } + + elen[n] = -2; /* n is a dead element */ + Cp[n] = -1; /* n is a root of assembly tree */ + w[n] = 0; /* n is a dead element */ + + while (nel < n) /* while (selecting pivots) do */ + { + /* --- Select node of minimum approximate degree -------------------- */ + for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {} + if(next[k] != -1) last[next[k]] = -1; + head[mindeg] = next[k]; /* remove k from degree list */ + elenk = elen[k]; /* elenk = |Ek| */ + nvk = nv[k]; /* # of nodes k represents */ + nel += nvk; /* nv[k] nodes of A eliminated */ + + /* --- Garbage collection ------------------------------------------- */ + if(elenk > 0 && cnz + mindeg >= nzmax) + { + for(j = 0; j < n; j++) + { + if((p = Cp[j]) >= 0) /* j is a live node or element */ + { + Cp[j] = Ci[p]; /* save first entry of object */ + Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */ + } + } + for(q = 0, p = 0; p < cnz; ) /* scan all of memory */ + { + if((j = amd_flip (Ci[p++])) >= 0) /* found object j */ + { + Ci[q] = Cp[j]; /* restore first entry of object */ + Cp[j] = q++; /* new pointer to object j */ + for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++]; + } + } + cnz = q; /* Ci[cnz...nzmax-1] now free */ + } + + /* --- Construct new element ---------------------------------------- */ + dk = 0; + nv[k] = -nvk; /* flag k as in Lk */ + p = Cp[k]; + pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */ + pk2 = pk1; + for(k1 = 1; k1 <= elenk + 1; k1++) + { + if(k1 > elenk) + { + e = k; /* search the nodes in k */ + pj = p; /* list of nodes starts at Ci[pj]*/ + ln = len[k] - elenk; /* length of list of nodes in k */ + } + else + { + e = Ci[p++]; /* search the nodes in e */ + pj = Cp[e]; + ln = len[e]; /* length of list of nodes in e */ + } + for(k2 = 1; k2 <= ln; k2++) + { + i = Ci[pj++]; + if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */ + dk += nvi; /* degree[Lk] += size of node i */ + nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/ + Ci[pk2++] = i; /* place i in Lk */ + if(next[i] != -1) last[next[i]] = last[i]; + if(last[i] != -1) /* remove i from degree list */ + { + next[last[i]] = next[i]; + } + else + { + head[degree[i]] = next[i]; + } + } + if(e != k) + { + Cp[e] = amd_flip (k); /* absorb e into k */ + w[e] = 0; /* e is now a dead element */ + } + } + if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */ + degree[k] = dk; /* external degree of k - |Lk\i| */ + Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */ + len[k] = pk2 - pk1; + elen[k] = -2; /* k is now an element */ + + /* --- Find set differences ----------------------------------------- */ + mark = internal::cs_wclear(mark, lemax, w, n); /* clear w if necessary */ + for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */ + { + i = Ci[pk]; + if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */ + nvi = -nv[i]; /* nv[i] was negated */ + wnvi = mark - nvi; + for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */ + { + e = Ci[p]; + if(w[e] >= mark) + { + w[e] -= nvi; /* decrement |Le\Lk| */ + } + else if(w[e] != 0) /* ensure e is a live element */ + { + w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */ + } + } + } + + /* 
--- Degree update ------------------------------------------------ */ + for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */ + { + i = Ci[pk]; /* consider node i in Lk */ + p1 = Cp[i]; + p2 = p1 + elen[i] - 1; + pn = p1; + for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */ + { + e = Ci[p]; + if(w[e] != 0) /* e is an unabsorbed element */ + { + dext = w[e] - mark; /* dext = |Le\Lk| */ + if(dext > 0) + { + d += dext; /* sum up the set differences */ + Ci[pn++] = e; /* keep e in Ei */ + h += e; /* compute the hash of node i */ + } + else + { + Cp[e] = amd_flip (k); /* aggressive absorb. e->k */ + w[e] = 0; /* e is a dead element */ + } + } + } + elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */ + p3 = pn; + p4 = p1 + len[i]; + for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */ + { + j = Ci[p]; + if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */ + d += nvj; /* degree(i) += |j| */ + Ci[pn++] = j; /* place j in node list of i */ + h += j; /* compute hash for node i */ + } + if(d == 0) /* check for mass elimination */ + { + Cp[i] = amd_flip (k); /* absorb i into k */ + nvi = -nv[i]; + dk -= nvi; /* |Lk| -= |i| */ + nvk += nvi; /* |k| += nv[i] */ + nel += nvi; + nv[i] = 0; + elen[i] = -1; /* node i is dead */ + } + else + { + degree[i] = std::min (degree[i], d); /* update degree(i) */ + Ci[pn] = Ci[p3]; /* move first node to end */ + Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */ + Ci[p1] = k; /* add k as 1st element in of Ei */ + len[i] = pn - p1 + 1; /* new len of adj. list of node i */ + h %= n; /* finalize hash of i */ + next[i] = hhead[h]; /* place i in hash bucket */ + hhead[h] = i; + last[i] = h; /* save hash of i in last[i] */ + } + } /* scan2 is done */ + degree[k] = dk; /* finalize |Lk| */ + lemax = std::max(lemax, dk); + mark = internal::cs_wclear(mark+lemax, lemax, w, n); /* clear w */ + + /* --- Supernode detection ------------------------------------------ */ + for(pk = pk1; pk < pk2; pk++) + { + i = Ci[pk]; + if(nv[i] >= 0) continue; /* skip if i is dead */ + h = last[i]; /* scan hash bucket of node i */ + i = hhead[h]; + hhead[h] = -1; /* hash bucket will be empty */ + for(; i != -1 && next[i] != -1; i = next[i], mark++) + { + ln = len[i]; + eln = elen[i]; + for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark; + jlast = i; + for(j = next[i]; j != -1; ) /* compare i with all j */ + { + ok = (len[j] == ln) && (elen[j] == eln); + for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++) + { + if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/ + } + if(ok) /* i and j are identical */ + { + Cp[j] = amd_flip (i); /* absorb j into i */ + nv[i] += nv[j]; + nv[j] = 0; + elen[j] = -1; /* node j is dead */ + j = next[j]; /* delete j from hash bucket */ + next[jlast] = j; + } + else + { + jlast = j; /* j and i are different */ + j = next[j]; + } + } + } + } + + /* --- Finalize new element------------------------------------------ */ + for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */ + { + i = Ci[pk]; + if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */ + nv[i] = nvi; /* restore nv[i] */ + d = degree[i] + dk - nvi; /* compute external degree(i) */ + d = std::min (d, n - nel - nvi); + if(head[d] != -1) last[head[d]] = i; + next[i] = head[d]; /* put i back in degree list */ + last[i] = -1; + head[d] = i; + mindeg = std::min (mindeg, d); /* find new minimum degree */ + degree[i] = d; + Ci[p++] = i; /* place i in Lk */ + } + nv[k] = nvk; /* # nodes absorbed into k */ + if((len[k] = p-pk1) == 0) /* length of adj list of element k*/ + { + Cp[k] = -1; /* k is a root of the 
tree */ + w[k] = 0; /* k is now a dead element */ + } + if(elenk != 0) cnz = p; /* free unused space in Lk */ + } + + /* --- Postordering ----------------------------------------------------- */ + for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */ + for(j = 0; j <= n; j++) head[j] = -1; + for(j = n; j >= 0; j--) /* place unordered nodes in lists */ + { + if(nv[j] > 0) continue; /* skip if j is an element */ + next[j] = head[Cp[j]]; /* place j in list of its parent */ + head[Cp[j]] = j; + } + for(e = n; e >= 0; e--) /* place elements in lists */ + { + if(nv[e] <= 0) continue; /* skip unless e is an element */ + if(Cp[e] != -1) + { + next[e] = head[Cp[e]]; /* place e in list of its parent */ + head[Cp[e]] = e; + } + } + for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */ + { + if(Cp[i] == -1) k = internal::cs_tdfs(i, k, head, next, perm.indices().data(), w); + } + + perm.indices().conservativeResize(n); +} + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_AMD_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h new file mode 100644 index 000000000..da85b4d6e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h @@ -0,0 +1,1843 @@ +// // This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire Nuentsa Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This file is modified from the colamd/symamd library. The copyright is below + +// The authors of the code itself are Stefan I. Larimore and Timothy A. +// Davis (davis@cise.ufl.edu), University of Florida. The algorithm was +// developed in collaboration with John Gilbert, Xerox PARC, and Esmond +// Ng, Oak Ridge National Laboratory. +// +// Date: +// +// September 8, 2003. Version 2.3. +// +// Acknowledgements: +// +// This work was supported by the National Science Foundation, under +// grants DMS-9504974 and DMS-9803599. +// +// Notice: +// +// Copyright (c) 1998-2003 by the University of Florida. +// All Rights Reserved. +// +// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY +// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. +// +// Permission is hereby granted to use, copy, modify, and/or distribute +// this program, provided that the Copyright, this License, and the +// Availability of the original version is retained on all copies and made +// accessible to the end-user of any code or package that includes COLAMD +// or any modified version of COLAMD. 
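The `minimum_degree_ordering()` routine that just ended is normally reached through Eigen's public `AMDOrdering` functor, which is also the default fill-reducing ordering of `SimplicialLDLT`. A minimal sketch on a small SPD matrix, with made-up data:

```cpp
#include <iostream>
#include <vector>
#include <Eigen/Sparse>

int main()
{
    typedef Eigen::SparseMatrix<double> SpMat;

    // 1-D Poisson matrix: symmetric positive definite, tridiagonal.
    const int n = 8;
    std::vector<Eigen::Triplet<double>> t;
    for (int i = 0; i < n; ++i) {
        t.emplace_back(i, i, 2.0);
        if (i + 1 < n) { t.emplace_back(i, i + 1, -1.0); t.emplace_back(i + 1, i, -1.0); }
    }
    SpMat A(n, n);
    A.setFromTriplets(t.begin(), t.end());

    // AMDOrdering runs minimum_degree_ordering() under the hood.
    Eigen::AMDOrdering<int> ordering;
    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
    ordering(A.selfadjointView<Eigen::Lower>(), perm);
    std::cout << "perm: " << perm.indices().transpose() << "\n";

    // SimplicialLDLT applies the same AMD ordering internally by default.
    Eigen::SimplicialLDLT<SpMat> solver(A);
    Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
    std::cout << solver.solve(b).transpose() << "\n";
    return 0;
}
```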
+// +// Availability: +// +// The colamd/symamd library is available at +// +// http://www.suitesparse.com + + +#ifndef EIGEN_COLAMD_H +#define EIGEN_COLAMD_H + +namespace internal { +/* Ensure that debugging is turned off: */ +#ifndef COLAMD_NDEBUG +#define COLAMD_NDEBUG +#endif /* NDEBUG */ +/* ========================================================================== */ +/* === Knob and statistics definitions ====================================== */ +/* ========================================================================== */ + +/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */ +#define COLAMD_KNOBS 20 + +/* number of output statistics. Only stats [0..6] are currently used. */ +#define COLAMD_STATS 20 + +/* knobs [0] and stats [0]: dense row knob and output statistic. */ +#define COLAMD_DENSE_ROW 0 + +/* knobs [1] and stats [1]: dense column knob and output statistic. */ +#define COLAMD_DENSE_COL 1 + +/* stats [2]: memory defragmentation count output statistic */ +#define COLAMD_DEFRAG_COUNT 2 + +/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */ +#define COLAMD_STATUS 3 + +/* stats [4..6]: error info, or info on jumbled columns */ +#define COLAMD_INFO1 4 +#define COLAMD_INFO2 5 +#define COLAMD_INFO3 6 + +/* error codes returned in stats [3]: */ +#define COLAMD_OK (0) +#define COLAMD_OK_BUT_JUMBLED (1) +#define COLAMD_ERROR_A_not_present (-1) +#define COLAMD_ERROR_p_not_present (-2) +#define COLAMD_ERROR_nrow_negative (-3) +#define COLAMD_ERROR_ncol_negative (-4) +#define COLAMD_ERROR_nnz_negative (-5) +#define COLAMD_ERROR_p0_nonzero (-6) +#define COLAMD_ERROR_A_too_small (-7) +#define COLAMD_ERROR_col_length_negative (-8) +#define COLAMD_ERROR_row_index_out_of_bounds (-9) +#define COLAMD_ERROR_out_of_memory (-10) +#define COLAMD_ERROR_internal_error (-999) + +/* ========================================================================== */ +/* === Definitions ========================================================== */ +/* ========================================================================== */ + +#define ONES_COMPLEMENT(r) (-(r)-1) + +/* -------------------------------------------------------------------------- */ + +#define COLAMD_EMPTY (-1) + +/* Row and column status */ +#define ALIVE (0) +#define DEAD (-1) + +/* Column status */ +#define DEAD_PRINCIPAL (-1) +#define DEAD_NON_PRINCIPAL (-2) + +/* Macros for row and column status update and checking. 
*/ +#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark) +#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE) +#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE) +#define COL_IS_DEAD(c) (Col [c].start < ALIVE) +#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE) +#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL) +#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; } +#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; } +#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; } + +/* ========================================================================== */ +/* === Colamd reporting mechanism =========================================== */ +/* ========================================================================== */ + +// == Row and Column structures == +template +struct colamd_col +{ + IndexType start ; /* index for A of first row in this column, or DEAD */ + /* if column is dead */ + IndexType length ; /* number of rows in this column */ + union + { + IndexType thickness ; /* number of original columns represented by this */ + /* col, if the column is alive */ + IndexType parent ; /* parent in parent tree super-column structure, if */ + /* the column is dead */ + } shared1 ; + union + { + IndexType score ; /* the score used to maintain heap, if col is alive */ + IndexType order ; /* pivot ordering of this column, if col is dead */ + } shared2 ; + union + { + IndexType headhash ; /* head of a hash bucket, if col is at the head of */ + /* a degree list */ + IndexType hash ; /* hash value, if col is not in a degree list */ + IndexType prev ; /* previous column in degree list, if col is in a */ + /* degree list (but not at the head of a degree list) */ + } shared3 ; + union + { + IndexType degree_next ; /* next column, if col is in a degree list */ + IndexType hash_next ; /* next column, if col is in a hash list */ + } shared4 ; + +}; + +template +struct Colamd_Row +{ + IndexType start ; /* index for A of first col in this row */ + IndexType length ; /* number of principal columns in this row */ + union + { + IndexType degree ; /* number of principal & non-principal columns in row */ + IndexType p ; /* used as a row pointer in init_rows_cols () */ + } shared1 ; + union + { + IndexType mark ; /* for computing set differences and marking dead rows*/ + IndexType first_column ;/* first column in row (used in garbage collection) */ + } shared2 ; + +}; + +/* ========================================================================== */ +/* === Colamd recommended memory size ======================================= */ +/* ========================================================================== */ + +/* + The recommended length Alen of the array A passed to colamd is given by + the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any + argument is negative. 2*nnz space is required for the row and column + indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is + required for the Col and Row arrays, respectively, which are internal to + colamd. An additional n_col space is the minimal amount of "elbow room", + and nnz/5 more space is recommended for run time efficiency. + + This macro is not needed when using symamd. + + Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid + gcc -pedantic warning messages. 
+*/ +template +inline IndexType colamd_c(IndexType n_col) +{ return IndexType( ((n_col) + 1) * sizeof (colamd_col) / sizeof (IndexType) ) ; } + +template +inline IndexType colamd_r(IndexType n_row) +{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row) / sizeof (IndexType)); } + +// Prototypes of non-user callable routines +template +static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] ); + +template +static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg); + +template +static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree); + +template +static void order_children (IndexType n_col, colamd_col Col [], IndexType p []); + +template +static void detect_super_cols (colamd_col Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ; + +template +static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType *pfree) ; + +template +static inline IndexType clear_mark (IndexType n_row, Colamd_Row Row [] ) ; + +/* === No debugging ========================================================= */ + +#define COLAMD_DEBUG0(params) ; +#define COLAMD_DEBUG1(params) ; +#define COLAMD_DEBUG2(params) ; +#define COLAMD_DEBUG3(params) ; +#define COLAMD_DEBUG4(params) ; + +#define COLAMD_ASSERT(expression) ((void) 0) + + +/** + * \brief Returns the recommended value of Alen + * + * Returns recommended value of Alen for use by colamd. + * Returns -1 if any input argument is negative. + * The use of this routine or macro is optional. + * Note that the macro uses its arguments more than once, + * so be careful for side effects, if you pass expressions as arguments to COLAMD_RECOMMENDED. + * + * \param nnz nonzeros in A + * \param n_row number of rows in A + * \param n_col number of columns in A + * \return recommended value of Alen for use by colamd + */ +template +inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col) +{ + if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0) + return (-1); + else + return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5)); +} + +/** + * \brief set default parameters The use of this routine is optional. + * + * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col) + * entries are removed prior to ordering. Columns with more than + * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to + * ordering, and placed last in the output column ordering. + * + * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1, + * respectively, in colamd.h. Default values of these two knobs + * are both 0.5. Currently, only knobs [0] and knobs [1] are + * used, but future versions may use more knobs. If so, they will + * be properly set to their defaults by the future version of + * colamd_set_defaults, so that the code that calls colamd will + * not need to change, assuming that you either use + * colamd_set_defaults, or pass a (double *) NULL pointer as the + * knobs array to colamd or symamd. 
+ * + * \param knobs parameter settings for colamd + */ + +static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS]) +{ + /* === Local variables ================================================== */ + + int i ; + + if (!knobs) + { + return ; /* no knobs to initialize */ + } + for (i = 0 ; i < COLAMD_KNOBS ; i++) + { + knobs [i] = 0 ; + } + knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */ + knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */ +} + +/** + * \brief Computes a column ordering using the column approximate minimum degree ordering + * + * Computes a column ordering (Q) of A such that P(AQ)=LU or + * (AQ)'AQ=LL' have less fill-in and require fewer floating point + * operations than factorizing the unpermuted matrix A or A'A, + * respectively. + * + * + * \param n_row number of rows in A + * \param n_col number of columns in A + * \param Alen, size of the array A + * \param A row indices of the matrix, of size ALen + * \param p column pointers of A, of size n_col+1 + * \param knobs parameter settings for colamd + * \param stats colamd output statistics and error codes + */ +template +static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS]) +{ + /* === Local variables ================================================== */ + + IndexType i ; /* loop index */ + IndexType nnz ; /* nonzeros in A */ + IndexType Row_size ; /* size of Row [], in integers */ + IndexType Col_size ; /* size of Col [], in integers */ + IndexType need ; /* minimum required length of A */ + Colamd_Row *Row ; /* pointer into A of Row [0..n_row] array */ + colamd_col *Col ; /* pointer into A of Col [0..n_col] array */ + IndexType n_col2 ; /* number of non-dense, non-empty columns */ + IndexType n_row2 ; /* number of non-dense, non-empty rows */ + IndexType ngarbage ; /* number of garbage collections performed */ + IndexType max_deg ; /* maximum row degree */ + double default_knobs [COLAMD_KNOBS] ; /* default knobs array */ + + + /* === Check the input arguments ======================================== */ + + if (!stats) + { + COLAMD_DEBUG0 (("colamd: stats not present\n")) ; + return (false) ; + } + for (i = 0 ; i < COLAMD_STATS ; i++) + { + stats [i] = 0 ; + } + stats [COLAMD_STATUS] = COLAMD_OK ; + stats [COLAMD_INFO1] = -1 ; + stats [COLAMD_INFO2] = -1 ; + + if (!A) /* A is not present */ + { + stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ; + COLAMD_DEBUG0 (("colamd: A not present\n")) ; + return (false) ; + } + + if (!p) /* p is not present */ + { + stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ; + COLAMD_DEBUG0 (("colamd: p not present\n")) ; + return (false) ; + } + + if (n_row < 0) /* n_row must be >= 0 */ + { + stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ; + stats [COLAMD_INFO1] = n_row ; + COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ; + return (false) ; + } + + if (n_col < 0) /* n_col must be >= 0 */ + { + stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ; + stats [COLAMD_INFO1] = n_col ; + COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ; + return (false) ; + } + + nnz = p [n_col] ; + if (nnz < 0) /* nnz must be >= 0 */ + { + stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ; + stats [COLAMD_INFO1] = nnz ; + COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ; + return (false) ; + } + + if (p [0] != 0) + { + stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ; + stats [COLAMD_INFO1] = p [0] ; + COLAMD_DEBUG0 (("colamd: 
p[0] not zero %d\n", p [0])) ; + return (false) ; + } + + /* === If no knobs, set default knobs =================================== */ + + if (!knobs) + { + colamd_set_defaults (default_knobs) ; + knobs = default_knobs ; + } + + /* === Allocate the Row and Col arrays from array A ===================== */ + + Col_size = colamd_c (n_col) ; + Row_size = colamd_r (n_row) ; + need = 2*nnz + n_col + Col_size + Row_size ; + + if (need > Alen) + { + /* not enough space in array A to perform the ordering */ + stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ; + stats [COLAMD_INFO1] = need ; + stats [COLAMD_INFO2] = Alen ; + COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen)); + return (false) ; + } + + Alen -= Col_size + Row_size ; + Col = (colamd_col *) &A [Alen] ; + Row = (Colamd_Row *) &A [Alen + Col_size] ; + + /* === Construct the row and column data structures ===================== */ + + if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats)) + { + /* input matrix is invalid */ + COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ; + return (false) ; + } + + /* === Initialize scores, kill dense rows/columns ======================= */ + + Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs, + &n_row2, &n_col2, &max_deg) ; + + /* === Order the supercolumns =========================================== */ + + ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p, + n_col2, max_deg, 2*nnz) ; + + /* === Order the non-principal columns ================================== */ + + Eigen::internal::order_children (n_col, Col, p) ; + + /* === Return statistics in stats ======================================= */ + + stats [COLAMD_DENSE_ROW] = n_row - n_row2 ; + stats [COLAMD_DENSE_COL] = n_col - n_col2 ; + stats [COLAMD_DEFRAG_COUNT] = ngarbage ; + COLAMD_DEBUG0 (("colamd: done.\n")) ; + return (true) ; +} + +/* ========================================================================== */ +/* === NON-USER-CALLABLE ROUTINES: ========================================== */ +/* ========================================================================== */ + +/* There are no user-callable routines beyond this point in the file */ + + +/* ========================================================================== */ +/* === init_rows_cols ======================================================= */ +/* ========================================================================== */ + +/* + Takes the column form of the matrix in A and creates the row form of the + matrix. Also, row and column attributes are stored in the Col and Row + structs. If the columns are un-sorted or contain duplicate row indices, + this routine will also sort and remove duplicate row indices from the + column form of the matrix. Returns false if the matrix is invalid, + true otherwise. Not user-callable. 
+*/ +template +static IndexType init_rows_cols /* returns true if OK, or false otherwise */ + ( + /* === Parameters ======================================================= */ + + IndexType n_row, /* number of rows of A */ + IndexType n_col, /* number of columns of A */ + Colamd_Row Row [], /* of size n_row+1 */ + colamd_col Col [], /* of size n_col+1 */ + IndexType A [], /* row indices of A, of size Alen */ + IndexType p [], /* pointers to columns in A, of size n_col+1 */ + IndexType stats [COLAMD_STATS] /* colamd statistics */ + ) +{ + /* === Local variables ================================================== */ + + IndexType col ; /* a column index */ + IndexType row ; /* a row index */ + IndexType *cp ; /* a column pointer */ + IndexType *cp_end ; /* a pointer to the end of a column */ + IndexType *rp ; /* a row pointer */ + IndexType *rp_end ; /* a pointer to the end of a row */ + IndexType last_row ; /* previous row */ + + /* === Initialize columns, and check column pointers ==================== */ + + for (col = 0 ; col < n_col ; col++) + { + Col [col].start = p [col] ; + Col [col].length = p [col+1] - p [col] ; + + if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200 + { + /* column pointers must be non-decreasing */ + stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ; + stats [COLAMD_INFO1] = col ; + stats [COLAMD_INFO2] = Col [col].length ; + COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ; + return (false) ; + } + + Col [col].shared1.thickness = 1 ; + Col [col].shared2.score = 0 ; + Col [col].shared3.prev = COLAMD_EMPTY ; + Col [col].shared4.degree_next = COLAMD_EMPTY ; + } + + /* p [0..n_col] no longer needed, used as "head" in subsequent routines */ + + /* === Scan columns, compute row degrees, and check row indices ========= */ + + stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/ + + for (row = 0 ; row < n_row ; row++) + { + Row [row].length = 0 ; + Row [row].shared2.mark = -1 ; + } + + for (col = 0 ; col < n_col ; col++) + { + last_row = -1 ; + + cp = &A [p [col]] ; + cp_end = &A [p [col+1]] ; + + while (cp < cp_end) + { + row = *cp++ ; + + /* make sure row indices within range */ + if (row < 0 || row >= n_row) + { + stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ; + stats [COLAMD_INFO1] = col ; + stats [COLAMD_INFO2] = row ; + stats [COLAMD_INFO3] = n_row ; + COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ; + return (false) ; + } + + if (row <= last_row || Row [row].shared2.mark == col) + { + /* row index are unsorted or repeated (or both), thus col */ + /* is jumbled. This is a notice, not an error condition. 
*/ + stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ; + stats [COLAMD_INFO1] = col ; + stats [COLAMD_INFO2] = row ; + (stats [COLAMD_INFO3]) ++ ; + COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col)); + } + + if (Row [row].shared2.mark != col) + { + Row [row].length++ ; + } + else + { + /* this is a repeated entry in the column, */ + /* it will be removed */ + Col [col].length-- ; + } + + /* mark the row as having been seen in this column */ + Row [row].shared2.mark = col ; + + last_row = row ; + } + } + + /* === Compute row pointers ============================================= */ + + /* row form of the matrix starts directly after the column */ + /* form of matrix in A */ + Row [0].start = p [n_col] ; + Row [0].shared1.p = Row [0].start ; + Row [0].shared2.mark = -1 ; + for (row = 1 ; row < n_row ; row++) + { + Row [row].start = Row [row-1].start + Row [row-1].length ; + Row [row].shared1.p = Row [row].start ; + Row [row].shared2.mark = -1 ; + } + + /* === Create row form ================================================== */ + + if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) + { + /* if cols jumbled, watch for repeated row indices */ + for (col = 0 ; col < n_col ; col++) + { + cp = &A [p [col]] ; + cp_end = &A [p [col+1]] ; + while (cp < cp_end) + { + row = *cp++ ; + if (Row [row].shared2.mark != col) + { + A [(Row [row].shared1.p)++] = col ; + Row [row].shared2.mark = col ; + } + } + } + } + else + { + /* if cols not jumbled, we don't need the mark (this is faster) */ + for (col = 0 ; col < n_col ; col++) + { + cp = &A [p [col]] ; + cp_end = &A [p [col+1]] ; + while (cp < cp_end) + { + A [(Row [*cp++].shared1.p)++] = col ; + } + } + } + + /* === Clear the row marks and set row degrees ========================== */ + + for (row = 0 ; row < n_row ; row++) + { + Row [row].shared2.mark = 0 ; + Row [row].shared1.degree = Row [row].length ; + } + + /* === See if we need to re-create columns ============================== */ + + if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED) + { + COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ; + + + /* === Compute col pointers ========================================= */ + + /* col form of the matrix starts at A [0]. */ + /* Note, we may have a gap between the col form and the row */ + /* form if there were duplicate entries, if so, it will be */ + /* removed upon the first garbage collection */ + Col [0].start = 0 ; + p [0] = Col [0].start ; + for (col = 1 ; col < n_col ; col++) + { + /* note that the lengths here are for pruned columns, i.e. */ + /* no duplicate row indices will exist for these columns */ + Col [col].start = Col [col-1].start + Col [col-1].length ; + p [col] = Col [col].start ; + } + + /* === Re-create col form =========================================== */ + + for (row = 0 ; row < n_row ; row++) + { + rp = &A [Row [row].start] ; + rp_end = rp + Row [row].length ; + while (rp < rp_end) + { + A [(p [*rp++])++] = row ; + } + } + } + + /* === Done. Matrix is not (or no longer) jumbled ====================== */ + + return (true) ; +} + + +/* ========================================================================== */ +/* === init_scoring ========================================================= */ +/* ========================================================================== */ + +/* + Kills dense or empty columns and rows, calculates an initial score for + each column, and places all columns in the degree lists. Not user-callable. 
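+
+  Editorial sketch (mirroring the code below): the "dense" thresholds are
+  derived from the knobs as
+
+      dense_row_count = max (0, min (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
+      dense_col_count = max (0, min (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
+
+  so, assuming the usual default knob value of 0.5 and n_col = 1000, any row
+  with more than 500 entries is treated as dense and removed from
+  consideration.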
+*/
+template <typename IndexType>
+static void init_scoring
+  (
+    /* === Parameters ======================================================= */
+
+    IndexType n_row,                /* number of rows of A */
+    IndexType n_col,                /* number of columns of A */
+    Colamd_Row<IndexType> Row [],   /* of size n_row+1 */
+    colamd_col<IndexType> Col [],   /* of size n_col+1 */
+    IndexType A [],                 /* column form and row form of A */
+    IndexType head [],              /* of size n_col+1 */
+    double knobs [COLAMD_KNOBS],/* parameters */
+    IndexType *p_n_row2,            /* number of non-dense, non-empty rows */
+    IndexType *p_n_col2,            /* number of non-dense, non-empty columns */
+    IndexType *p_max_deg            /* maximum row degree */
+  )
+{
+  /* === Local variables ================================================== */
+
+  IndexType c ;                 /* a column index */
+  IndexType r, row ;            /* a row index */
+  IndexType *cp ;               /* a column pointer */
+  IndexType deg ;               /* degree of a row or column */
+  IndexType *cp_end ;           /* a pointer to the end of a column */
+  IndexType *new_cp ;           /* new column pointer */
+  IndexType col_length ;        /* length of pruned column */
+  IndexType score ;             /* current column score */
+  IndexType n_col2 ;            /* number of non-dense, non-empty columns */
+  IndexType n_row2 ;            /* number of non-dense, non-empty rows */
+  IndexType dense_row_count ;   /* remove rows with more entries than this */
+  IndexType dense_col_count ;   /* remove cols with more entries than this */
+  IndexType min_score ;         /* smallest column score */
+  IndexType max_deg ;           /* maximum row degree */
+  IndexType next_col ;          /* Used to add to degree list.*/
+
+
+  /* === Extract knobs ==================================================== */
+
+  dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;
+  dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;
+  COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
+  max_deg = 0 ;
+  n_col2 = n_col ;
+  n_row2 = n_row ;
+
+  /* === Kill empty columns =============================================== */
+
+  /* Put the empty columns at the end in their natural order, so that LU */
+  /* factorization can proceed as far as possible.
*/ + for (c = n_col-1 ; c >= 0 ; c--) + { + deg = Col [c].length ; + if (deg == 0) + { + /* this is a empty column, kill and order it last */ + Col [c].shared2.order = --n_col2 ; + KILL_PRINCIPAL_COL (c) ; + } + } + COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ; + + /* === Kill dense columns =============================================== */ + + /* Put the dense columns at the end, in their natural order */ + for (c = n_col-1 ; c >= 0 ; c--) + { + /* skip any dead columns */ + if (COL_IS_DEAD (c)) + { + continue ; + } + deg = Col [c].length ; + if (deg > dense_col_count) + { + /* this is a dense column, kill and order it last */ + Col [c].shared2.order = --n_col2 ; + /* decrement the row degrees */ + cp = &A [Col [c].start] ; + cp_end = cp + Col [c].length ; + while (cp < cp_end) + { + Row [*cp++].shared1.degree-- ; + } + KILL_PRINCIPAL_COL (c) ; + } + } + COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ; + + /* === Kill dense and empty rows ======================================== */ + + for (r = 0 ; r < n_row ; r++) + { + deg = Row [r].shared1.degree ; + COLAMD_ASSERT (deg >= 0 && deg <= n_col) ; + if (deg > dense_row_count || deg == 0) + { + /* kill a dense or empty row */ + KILL_ROW (r) ; + --n_row2 ; + } + else + { + /* keep track of max degree of remaining rows */ + max_deg = numext::maxi(max_deg, deg) ; + } + } + COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ; + + /* === Compute initial column scores ==================================== */ + + /* At this point the row degrees are accurate. They reflect the number */ + /* of "live" (non-dense) columns in each row. No empty rows exist. */ + /* Some "live" columns may contain only dead rows, however. These are */ + /* pruned in the code below. */ + + /* now find the initial matlab score for each column */ + for (c = n_col-1 ; c >= 0 ; c--) + { + /* skip dead column */ + if (COL_IS_DEAD (c)) + { + continue ; + } + score = 0 ; + cp = &A [Col [c].start] ; + new_cp = cp ; + cp_end = cp + Col [c].length ; + while (cp < cp_end) + { + /* get a row */ + row = *cp++ ; + /* skip if dead */ + if (ROW_IS_DEAD (row)) + { + continue ; + } + /* compact the column */ + *new_cp++ = row ; + /* add row's external degree */ + score += Row [row].shared1.degree - 1 ; + /* guard against integer overflow */ + score = numext::mini(score, n_col) ; + } + /* determine pruned column length */ + col_length = (IndexType) (new_cp - &A [Col [c].start]) ; + if (col_length == 0) + { + /* a newly-made null column (all rows in this col are "dense" */ + /* and have already been killed) */ + COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ; + Col [c].shared2.order = --n_col2 ; + KILL_PRINCIPAL_COL (c) ; + } + else + { + /* set column length and set score */ + COLAMD_ASSERT (score >= 0) ; + COLAMD_ASSERT (score <= n_col) ; + Col [c].length = col_length ; + Col [c].shared2.score = score ; + } + } + COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n", + n_col-n_col2)) ; + + /* At this point, all empty rows and columns are dead. All live columns */ + /* are "clean" (containing no dead rows) and simplicial (no supercolumns */ + /* yet). Rows may contain dead columns, but all live rows contain at */ + /* least one live column. 
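+
+  (Editorial note: the degree lists initialized below are doubly linked
+  through Col [c].shared3.prev and Col [c].shared4.degree_next, with
+  head [score] holding the first column of each bucket; pushing a column c
+  onto bucket s is effectively
+
+      Col [c].shared4.degree_next = head [s] ;
+      Col [c].shared3.prev = COLAMD_EMPTY ;
+      if (head [s] != COLAMD_EMPTY) Col [head [s]].shared3.prev = c ;
+      head [s] = c ;
+
+  which mirrors the insertion code that follows.)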
*/ + + /* === Initialize degree lists ========================================== */ + + + /* clear the hash buckets */ + for (c = 0 ; c <= n_col ; c++) + { + head [c] = COLAMD_EMPTY ; + } + min_score = n_col ; + /* place in reverse order, so low column indices are at the front */ + /* of the lists. This is to encourage natural tie-breaking */ + for (c = n_col-1 ; c >= 0 ; c--) + { + /* only add principal columns to degree lists */ + if (COL_IS_ALIVE (c)) + { + COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n", + c, Col [c].shared2.score, min_score, n_col)) ; + + /* === Add columns score to DList =============================== */ + + score = Col [c].shared2.score ; + + COLAMD_ASSERT (min_score >= 0) ; + COLAMD_ASSERT (min_score <= n_col) ; + COLAMD_ASSERT (score >= 0) ; + COLAMD_ASSERT (score <= n_col) ; + COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ; + + /* now add this column to dList at proper score location */ + next_col = head [score] ; + Col [c].shared3.prev = COLAMD_EMPTY ; + Col [c].shared4.degree_next = next_col ; + + /* if there already was a column with the same score, set its */ + /* previous pointer to this new column */ + if (next_col != COLAMD_EMPTY) + { + Col [next_col].shared3.prev = c ; + } + head [score] = c ; + + /* see if this score is less than current min */ + min_score = numext::mini(min_score, score) ; + + + } + } + + + /* === Return number of remaining columns, and max row degree =========== */ + + *p_n_col2 = n_col2 ; + *p_n_row2 = n_row2 ; + *p_max_deg = max_deg ; +} + + +/* ========================================================================== */ +/* === find_ordering ======================================================== */ +/* ========================================================================== */ + +/* + Order the principal columns of the supercolumn form of the matrix + (no supercolumns on input). Uses a minimum approximate column minimum + degree ordering method. Not user-callable. 
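+
+  Editorial outline of the pivot loop below (schematic, not verbatim):
+
+    while fewer than n_col2 columns are ordered:
+      1. pop the column with the smallest approximate score from
+         head [min_score] and give it the next pivot order k ;
+      2. form the pivot row as the union of all live rows in that column ;
+      3. scan the pivot row, computing set differences and updated
+         approximate scores for the affected columns ;
+      4. detect and absorb supercolumns, kill the pivot column, and put
+         the surviving columns back into the degree lists.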
+*/
+template <typename IndexType>
+static IndexType find_ordering /* return the number of garbage collections */
+  (
+    /* === Parameters ======================================================= */
+
+    IndexType n_row,                /* number of rows of A */
+    IndexType n_col,                /* number of columns of A */
+    IndexType Alen,                 /* size of A, 2*nnz + n_col or larger */
+    Colamd_Row<IndexType> Row [],   /* of size n_row+1 */
+    colamd_col<IndexType> Col [],   /* of size n_col+1 */
+    IndexType A [],                 /* column form and row form of A */
+    IndexType head [],              /* of size n_col+1 */
+    IndexType n_col2,               /* Remaining columns to order */
+    IndexType max_deg,              /* Maximum row degree */
+    IndexType pfree                 /* index of first free slot (2*nnz on entry) */
+  )
+{
+  /* === Local variables ================================================== */
+
+  IndexType k ;                 /* current pivot ordering step */
+  IndexType pivot_col ;         /* current pivot column */
+  IndexType *cp ;               /* a column pointer */
+  IndexType *rp ;               /* a row pointer */
+  IndexType pivot_row ;         /* current pivot row */
+  IndexType *new_cp ;           /* modified column pointer */
+  IndexType *new_rp ;           /* modified row pointer */
+  IndexType pivot_row_start ;   /* pointer to start of pivot row */
+  IndexType pivot_row_degree ;  /* number of columns in pivot row */
+  IndexType pivot_row_length ;  /* number of supercolumns in pivot row */
+  IndexType pivot_col_score ;   /* score of pivot column */
+  IndexType needed_memory ;     /* free space needed for pivot row */
+  IndexType *cp_end ;           /* pointer to the end of a column */
+  IndexType *rp_end ;           /* pointer to the end of a row */
+  IndexType row ;               /* a row index */
+  IndexType col ;               /* a column index */
+  IndexType max_score ;         /* maximum possible score */
+  IndexType cur_score ;         /* score of current column */
+  unsigned int hash ;           /* hash value for supernode detection */
+  IndexType head_column ;       /* head of hash bucket */
+  IndexType first_col ;         /* first column in hash bucket */
+  IndexType tag_mark ;          /* marker value for mark array */
+  IndexType row_mark ;          /* Row [row].shared2.mark */
+  IndexType set_difference ;    /* set difference size of row with pivot row */
+  IndexType min_score ;         /* smallest column score */
+  IndexType col_thickness ;     /* "thickness" (no. of columns in a supercol) */
+  IndexType max_mark ;          /* maximum value of tag_mark */
+  IndexType pivot_col_thickness ;   /* number of columns represented by pivot col */
+  IndexType prev_col ;          /* Used by Dlist operations. */
+  IndexType next_col ;          /* Used by Dlist operations.
*/ + IndexType ngarbage ; /* number of garbage collections performed */ + + + /* === Initialization and clear mark ==================================== */ + + max_mark = INT_MAX - n_col ; /* INT_MAX defined in */ + tag_mark = Eigen::internal::clear_mark (n_row, Row) ; + min_score = 0 ; + ngarbage = 0 ; + COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ; + + /* === Order the columns ================================================ */ + + for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */) + { + + /* === Select pivot column, and order it ============================ */ + + /* make sure degree list isn't empty */ + COLAMD_ASSERT (min_score >= 0) ; + COLAMD_ASSERT (min_score <= n_col) ; + COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ; + + /* get pivot column from head of minimum degree list */ + while (min_score < n_col && head [min_score] == COLAMD_EMPTY) + { + min_score++ ; + } + pivot_col = head [min_score] ; + COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ; + next_col = Col [pivot_col].shared4.degree_next ; + head [min_score] = next_col ; + if (next_col != COLAMD_EMPTY) + { + Col [next_col].shared3.prev = COLAMD_EMPTY ; + } + + COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ; + COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ; + + /* remember score for defrag check */ + pivot_col_score = Col [pivot_col].shared2.score ; + + /* the pivot column is the kth column in the pivot order */ + Col [pivot_col].shared2.order = k ; + + /* increment order count by column thickness */ + pivot_col_thickness = Col [pivot_col].shared1.thickness ; + k += pivot_col_thickness ; + COLAMD_ASSERT (pivot_col_thickness > 0) ; + + /* === Garbage_collection, if necessary ============================= */ + + needed_memory = numext::mini(pivot_col_score, n_col - k) ; + if (pfree + needed_memory >= Alen) + { + pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ; + ngarbage++ ; + /* after garbage collection we will have enough */ + COLAMD_ASSERT (pfree + needed_memory < Alen) ; + /* garbage collection has wiped out the Row[].shared2.mark array */ + tag_mark = Eigen::internal::clear_mark (n_row, Row) ; + + } + + /* === Compute pivot row pattern ==================================== */ + + /* get starting location for this new merged row */ + pivot_row_start = pfree ; + + /* initialize new row counts to zero */ + pivot_row_degree = 0 ; + + /* tag pivot column as having been visited so it isn't included */ + /* in merged pivot row */ + Col [pivot_col].shared1.thickness = -pivot_col_thickness ; + + /* pivot row is the union of all rows in the pivot column pattern */ + cp = &A [Col [pivot_col].start] ; + cp_end = cp + Col [pivot_col].length ; + while (cp < cp_end) + { + /* get a row */ + row = *cp++ ; + COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ; + /* skip if row is dead */ + if (ROW_IS_DEAD (row)) + { + continue ; + } + rp = &A [Row [row].start] ; + rp_end = rp + Row [row].length ; + while (rp < rp_end) + { + /* get a column */ + col = *rp++ ; + /* add the column, if alive and untagged */ + col_thickness = Col [col].shared1.thickness ; + if (col_thickness > 0 && COL_IS_ALIVE (col)) + { + /* tag column in pivot row */ + Col [col].shared1.thickness = -col_thickness ; + COLAMD_ASSERT (pfree < Alen) ; + /* place column in pivot row */ + A [pfree++] = col ; + pivot_row_degree += col_thickness ; + } + } + } + + /* clear tag on pivot column */ + Col [pivot_col].shared1.thickness = pivot_col_thickness ; + max_deg = numext::maxi(max_deg, 
pivot_row_degree) ; + + + /* === Kill all rows used to construct pivot row ==================== */ + + /* also kill pivot row, temporarily */ + cp = &A [Col [pivot_col].start] ; + cp_end = cp + Col [pivot_col].length ; + while (cp < cp_end) + { + /* may be killing an already dead row */ + row = *cp++ ; + COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ; + KILL_ROW (row) ; + } + + /* === Select a row index to use as the new pivot row =============== */ + + pivot_row_length = pfree - pivot_row_start ; + if (pivot_row_length > 0) + { + /* pick the "pivot" row arbitrarily (first row in col) */ + pivot_row = A [Col [pivot_col].start] ; + COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ; + } + else + { + /* there is no pivot row, since it is of zero length */ + pivot_row = COLAMD_EMPTY ; + COLAMD_ASSERT (pivot_row_length == 0) ; + } + COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ; + + /* === Approximate degree computation =============================== */ + + /* Here begins the computation of the approximate degree. The column */ + /* score is the sum of the pivot row "length", plus the size of the */ + /* set differences of each row in the column minus the pattern of the */ + /* pivot row itself. The column ("thickness") itself is also */ + /* excluded from the column score (we thus use an approximate */ + /* external degree). */ + + /* The time taken by the following code (compute set differences, and */ + /* add them up) is proportional to the size of the data structure */ + /* being scanned - that is, the sum of the sizes of each column in */ + /* the pivot row. Thus, the amortized time to compute a column score */ + /* is proportional to the size of that column (where size, in this */ + /* context, is the column "length", or the number of row indices */ + /* in that column). The number of row indices in a column is */ + /* monotonically non-decreasing, from the length of the original */ + /* column on input to colamd. */ + + /* === Compute set differences ====================================== */ + + COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ; + + /* pivot row is currently dead - it will be revived later. 
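+
+  (Editorial note: the mark array encodes the running set difference as
+  Row [row].shared2.mark = tag_mark + set_difference, so the scan below
+  recovers it with
+
+      set_difference = Row [row].shared2.mark - tag_mark ;
+      if (set_difference < 0) set_difference = Row [row].shared1.degree ;
+
+  where a negative value simply means the row has not been touched in this
+  pivot step yet, so its full external degree is used instead.)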
*/ + + COLAMD_DEBUG3 (("Pivot row: ")) ; + /* for each column in pivot row */ + rp = &A [pivot_row_start] ; + rp_end = rp + pivot_row_length ; + while (rp < rp_end) + { + col = *rp++ ; + COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; + COLAMD_DEBUG3 (("Col: %d\n", col)) ; + + /* clear tags used to construct pivot row pattern */ + col_thickness = -Col [col].shared1.thickness ; + COLAMD_ASSERT (col_thickness > 0) ; + Col [col].shared1.thickness = col_thickness ; + + /* === Remove column from degree list =========================== */ + + cur_score = Col [col].shared2.score ; + prev_col = Col [col].shared3.prev ; + next_col = Col [col].shared4.degree_next ; + COLAMD_ASSERT (cur_score >= 0) ; + COLAMD_ASSERT (cur_score <= n_col) ; + COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ; + if (prev_col == COLAMD_EMPTY) + { + head [cur_score] = next_col ; + } + else + { + Col [prev_col].shared4.degree_next = next_col ; + } + if (next_col != COLAMD_EMPTY) + { + Col [next_col].shared3.prev = prev_col ; + } + + /* === Scan the column ========================================== */ + + cp = &A [Col [col].start] ; + cp_end = cp + Col [col].length ; + while (cp < cp_end) + { + /* get a row */ + row = *cp++ ; + row_mark = Row [row].shared2.mark ; + /* skip if dead */ + if (ROW_IS_MARKED_DEAD (row_mark)) + { + continue ; + } + COLAMD_ASSERT (row != pivot_row) ; + set_difference = row_mark - tag_mark ; + /* check if the row has been seen yet */ + if (set_difference < 0) + { + COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ; + set_difference = Row [row].shared1.degree ; + } + /* subtract column thickness from this row's set difference */ + set_difference -= col_thickness ; + COLAMD_ASSERT (set_difference >= 0) ; + /* absorb this row if the set difference becomes zero */ + if (set_difference == 0) + { + COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ; + KILL_ROW (row) ; + } + else + { + /* save the new mark */ + Row [row].shared2.mark = set_difference + tag_mark ; + } + } + } + + + /* === Add up set differences for each column ======================= */ + + COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ; + + /* for each column in pivot row */ + rp = &A [pivot_row_start] ; + rp_end = rp + pivot_row_length ; + while (rp < rp_end) + { + /* get a column */ + col = *rp++ ; + COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ; + hash = 0 ; + cur_score = 0 ; + cp = &A [Col [col].start] ; + /* compact the column */ + new_cp = cp ; + cp_end = cp + Col [col].length ; + + COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ; + + while (cp < cp_end) + { + /* get a row */ + row = *cp++ ; + COLAMD_ASSERT(row >= 0 && row < n_row) ; + row_mark = Row [row].shared2.mark ; + /* skip if dead */ + if (ROW_IS_MARKED_DEAD (row_mark)) + { + continue ; + } + COLAMD_ASSERT (row_mark > tag_mark) ; + /* compact the column */ + *new_cp++ = row ; + /* compute hash function */ + hash += row ; + /* add set difference */ + cur_score += row_mark - tag_mark ; + /* integer overflow... */ + cur_score = numext::mini(cur_score, n_col) ; + } + + /* recompute the column's length */ + Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ; + + /* === Further mass elimination ================================= */ + + if (Col [col].length == 0) + { + COLAMD_DEBUG4 (("further mass elimination. 
Col: %d\n", col)) ; + /* nothing left but the pivot row in this column */ + KILL_PRINCIPAL_COL (col) ; + pivot_row_degree -= Col [col].shared1.thickness ; + COLAMD_ASSERT (pivot_row_degree >= 0) ; + /* order it */ + Col [col].shared2.order = k ; + /* increment order count by column thickness */ + k += Col [col].shared1.thickness ; + } + else + { + /* === Prepare for supercolumn detection ==================== */ + + COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ; + + /* save score so far */ + Col [col].shared2.score = cur_score ; + + /* add column to hash table, for supercolumn detection */ + hash %= n_col + 1 ; + + COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ; + COLAMD_ASSERT (hash <= n_col) ; + + head_column = head [hash] ; + if (head_column > COLAMD_EMPTY) + { + /* degree list "hash" is non-empty, use prev (shared3) of */ + /* first column in degree list as head of hash bucket */ + first_col = Col [head_column].shared3.headhash ; + Col [head_column].shared3.headhash = col ; + } + else + { + /* degree list "hash" is empty, use head as hash bucket */ + first_col = - (head_column + 2) ; + head [hash] = - (col + 2) ; + } + Col [col].shared4.hash_next = first_col ; + + /* save hash function in Col [col].shared3.hash */ + Col [col].shared3.hash = (IndexType) hash ; + COLAMD_ASSERT (COL_IS_ALIVE (col)) ; + } + } + + /* The approximate external column degree is now computed. */ + + /* === Supercolumn detection ======================================== */ + + COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ; + + Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ; + + /* === Kill the pivotal column ====================================== */ + + KILL_PRINCIPAL_COL (pivot_col) ; + + /* === Clear mark =================================================== */ + + tag_mark += (max_deg + 1) ; + if (tag_mark >= max_mark) + { + COLAMD_DEBUG2 (("clearing tag_mark\n")) ; + tag_mark = Eigen::internal::clear_mark (n_row, Row) ; + } + + /* === Finalize the new pivot row, and column scores ================ */ + + COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ; + + /* for each column in pivot row */ + rp = &A [pivot_row_start] ; + /* compact the pivot row */ + new_rp = rp ; + rp_end = rp + pivot_row_length ; + while (rp < rp_end) + { + col = *rp++ ; + /* skip dead columns */ + if (COL_IS_DEAD (col)) + { + continue ; + } + *new_rp++ = col ; + /* add new pivot row to column */ + A [Col [col].start + (Col [col].length++)] = pivot_row ; + + /* retrieve score so far and add on pivot row's degree. */ + /* (we wait until here for this in case the pivot */ + /* row's degree was reduced due to mass elimination). 
*/ + cur_score = Col [col].shared2.score + pivot_row_degree ; + + /* calculate the max possible score as the number of */ + /* external columns minus the 'k' value minus the */ + /* columns thickness */ + max_score = n_col - k - Col [col].shared1.thickness ; + + /* make the score the external degree of the union-of-rows */ + cur_score -= Col [col].shared1.thickness ; + + /* make sure score is less or equal than the max score */ + cur_score = numext::mini(cur_score, max_score) ; + COLAMD_ASSERT (cur_score >= 0) ; + + /* store updated score */ + Col [col].shared2.score = cur_score ; + + /* === Place column back in degree list ========================= */ + + COLAMD_ASSERT (min_score >= 0) ; + COLAMD_ASSERT (min_score <= n_col) ; + COLAMD_ASSERT (cur_score >= 0) ; + COLAMD_ASSERT (cur_score <= n_col) ; + COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ; + next_col = head [cur_score] ; + Col [col].shared4.degree_next = next_col ; + Col [col].shared3.prev = COLAMD_EMPTY ; + if (next_col != COLAMD_EMPTY) + { + Col [next_col].shared3.prev = col ; + } + head [cur_score] = col ; + + /* see if this score is less than current min */ + min_score = numext::mini(min_score, cur_score) ; + + } + + /* === Resurrect the new pivot row ================================== */ + + if (pivot_row_degree > 0) + { + /* update pivot row length to reflect any cols that were killed */ + /* during super-col detection and mass elimination */ + Row [pivot_row].start = pivot_row_start ; + Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ; + Row [pivot_row].shared1.degree = pivot_row_degree ; + Row [pivot_row].shared2.mark = 0 ; + /* pivot row is no longer dead */ + } + } + + /* === All principal columns have now been ordered ====================== */ + + return (ngarbage) ; +} + + +/* ========================================================================== */ +/* === order_children ======================================================= */ +/* ========================================================================== */ + +/* + The find_ordering routine has ordered all of the principal columns (the + representatives of the supercolumns). The non-principal columns have not + yet been ordered. This routine orders those columns by walking up the + parent tree (a column is a child of the column which absorbed it). The + final permutation vector is then placed in p [0 ... n_col-1], with p [0] + being the first column, and p [n_col-1] being the last. It doesn't look + like it at first glance, but be assured that this routine takes time linear + in the number of columns. Although not immediately obvious, the time + taken by this routine is O (n_col), that is, linear in the number of + columns. Not user-callable. +*/ +template +static inline void order_children +( + /* === Parameters ======================================================= */ + + IndexType n_col, /* number of columns of A */ + colamd_col Col [], /* of size n_col+1 */ + IndexType p [] /* p [0 ... 
n_col-1] is the column permutation*/
+  )
+{
+  /* === Local variables ================================================== */
+
+  IndexType i ;                 /* loop counter for all columns */
+  IndexType c ;                 /* column index */
+  IndexType parent ;            /* index of column's parent */
+  IndexType order ;             /* column's order */
+
+  /* === Order each non-principal column ================================== */
+
+  for (i = 0 ; i < n_col ; i++)
+  {
+    /* find an un-ordered non-principal column */
+    COLAMD_ASSERT (COL_IS_DEAD (i)) ;
+    if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
+    {
+      parent = i ;
+      /* once found, find its principal parent */
+      do
+      {
+        parent = Col [parent].shared1.parent ;
+      } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+
+      /* now, order all un-ordered non-principal columns along path */
+      /* to this parent.  collapse tree at the same time */
+      c = i ;
+      /* get order of parent */
+      order = Col [parent].shared2.order ;
+
+      do
+      {
+        COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+        /* order this column */
+        Col [c].shared2.order = order++ ;
+        /* collapse tree */
+        Col [c].shared1.parent = parent ;
+
+        /* get immediate parent of this column */
+        c = Col [c].shared1.parent ;
+
+        /* continue until we hit an ordered column.  There are */
+        /* guaranteed not to be any more unordered columns */
+        /* above an ordered column */
+      } while (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+      /* re-order the super_col parent to largest order for this group */
+      Col [parent].shared2.order = order ;
+    }
+  }
+
+  /* === Generate the permutation ========================================= */
+
+  for (c = 0 ; c < n_col ; c++)
+  {
+    p [Col [c].shared2.order] = c ;
+  }
+}
+
+
+/* ========================================================================== */
+/* === detect_super_cols ==================================================== */
+/* ========================================================================== */
+
+/*
+  Detects supercolumns by finding matches between columns in the hash buckets.
+  Check amongst columns in the set A [row_start ... row_start + row_length-1].
+  The columns under consideration are currently *not* in the degree lists,
+  and have already been placed in the hash buckets.
+
+  The hash bucket for columns whose hash function is equal to h is stored
+  as follows:
+
+  if head [h] is >= 0, then head [h] contains a degree list, so:
+
+  head [h] is the first column in degree bucket h.
+  Col [head [h]].headhash gives the first column in hash bucket h.
+
+  otherwise, the degree list is empty, and:
+
+  -(head [h] + 2) is the first column in hash bucket h.
+
+  For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
+  column" pointer.  Col [c].shared3.hash is used instead as the hash number
+  for that column.  The value of Col [c].shared4.hash_next is the next column
+  in the same hash bucket.
+
+  Assuming no, or "few" hash collisions, the time taken by this routine is
+  linear in the sum of the sizes (lengths) of each column whose score has
+  just been computed in the approximate degree computation.
+  Not user-callable.
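+
+  As a compact illustration (editorial, mirroring the code below), decoding
+  the head of hash bucket h therefore looks like:
+
+      head_column = head [hash] ;
+      if (head_column > COLAMD_EMPTY)
+        first_col = Col [head_column].shared3.headhash ;
+      else
+        first_col = - (head_column + 2) ;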
+*/
+template <typename IndexType>
+static void detect_super_cols
+(
+  /* === Parameters ======================================================= */
+
+  colamd_col<IndexType> Col [],   /* of size n_col+1 */
+  IndexType A [],                 /* row indices of A */
+  IndexType head [],              /* head of degree lists and hash buckets */
+  IndexType row_start,            /* pointer to set of columns to check */
+  IndexType row_length            /* number of columns to check */
+)
+{
+  /* === Local variables ================================================== */
+
+  IndexType hash ;              /* hash value for a column */
+  IndexType *rp ;               /* pointer to a row */
+  IndexType c ;                 /* a column index */
+  IndexType super_c ;           /* column index of the column to absorb into */
+  IndexType *cp1 ;              /* column pointer for column super_c */
+  IndexType *cp2 ;              /* column pointer for column c */
+  IndexType length ;            /* length of column super_c */
+  IndexType prev_c ;            /* column preceding c in hash bucket */
+  IndexType i ;                 /* loop counter */
+  IndexType *rp_end ;           /* pointer to the end of the row */
+  IndexType col ;               /* a column index in the row to check */
+  IndexType head_column ;       /* first column in hash bucket or degree list */
+  IndexType first_col ;         /* first column in hash bucket */
+
+  /* === Consider each column in the row ================================== */
+
+  rp = &A [row_start] ;
+  rp_end = rp + row_length ;
+  while (rp < rp_end)
+  {
+    col = *rp++ ;
+    if (COL_IS_DEAD (col))
+    {
+      continue ;
+    }
+
+    /* get hash number for this column */
+    hash = Col [col].shared3.hash ;
+    COLAMD_ASSERT (hash <= n_col) ;
+
+    /* === Get the first column in this hash bucket ===================== */
+
+    head_column = head [hash] ;
+    if (head_column > COLAMD_EMPTY)
+    {
+      first_col = Col [head_column].shared3.headhash ;
+    }
+    else
+    {
+      first_col = - (head_column + 2) ;
+    }
+
+    /* === Consider each column in the hash bucket ====================== */
+
+    for (super_c = first_col ; super_c != COLAMD_EMPTY ;
+         super_c = Col [super_c].shared4.hash_next)
+    {
+      COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
+      COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
+      length = Col [super_c].length ;
+
+      /* prev_c is the column preceding column c in the hash bucket */
+      prev_c = super_c ;
+
+      /* === Compare super_c with all columns after it ================ */
+
+      for (c = Col [super_c].shared4.hash_next ;
+           c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
+      {
+        COLAMD_ASSERT (c != super_c) ;
+        COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
+        COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
+
+        /* not identical if lengths or scores are different */
+        if (Col [c].length != length ||
+            Col [c].shared2.score != Col [super_c].shared2.score)
+        {
+          prev_c = c ;
+          continue ;
+        }
+
+        /* compare the two columns */
+        cp1 = &A [Col [super_c].start] ;
+        cp2 = &A [Col [c].start] ;
+
+        for (i = 0 ; i < length ; i++)
+        {
+          /* the columns are "clean" (no dead rows) */
+          COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
+          COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
+          /* row indices will be in the same order for both supercols, */
+          /* no gather scatter necessary */
+          if (*cp1++ != *cp2++)
+          {
+            break ;
+          }
+        }
+
+        /* the two columns are different if the for-loop "broke" */
+        if (i != length)
+        {
+          prev_c = c ;
+          continue ;
+        }
+
+        /* === Got it!
two columns are identical =================== */
+
+        COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
+
+        Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
+        Col [c].shared1.parent = super_c ;
+        KILL_NON_PRINCIPAL_COL (c) ;
+        /* order c later, in order_children() */
+        Col [c].shared2.order = COLAMD_EMPTY ;
+        /* remove c from hash bucket */
+        Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
+      }
+    }
+
+    /* === Empty this hash bucket ======================================= */
+
+    if (head_column > COLAMD_EMPTY)
+    {
+      /* corresponding degree list "hash" is not empty */
+      Col [head_column].shared3.headhash = COLAMD_EMPTY ;
+    }
+    else
+    {
+      /* corresponding degree list "hash" is empty */
+      head [hash] = COLAMD_EMPTY ;
+    }
+  }
+}
+
+
+/* ========================================================================== */
+/* === garbage_collection =================================================== */
+/* ========================================================================== */
+
+/*
+  Defragments and compacts columns and rows in the workspace A.  Used when
+  all available memory has been used while performing row merging.  Returns
+  the index of the first free position in A, after garbage collection.  The
+  time taken by this routine is linear in the size of the array A, which is
+  itself linear in the number of nonzeros in the input matrix.
+  Not user-callable.
+*/
+template <typename IndexType>
+static IndexType garbage_collection  /* returns the new value of pfree */
+  (
+    /* === Parameters ======================================================= */
+
+    IndexType n_row,                /* number of rows */
+    IndexType n_col,                /* number of columns */
+    Colamd_Row<IndexType> Row [],   /* row info */
+    colamd_col<IndexType> Col [],   /* column info */
+    IndexType A [],                 /* A [0 ... Alen-1] holds the matrix */
+    IndexType *pfree                /* &A [0] ... pfree is in use */
+  )
+{
+  /* === Local variables ================================================== */
+
+  IndexType *psrc ;             /* source pointer */
+  IndexType *pdest ;            /* destination pointer */
+  IndexType j ;                 /* counter */
+  IndexType r ;                 /* a row index */
+  IndexType c ;                 /* a column index */
+  IndexType length ;            /* length of a row or column */
+
+  /* === Defragment the columns =========================================== */
+
+  pdest = &A[0] ;
+  for (c = 0 ; c < n_col ; c++)
+  {
+    if (COL_IS_ALIVE (c))
+    {
+      psrc = &A [Col [c].start] ;
+
+      /* move and compact the column */
+      COLAMD_ASSERT (pdest <= psrc) ;
+      Col [c].start = (IndexType) (pdest - &A [0]) ;
+      length = Col [c].length ;
+      for (j = 0 ; j < length ; j++)
+      {
+        r = *psrc++ ;
+        if (ROW_IS_ALIVE (r))
+        {
+          *pdest++ = r ;
+        }
+      }
+      Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;
+    }
+  }
+
+  /* === Prepare to defragment the rows =================================== */
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    if (ROW_IS_ALIVE (r))
+    {
+      if (Row [r].length == 0)
+      {
+        /* this row is of zero length.  cannot compact it, so kill it */
+        COLAMD_DEBUG3 (("Defrag row kill\n")) ;
+        KILL_ROW (r) ;
+      }
+      else
+      {
+        /* save first column index in Row [r].shared2.first_column */
+        psrc = &A [Row [r].start] ;
+        Row [r].shared2.first_column = *psrc ;
+        COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+        /* flag the start of the row with the one's complement of row */
+        *psrc = ONES_COMPLEMENT (r) ;
+
+      }
+    }
+  }
+
+  /* === Defragment the rows ============================================== */
+
+  psrc = pdest ;
+  while (psrc < pfree)
+  {
+    /* find a negative number ...
the start of a row */ + if (*psrc++ < 0) + { + psrc-- ; + /* get the row index */ + r = ONES_COMPLEMENT (*psrc) ; + COLAMD_ASSERT (r >= 0 && r < n_row) ; + /* restore first column index */ + *psrc = Row [r].shared2.first_column ; + COLAMD_ASSERT (ROW_IS_ALIVE (r)) ; + + /* move and compact the row */ + COLAMD_ASSERT (pdest <= psrc) ; + Row [r].start = (IndexType) (pdest - &A [0]) ; + length = Row [r].length ; + for (j = 0 ; j < length ; j++) + { + c = *psrc++ ; + if (COL_IS_ALIVE (c)) + { + *pdest++ = c ; + } + } + Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ; + + } + } + /* ensure we found all the rows */ + COLAMD_ASSERT (debug_rows == 0) ; + + /* === Return the new value of pfree ==================================== */ + + return ((IndexType) (pdest - &A [0])) ; +} + + +/* ========================================================================== */ +/* === clear_mark =========================================================== */ +/* ========================================================================== */ + +/* + Clears the Row [].shared2.mark array, and returns the new tag_mark. + Return value is the new tag_mark. Not user-callable. +*/ +template +static inline IndexType clear_mark /* return the new value for tag_mark */ + ( + /* === Parameters ======================================================= */ + + IndexType n_row, /* number of rows in A */ + Colamd_Row Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */ + ) +{ + /* === Local variables ================================================== */ + + IndexType r ; + + for (r = 0 ; r < n_row ; r++) + { + if (ROW_IS_ALIVE (r)) + { + Row [r].shared2.mark = 0 ; + } + } + return (1) ; +} + + +} // namespace internal +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Ordering.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Ordering.h new file mode 100644 index 000000000..7ea9b14d7 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/OrderingMethods/Ordering.h @@ -0,0 +1,157 @@ + +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ORDERING_H +#define EIGEN_ORDERING_H + +namespace Eigen { + +#include "Eigen_Colamd.h" + +namespace internal { + +/** \internal + * \ingroup OrderingMethods_Module + * \param[in] A the input non-symmetric matrix + * \param[out] symmat the symmetric pattern A^T+A from the input matrix \a A. 
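+ *
+ * A minimal usage sketch (editorial; the matrix names are illustrative only):
+ * \code
+ * SparseMatrix<double> A, symm;
+ * // ... fill A with a structurally non-symmetric pattern ...
+ * internal::ordering_helper_at_plus_a(A, symm); // symm gets the pattern of A^T + A
+ * \endcode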
+ * FIXME: The values should not be considered here + */ +template +void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat) +{ + MatrixType C; + C = A.transpose(); // NOTE: Could be costly + for (int i = 0; i < C.rows(); i++) + { + for (typename MatrixType::InnerIterator it(C, i); it; ++it) + it.valueRef() = 0.0; + } + symmat = C + A; +} + +} + +#ifndef EIGEN_MPL2_ONLY + +/** \ingroup OrderingMethods_Module + * \class AMDOrdering + * + * Functor computing the \em approximate \em minimum \em degree ordering + * If the matrix is not structurally symmetric, an ordering of A^T+A is computed + * \tparam StorageIndex The type of indices of the matrix + * \sa COLAMDOrdering + */ +template +class AMDOrdering +{ + public: + typedef PermutationMatrix PermutationType; + + /** Compute the permutation vector from a sparse matrix + * This routine is much faster if the input matrix is column-major + */ + template + void operator()(const MatrixType& mat, PermutationType& perm) + { + // Compute the symmetric pattern + SparseMatrix symm; + internal::ordering_helper_at_plus_a(mat,symm); + + // Call the AMD routine + //m_mat.prune(keep_diag()); + internal::minimum_degree_ordering(symm, perm); + } + + /** Compute the permutation with a selfadjoint matrix */ + template + void operator()(const SparseSelfAdjointView& mat, PermutationType& perm) + { + SparseMatrix C; C = mat; + + // Call the AMD routine + // m_mat.prune(keep_diag()); //Remove the diagonal elements + internal::minimum_degree_ordering(C, perm); + } +}; + +#endif // EIGEN_MPL2_ONLY + +/** \ingroup OrderingMethods_Module + * \class NaturalOrdering + * + * Functor computing the natural ordering (identity) + * + * \note Returns an empty permutation matrix + * \tparam StorageIndex The type of indices of the matrix + */ +template +class NaturalOrdering +{ + public: + typedef PermutationMatrix PermutationType; + + /** Compute the permutation vector from a column-major sparse matrix */ + template + void operator()(const MatrixType& /*mat*/, PermutationType& perm) + { + perm.resize(0); + } + +}; + +/** \ingroup OrderingMethods_Module + * \class COLAMDOrdering + * + * \tparam StorageIndex The type of indices of the matrix + * + * Functor computing the \em column \em approximate \em minimum \em degree ordering + * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()). + */ +template +class COLAMDOrdering +{ + public: + typedef PermutationMatrix PermutationType; + typedef Matrix IndexVector; + + /** Compute the permutation vector \a perm form the sparse matrix \a mat + * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + */ + template + void operator() (const MatrixType& mat, PermutationType& perm) + { + eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. 
Call .makeCompressed() before passing it to COLAMDOrdering"); + + StorageIndex m = StorageIndex(mat.rows()); + StorageIndex n = StorageIndex(mat.cols()); + StorageIndex nnz = StorageIndex(mat.nonZeros()); + // Get the recommended value of Alen to be used by colamd + StorageIndex Alen = internal::colamd_recommended(nnz, m, n); + // Set the default parameters + double knobs [COLAMD_KNOBS]; + StorageIndex stats [COLAMD_STATS]; + internal::colamd_set_defaults(knobs); + + IndexVector p(n+1), A(Alen); + for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i]; + for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i]; + // Call Colamd routine to compute the ordering + StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); + EIGEN_UNUSED_VARIABLE(info); + eigen_assert( info && "COLAMD failed " ); + + perm.resize(n); + for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i; + } +}; + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h new file mode 100644 index 000000000..160d8a523 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -0,0 +1,678 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PASTIXSUPPORT_H +#define EIGEN_PASTIXSUPPORT_H + +namespace Eigen { + +#if defined(DCOMPLEX) + #define PASTIX_COMPLEX COMPLEX + #define PASTIX_DCOMPLEX DCOMPLEX +#else + #define PASTIX_COMPLEX std::complex + #define PASTIX_DCOMPLEX std::complex +#endif + +/** \ingroup PaStiXSupport_Module + * \brief Interface to the PaStix solver + * + * This class is used to solve the linear systems A.X = B via the PaStix library. + * The matrix can be either real or complex, symmetric or not. 
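+ *
+ * A typical call sequence, sketched under the assumption that PaStiX (and MPI)
+ * is installed and linked (illustrative, not normative):
+ * \code
+ * SparseMatrix<double> A;   // square, compressed, column-major
+ * VectorXd b, x;
+ * // ... fill A and b ...
+ * PastixLU<SparseMatrix<double> > solver;
+ * solver.compute(A);        // analyzePattern() + factorize()
+ * if(solver.info() == Success)
+ *   x = solver.solve(b);
+ * \endcode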
+ * + * \sa TutorialSparseDirectSolvers + */ +template class PastixLU; +template class PastixLLT; +template class PastixLDLT; + +namespace internal +{ + + template struct pastix_traits; + + template + struct pastix_traits< PastixLU<_MatrixType> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pastix_traits< PastixLLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pastix_traits< PastixLDLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + } + + // Convert the matrix to Fortran-style Numbering + template + void c_to_fortran_numbering (MatrixType& mat) + { + if ( !(mat.outerIndexPtr()[0]) ) + { + int i; + for(i = 0; i <= mat.rows(); ++i) + ++mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + ++mat.innerIndexPtr()[i]; + } + } + + // Convert to C-style Numbering + template + void fortran_to_c_numbering (MatrixType& mat) + { + // Check the Numbering + if ( mat.outerIndexPtr()[0] == 1 ) + { // Convert to C-style numbering + int i; + for(i = 0; i <= mat.rows(); ++i) + --mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + --mat.innerIndexPtr()[i]; + } + } +} + +// This is the base class to interface with PaStiX functions. +// Users should not used this class directly. 
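+// Editorial note (not from the original sources): the two converters above
+// only shift indices, e.g. a C-style outer pointer array {0,2,4} becomes the
+// Fortran-style {1,3,5} after c_to_fortran_numbering(), which is the 1-based
+// layout PaStiX expects; fortran_to_c_numbering() performs the inverse shift.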
+template +class PastixBase : public SparseSolverBase +{ + protected: + typedef SparseSolverBase Base; + using Base::derived; + using Base::m_isInitialized; + public: + using Base::_solve_impl; + + typedef typename internal::pastix_traits::MatrixType _MatrixType; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix Vector; + typedef SparseMatrix ColSpMatrix; + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + + PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0) + { + init(); + } + + ~PastixBase() + { + clean(); + } + + template + bool _solve_impl(const MatrixBase &b, MatrixBase &x) const; + + /** Returns a reference to the integer vector IPARM of PaStiX parameters + * to modify the default parameters. + * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& iparm() + { + return m_iparm; + } + + /** Return a reference to a particular index parameter of the IPARM vector + * \sa iparm() + */ + + int& iparm(int idxparam) + { + return m_iparm(idxparam); + } + + /** Returns a reference to the double vector DPARM of PaStiX parameters + * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& dparm() + { + return m_dparm; + } + + + /** Return a reference to a particular index parameter of the DPARM vector + * \sa dparm() + */ + double& dparm(int idxparam) + { + return m_dparm(idxparam); + } + + inline Index cols() const { return m_size; } + inline Index rows() const { return m_size; } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the PaStiX reports a problem + * \c InvalidInput if the input matrix is invalid + * + * \sa iparm() + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + protected: + + // Initialize the Pastix data structure, check the matrix + void init(); + + // Compute the ordering and the symbolic factorization + void analyzePattern(ColSpMatrix& mat); + + // Compute the numerical factorization + void factorize(ColSpMatrix& mat); + + // Free all the data allocated by Pastix + void clean() + { + eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); + m_iparm(IPARM_START_TASK) = API_TASK_CLEAN; + m_iparm(IPARM_END_TASK) = API_TASK_CLEAN; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + } + + void compute(ColSpMatrix& mat); + + int m_initisOk; + int m_analysisIsOk; + int m_factorizationIsOk; + mutable ComputationInfo m_info; + mutable pastix_data_t *m_pastixdata; // Data structure for pastix + mutable int m_comm; // The MPI communicator identifier + mutable Array m_iparm; // integer vector for the input parameters + mutable Array m_dparm; // Scalar vector for the input parameters + mutable Matrix m_perm; // Permutation vector + mutable Matrix m_invp; // Inverse permutation vector + mutable int m_size; // Size of the matrix +}; + + /** Initialize the PaStiX data structure. 
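+ *
+ * (Editorial example: parameters can be adjusted through iparm() after
+ * construction and before compute(); the index and value below are standard
+ * PaStiX API names, shown for illustration only.)
+ * \code
+ * PastixLU<SparseMatrix<double> > solver;
+ * solver.iparm(IPARM_VERBOSE) = API_VERBOSE_YES;  // ask PaStiX for verbose logs
+ * solver.compute(A);
+ * \endcode
+ *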
+ *A first call to this function fills iparm and dparm with the default PaStiX parameters + * \sa iparm() dparm() + */ +template +void PastixBase::init() +{ + m_size = 0; + m_iparm.setZero(IPARM_SIZE); + m_dparm.setZero(DPARM_SIZE); + + m_iparm(IPARM_MODIFY_PARAMETER) = API_NO; + pastix(&m_pastixdata, MPI_COMM_WORLD, + 0, 0, 0, 0, + 0, 0, 0, 1, m_iparm.data(), m_dparm.data()); + + m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO; + m_iparm[IPARM_VERBOSE] = API_VERBOSE_NOT; + m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH; + m_iparm[IPARM_INCOMPLETE] = API_NO; + m_iparm[IPARM_OOC_LIMIT] = 2000; + m_iparm[IPARM_RHS_MAKING] = API_RHS_B; + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; + + m_iparm(IPARM_START_TASK) = API_TASK_INIT; + m_iparm(IPARM_END_TASK) = API_TASK_INIT; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + 0, 0, 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) { + m_info = InvalidInput; + m_initisOk = false; + } + else { + m_info = Success; + m_initisOk = true; + } +} + +template +void PastixBase::compute(ColSpMatrix& mat) +{ + eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared"); + + analyzePattern(mat); + factorize(mat); + + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; +} + + +template +void PastixBase::analyzePattern(ColSpMatrix& mat) +{ + eigen_assert(m_initisOk && "The initialization of PaSTiX failed"); + + // clean previous calls + if(m_size>0) + clean(); + + m_size = internal::convert_index(mat.rows()); + m_perm.resize(m_size); + m_invp.resize(m_size); + + m_iparm(IPARM_START_TASK) = API_TASK_ORDERING; + m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + m_analysisIsOk = false; + } + else + { + m_info = Success; + m_analysisIsOk = true; + } +} + +template +void PastixBase::factorize(ColSpMatrix& mat) +{ +// if(&m_cpyMat != &mat) m_cpyMat = mat; + eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); + m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; + m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; + m_size = internal::convert_index(mat.rows()); + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + m_factorizationIsOk = false; + m_isInitialized = false; + } + else + { + m_info = Success; + m_factorizationIsOk = true; + m_isInitialized = true; + } +} + +/* Solve the system */ +template +template +bool PastixBase::_solve_impl(const MatrixBase &b, MatrixBase &x) const +{ + eigen_assert(m_isInitialized && "The matrix should be factorized first"); + EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + int rhs = 1; + + x = b; /* on return, x is overwritten by the computed solution */ + + for (int i = 0; i < b.cols(); i++){ + m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; + m_iparm[IPARM_END_TASK] = API_TASK_REFINE; + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index(x.rows()), 0, 0, 0, + m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), 
m_dparm.data()); + } + + // Check the returned error + m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue; + + return m_iparm(IPARM_ERROR_NUMBER)==0; +} + +/** \ingroup PaStiXSupport_Module + * \class PastixLU + * \brief Sparse direct LU solver based on PaStiX library + * + * This class is used to solve the linear systems A.X = B with a supernodal LU + * factorization in the PaStiX library. The matrix A should be squared and nonsingular + * PaStiX requires that the matrix A has a symmetric structural pattern. + * This interface can symmetrize the input matrix otherwise. + * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false + * NOTE : Note that if the analysis and factorization phase are called separately, + * the input matrix will be symmetrized at each call, hence it is advised to + * symmetrize the matrix in a end-user program and set \p IsStrSym to true + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SparseLU + * + */ +template +class PastixLU : public PastixBase< PastixLU<_MatrixType> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + typedef typename MatrixType::StorageIndex StorageIndex; + + public: + PastixLU() : Base() + { + init(); + } + + explicit PastixLU(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + /** Compute the LU supernodal factorization of \p matrix. + * iparm and dparm can be used to tune the PaStiX parameters. + * see the PaStiX user's manual + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. + * Several ordering methods can be used at this step. See the PaStiX user's manual. + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + + /** Compute the LU supernodal factorization of \p matrix + * WARNING The matrix \p matrix should have the same structural pattern + * as the same used in the analysis phase. + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + protected: + + void init() + { + m_structureIsUptodate = false; + m_iparm(IPARM_SYM) = API_SYM_NO; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LU; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + if(IsStrSym) + out = matrix; + else + { + if(!m_structureIsUptodate) + { + // update the transposed structure + m_transposedStructure = matrix.transpose(); + + // Set the elements of the matrix to zero + for (Index j=0; j + * \tparam UpLo The part of the matrix to use : Lower or Upper. 
+  *        The default is Lower as required by PaStiX.
+  *
+  * \implsparsesolverconcept
+  *
+  * \sa \ref TutorialSparseSolverConcept, class SimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo>
+class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
+    typedef typename Base::ColSpMatrix ColSpMatrix;
+
+  public:
+    enum { UpLo = _UpLo };
+    PastixLLT() : Base()
+    {
+      init();
+    }
+
+    explicit PastixLLT(const MatrixType& matrix):Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    /** Compute the L factor of the LL^T supernodal factorization of \p matrix
+      * \sa analyzePattern() factorize()
+      */
+    void compute (const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::compute(temp);
+    }
+
+    /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern.
+      * The result of this operation can be used with successive matrices having the same pattern as \p matrix
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::analyzePattern(temp);
+    }
+    /** Compute the LL^T supernodal numerical factorization of \p matrix
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::factorize(temp);
+    }
+  protected:
+    using Base::m_iparm;
+
+    void init()
+    {
+      m_iparm(IPARM_SYM) = API_SYM_YES;
+      m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
+    }
+
+    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
+    {
+      out.resize(matrix.rows(), matrix.cols());
+      // Pastix supports only lower, column-major matrices
+      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
+      internal::c_to_fortran_numbering(out);
+    }
+};
+
+/** \ingroup PaStiXSupport_Module
+  * \class PastixLDLT
+  * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
+  *
+  * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization
+  * available in the PaStiX library. The matrix A should be symmetric and positive definite.
+  * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam UpLo The part of the matrix to use : Lower or Upper.
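+  *        (An illustrative sketch under the same assumptions as the PastixLLT
+  *        example above:
+  *        \code
+  *        PastixLDLT<SparseMatrix<double>, Lower> ldlt(A);
+  *        VectorXd x = ldlt.solve(b);
+  *        \endcode )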
The default is Lower as required by PaStiX + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT + */ +template +class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + + public: + enum { UpLo = _UpLo }; + PastixLDLT():Base() + { + init(); + } + + explicit PastixLDLT(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + + /** Compute the L and D factors of the LDL^T factorization of \p matrix + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + + /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + /** Compute the LDL^T supernodal numerical factorization of \p matrix + * + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + + protected: + using Base::m_iparm; + + void init() + { + m_iparm(IPARM_SYM) = API_SYM_YES; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + // Pastix supports only lower, column-major matrices + out.resize(matrix.rows(), matrix.cols()); + out.template selfadjointView() = matrix.template selfadjointView(); + internal::c_to_fortran_numbering(out); + } +}; + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h new file mode 100644 index 000000000..091c3970e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h @@ -0,0 +1,543 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL PARDISO + ******************************************************************************** +*/ + +#ifndef EIGEN_PARDISOSUPPORT_H +#define EIGEN_PARDISOSUPPORT_H + +namespace Eigen { + +template class PardisoLU; +template class PardisoLLT; +template class PardisoLDLT; + +namespace internal +{ + template + struct pardiso_run_selector + { + static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a, + IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x) + { + IndexType error = 0; + ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); + return error; + } + }; + template<> + struct pardiso_run_selector + { + typedef long long int IndexType; + static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a, + IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x) + { + IndexType error = 0; + ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); + return error; + } + }; + + template struct pardiso_traits; + + template + struct pardiso_traits< PardisoLU<_MatrixType> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pardiso_traits< PardisoLLT<_MatrixType, Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pardiso_traits< PardisoLDLT<_MatrixType, Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + +} // end namespace internal + +template +class PardisoImpl : public SparseSolverBase +{ + protected: + typedef SparseSolverBase Base; + using Base::derived; + using Base::m_isInitialized; + + typedef internal::pardiso_traits Traits; + public: + using Base::_solve_impl; + + typedef typename Traits::MatrixType MatrixType; + typedef typename Traits::Scalar Scalar; + typedef typename Traits::RealScalar RealScalar; + typedef typename Traits::StorageIndex StorageIndex; + typedef SparseMatrix SparseMatrixType; + typedef Matrix VectorType; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef Array ParameterType; + enum { + ScalarIsComplex = NumTraits::IsComplex, + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + + PardisoImpl() + { + 
eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
+      m_iparm.setZero();
+      m_msglvl = 0; // No output
+      m_isInitialized = false;
+    }
+
+    ~PardisoImpl()
+    {
+      pardisoRelease();
+    }
+
+    inline Index cols() const { return m_size; }
+    inline Index rows() const { return m_size; }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** \warning for advanced usage only.
+      * \returns a reference to the parameter array controlling PARDISO.
+      * See the PARDISO manual to know how to use it. */
+    ParameterType& pardisoParameterArray()
+    {
+      return m_iparm;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving for several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    Derived& analyzePattern(const MatrixType& matrix);
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    Derived& factorize(const MatrixType& matrix);
+
+    Derived& compute(const MatrixType& matrix);
+
+    template<typename BDerived, typename XDerived>
+    void _solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &dest) const;
+
+  protected:
+    void pardisoRelease()
+    {
+      if(m_isInitialized) // Factorization ran at least once
+      {
+        internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, internal::convert_index<StorageIndex>(m_size), 0, 0, 0, m_perm.data(), 0,
+                                                          m_iparm.data(), m_msglvl, NULL, NULL);
+        m_isInitialized = false;
+      }
+    }
+
+    void pardisoInit(int type)
+    {
+      m_type = type;
+      bool symmetric = std::abs(m_type) < 10;
+      m_iparm[0] = 1;   // No solver default
+      m_iparm[1] = 2;   // use Metis for the ordering
+      m_iparm[2] = 0;   // Reserved. Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??)
+      m_iparm[3] = 0;   // No iterative-direct algorithm
+      m_iparm[4] = 0;   // No user fill-in reducing permutation
+      m_iparm[5] = 0;   // Write solution into x, b is left unchanged
+      m_iparm[6] = 0;   // Not in use
+      m_iparm[7] = 2;   // Max numbers of iterative refinement steps
+      m_iparm[8] = 0;   // Not in use
+      m_iparm[9] = 13;  // Perturb the pivot elements with 1E-13
+      m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
+      m_iparm[11] = 0;  // Not in use
+      m_iparm[12] = symmetric ? 0 : 1; // Maximum weighted matching algorithm is switched-off (default for symmetric).
+                                       // Try m_iparm[12] = 1 in case of inappropriate accuracy
+      m_iparm[13] = 0;  // Output: Number of perturbed pivots
+      m_iparm[14] = 0;  // Not in use
+      m_iparm[15] = 0;  // Not in use
+      m_iparm[16] = 0;  // Not in use
+      m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
+      m_iparm[18] = -1; // Output: Mflops for LU factorization
+      m_iparm[19] = 0;  // Output: Numbers of CG Iterations
+
+      m_iparm[20] = 0;  // 1x1 pivoting
+      m_iparm[26] = 0;  // No matrix checker
+      m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
+      m_iparm[34] = 1;  // C indexing
+      m_iparm[36] = 0;  // CSR
+      m_iparm[59] = 0;  // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core
+
+      memset(m_pt, 0, sizeof(m_pt));
+    }
+
+  protected:
+    // cached data to reduce reallocation, etc.
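+    // Illustrative note (not part of the upstream sources): the iparm defaults
+    // set in pardisoInit() above can be overridden from user code through
+    // pardisoParameterArray() before calling compute(), e.g.
+    //   solver.pardisoParameterArray()[59] = 1; // in-core/out-of-core switch, see above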
+ + void manageErrorCode(Index error) const + { + switch(error) + { + case 0: + m_info = Success; + break; + case -4: + case -7: + m_info = NumericalIssue; + break; + default: + m_info = InvalidInput; + } + } + + mutable SparseMatrixType m_matrix; + mutable ComputationInfo m_info; + bool m_analysisIsOk, m_factorizationIsOk; + StorageIndex m_type, m_msglvl; + mutable void *m_pt[64]; + mutable ParameterType m_iparm; + mutable IntColVectorType m_perm; + Index m_size; + +}; + +template +Derived& PardisoImpl::compute(const MatrixType& a) +{ + m_size = a.rows(); + eigen_assert(a.rows() == a.cols()); + + pardisoRelease(); + m_perm.setZero(m_size); + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 12, internal::convert_index(m_size), + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + manageErrorCode(error); + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_isInitialized = true; + return derived(); +} + +template +Derived& PardisoImpl::analyzePattern(const MatrixType& a) +{ + m_size = a.rows(); + eigen_assert(m_size == a.cols()); + + pardisoRelease(); + m_perm.setZero(m_size); + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 11, internal::convert_index(m_size), + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + + manageErrorCode(error); + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_isInitialized = true; + return derived(); +} + +template +Derived& PardisoImpl::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + eigen_assert(m_size == a.rows() && m_size == a.cols()); + + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 22, internal::convert_index(m_size), + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + + manageErrorCode(error); + m_factorizationIsOk = true; + return derived(); +} + +template +template +void PardisoImpl::_solve_impl(const MatrixBase &b, MatrixBase& x) const +{ + if(m_iparm[0] == 0) // Factorization was not computed + { + m_info = InvalidInput; + return; + } + + //Index n = m_matrix.rows(); + Index nrhs = Index(b.cols()); + eigen_assert(m_size==b.rows()); + eigen_assert(((MatrixBase::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported"); + eigen_assert(((MatrixBase::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported"); + eigen_assert(((nrhs == 1) || b.outerStride() == b.rows())); + + +// switch (transposed) { +// case SvNoTrans : m_iparm[11] = 0 ; break; +// case SvTranspose : m_iparm[11] = 2 ; break; +// case SvAdjoint : m_iparm[11] = 1 ; break; +// default: +// //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the PARDISO backend\n"; +// m_iparm[11] = 0; +// } + + Scalar* rhs_ptr = const_cast(b.derived().data()); + Matrix tmp; + + // Pardiso cannot solve in-place + if(rhs_ptr == x.derived().data()) + { + tmp = b; + rhs_ptr = tmp.data(); + } + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 33, internal::convert_index(m_size), + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 
internal::convert_index(nrhs), m_iparm.data(), m_msglvl, + rhs_ptr, x.derived().data()); + + manageErrorCode(error); +} + + +/** \ingroup PardisoSupport_Module + * \class PardisoLU + * \brief A sparse direct LU factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization + * using the Intel MKL PARDISO library. The sparse matrix A must be squared and invertible. + * The vectors or matrices X and B can be either dense or sparse. + * + * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: + * \code solver.pardisoParameterArray()[59] = 1; \endcode + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SparseLU + */ +template +class PardisoLU : public PardisoImpl< PardisoLU > +{ + protected: + typedef PardisoImpl Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLU >; + + public: + + using Base::compute; + using Base::solve; + + PardisoLU() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 13 : 11); + } + + explicit PardisoLU(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 13 : 11); + compute(matrix); + } + protected: + void getMatrix(const MatrixType& matrix) + { + m_matrix = matrix; + m_matrix.makeCompressed(); + } +}; + +/** \ingroup PardisoSupport_Module + * \class PardisoLLT + * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T Cholesky factorization + * using the Intel MKL PARDISO library. The sparse matrix A must be selfajoint and positive definite. + * The vectors or matrices X and B can be either dense or sparse. + * + * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: + * \code solver.pardisoParameterArray()[59] = 1; \endcode + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used. + * Upper|Lower can be used to tell both triangular parts can be used as input. + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SimplicialLLT + */ +template +class PardisoLLT : public PardisoImpl< PardisoLLT > +{ + protected: + typedef PardisoImpl< PardisoLLT > Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLLT >; + + public: + + typedef typename Base::StorageIndex StorageIndex; + enum { UpLo = _UpLo }; + using Base::compute; + + PardisoLLT() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 4 : 2); + } + + explicit PardisoLLT(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 
4 : 2); + compute(matrix); + } + + protected: + + void getMatrix(const MatrixType& matrix) + { + // PARDISO supports only upper, row-major matrices + PermutationMatrix p_null; + m_matrix.resize(matrix.rows(), matrix.cols()); + m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); + m_matrix.makeCompressed(); + } +}; + +/** \ingroup PardisoSupport_Module + * \class PardisoLDLT + * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a LDL^T Cholesky factorization + * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfajoint and positive definite. + * For complex matrices, A can also be symmetric only, see the \a Options template parameter. + * The vectors or matrices X and B can be either dense or sparse. + * + * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: + * \code solver.pardisoParameterArray()[59] = 1; \endcode + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used. + * Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix. + * Upper|Lower can be used to tell both triangular parts can be used as input. + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT + */ +template +class PardisoLDLT : public PardisoImpl< PardisoLDLT > +{ + protected: + typedef PardisoImpl< PardisoLDLT > Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLDLT >; + + public: + + typedef typename Base::StorageIndex StorageIndex; + using Base::compute; + enum { UpLo = Options&(Upper|Lower) }; + + PardisoLDLT() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2); + } + + explicit PardisoLDLT(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2); + compute(matrix); + } + + void getMatrix(const MatrixType& matrix) + { + // PARDISO supports only upper, row-major matrices + PermutationMatrix p_null; + m_matrix.resize(matrix.rows(), matrix.cols()); + m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); + m_matrix.makeCompressed(); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_PARDISOSUPPORT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR.h new file mode 100644 index 000000000..a7b47d55d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR.h @@ -0,0 +1,653 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H +#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H + +namespace Eigen { + +namespace internal { +template struct traits > + : traits<_MatrixType> +{ + enum { Flags = 0 }; +}; + +} // end namespace internal + +/** \ingroup QR_Module + * + * \class ColPivHouseholderQR + * + * \brief Householder rank-revealing QR decomposition of a matrix with column-pivoting + * + * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition + * + * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R + * such that + * \f[ + * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R} + * \f] + * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an + * upper triangular matrix. + * + * This decomposition performs column pivoting in order to be rank-revealing and improve + * numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::colPivHouseholderQr() + */ +template class ColPivHouseholderQR +{ + public: + + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + // FIXME should be int + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef PermutationMatrix PermutationType; + typedef typename internal::plain_row_type::type IntRowVectorType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef typename internal::plain_row_type::type RealRowVectorType; + typedef HouseholderSequence::type> HouseholderSequenceType; + typedef typename MatrixType::PlainObject PlainObject; + + private: + + typedef typename PermutationType::StorageIndex PermIndexType; + + public: + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via ColPivHouseholderQR::compute(const MatrixType&). + */ + ColPivHouseholderQR() + : m_qr(), + m_hCoeffs(), + m_colsPermutation(), + m_colsTranspositions(), + m_temp(), + m_colNormsUpdated(), + m_colNormsDirect(), + m_isInitialized(false), + m_usePrescribedThreshold(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa ColPivHouseholderQR() + */ + ColPivHouseholderQR(Index rows, Index cols) + : m_qr(rows, cols), + m_hCoeffs((std::min)(rows,cols)), + m_colsPermutation(PermIndexType(cols)), + m_colsTranspositions(cols), + m_temp(cols), + m_colNormsUpdated(cols), + m_colNormsDirect(cols), + m_isInitialized(false), + m_usePrescribedThreshold(false) {} + + /** \brief Constructs a QR factorization from a given matrix + * + * This constructor computes the QR factorization of the matrix \a matrix by calling + * the method compute(). 
It is a short cut for: + * + * \code + * ColPivHouseholderQR qr(matrix.rows(), matrix.cols()); + * qr.compute(matrix); + * \endcode + * + * \sa compute() + */ + template + explicit ColPivHouseholderQR(const EigenBase& matrix) + : m_qr(matrix.rows(), matrix.cols()), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), + m_colsPermutation(PermIndexType(matrix.cols())), + m_colsTranspositions(matrix.cols()), + m_temp(matrix.cols()), + m_colNormsUpdated(matrix.cols()), + m_colNormsDirect(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) + { + compute(matrix.derived()); + } + + /** \brief Constructs a QR factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. + * + * \sa ColPivHouseholderQR(const EigenBase&) + */ + template + explicit ColPivHouseholderQR(EigenBase& matrix) + : m_qr(matrix.derived()), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), + m_colsPermutation(PermIndexType(matrix.cols())), + m_colsTranspositions(matrix.cols()), + m_temp(matrix.cols()), + m_colNormsUpdated(matrix.cols()), + m_colNormsDirect(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) + { + computeInPlace(); + } + + /** This method finds a solution x to the equation Ax=b, where A is the matrix of which + * *this is the QR decomposition, if any exists. + * + * \param b the right-hand-side of the equation to solve. + * + * \returns a solution. + * + * \note_about_checking_solutions + * + * \note_about_arbitrary_choice_of_solution + * + * Example: \include ColPivHouseholderQR_solve.cpp + * Output: \verbinclude ColPivHouseholderQR_solve.out + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return Solve(*this, b.derived()); + } + + HouseholderSequenceType householderQ() const; + HouseholderSequenceType matrixQ() const + { + return householderQ(); + } + + /** \returns a reference to the matrix where the Householder QR decomposition is stored + */ + const MatrixType& matrixQR() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return m_qr; + } + + /** \returns a reference to the matrix where the result Householder QR is stored + * \warning The strict lower part of this matrix contains internal values. + * Only the upper triangular part should be referenced. To get it, use + * \code matrixR().template triangularView() \endcode + * For rank-deficient matrices, use + * \code + * matrixR().topLeftCorner(rank(), rank()).template triangularView() + * \endcode + */ + const MatrixType& matrixR() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return m_qr; + } + + template + ColPivHouseholderQR& compute(const EigenBase& matrix); + + /** \returns a const reference to the column permutation matrix */ + const PermutationType& colsPermutation() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return m_colsPermutation; + } + + /** \returns the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. 
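+  * A short sketch (illustrative only; assumes a square MatrixXd A):
+  * \code
+  * ColPivHouseholderQR<MatrixXd> qr(A);
+  * double ad  = qr.absDeterminant();    // |det(A)|, may overflow or underflow
+  * double lad = qr.logAbsDeterminant(); // log|det(A)|, avoids that risk
+  * \endcode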
+ * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * One way to work around that is to use logAbsDeterminant() instead. + * + * \sa logAbsDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar absDeterminant() const; + + /** \returns the natural log of the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \note This method is useful to work around the risk of overflow/underflow that's inherent + * to determinant computation. + * + * \sa absDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar logAbsDeterminant() const; + + /** \returns the rank of the matrix of which *this is the QR decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index rank() const + { + using std::abs; + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) + result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); + return result; + } + + /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index dimensionOfKernel() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return cols() - rank(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents an injective + * linear map, i.e. has trivial kernel; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInjective() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return rank() == cols(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents a surjective + * linear map; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isSurjective() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return rank() == rows(); + } + + /** \returns true if the matrix of which *this is the QR decomposition is invertible. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInvertible() const + { + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return isInjective() && isSurjective(); + } + + /** \returns the inverse of the matrix of which *this is the QR decomposition. 
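+  * For instance (an illustrative sketch; assumes a square MatrixXd A):
+  * \code
+  * ColPivHouseholderQR<MatrixXd> qr(A);
+  * MatrixXd Ainv;
+  * if(qr.isInvertible()) Ainv = qr.inverse();
+  * \endcode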
+  *
+  * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+  *       Use isInvertible() to first determine whether this matrix is invertible.
+  */
+  inline const Inverse<ColPivHouseholderQR> inverse() const
+  {
+    eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+    return Inverse<ColPivHouseholderQR>(*this);
+  }
+
+  inline Index rows() const { return m_qr.rows(); }
+  inline Index cols() const { return m_qr.cols(); }
+
+  /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+    *
+    * For advanced uses only.
+    */
+  const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+  /** Allows to prescribe a threshold to be used by certain methods, such as rank(),
+    * which need to determine when pivots are to be considered nonzero. This is not used for the
+    * QR decomposition itself.
+    *
+    * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+    * uses a formula to automatically determine a reasonable threshold.
+    * Once you have called the present method setThreshold(const RealScalar&),
+    * your value is used instead.
+    *
+    * \param threshold The new value to use as the threshold.
+    *
+    * A pivot will be considered nonzero if its absolute value is strictly greater than
+    * \f$ threshold \times \vert maxpivot \vert \f$
+    * where maxpivot is the biggest pivot.
+    *
+    * If you want to come back to the default behavior, call setThreshold(Default_t)
+    */
+  ColPivHouseholderQR& setThreshold(const RealScalar& threshold)
+  {
+    m_usePrescribedThreshold = true;
+    m_prescribedThreshold = threshold;
+    return *this;
+  }
+
+  /** Allows to come back to the default behavior, letting Eigen use its default formula for
+    * determining the threshold.
+    *
+    * You should pass the special object Eigen::Default as parameter here.
+    * \code qr.setThreshold(Eigen::Default); \endcode
+    *
+    * See the documentation of setThreshold(const RealScalar&).
+    */
+  ColPivHouseholderQR& setThreshold(Default_t)
+  {
+    m_usePrescribedThreshold = false;
+    return *this;
+  }
+
+  /** Returns the threshold that will be used by certain methods such as rank().
+    *
+    * See the documentation of setThreshold(const RealScalar&).
+    */
+  RealScalar threshold() const
+  {
+    eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+    return m_usePrescribedThreshold ? m_prescribedThreshold
+    // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+    // and turns out to be identical to Higham's formula used already in LDLt.
+                                    : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
+  }
+
+  /** \returns the number of nonzero pivots in the QR decomposition.
+    * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+    * So that notion isn't really intrinsically interesting, but it is
+    * still useful when implementing algorithms.
+    *
+    * \sa rank()
+    */
+  inline Index nonzeroPivots() const
+  {
+    eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+    return m_nonzero_pivots;
+  }
+
+  /** \returns the absolute value of the biggest pivot, i.e. the biggest
+    * diagonal coefficient of R.
+    */
+  RealScalar maxPivot() const { return m_maxpivot; }
+
+  /** \brief Reports whether the QR factorization was successful.
+    *
+    * \note This function always returns \c Success. It is provided for compatibility
+    * with other factorization routines.
+ * \returns \c Success + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return Success; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + friend class CompleteOrthogonalDecomposition; + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void computeInPlace(); + + MatrixType m_qr; + HCoeffsType m_hCoeffs; + PermutationType m_colsPermutation; + IntRowVectorType m_colsTranspositions; + RowVectorType m_temp; + RealRowVectorType m_colNormsUpdated; + RealRowVectorType m_colNormsDirect; + bool m_isInitialized, m_usePrescribedThreshold; + RealScalar m_prescribedThreshold, m_maxpivot; + Index m_nonzero_pivots; + Index m_det_pq; +}; + +template +typename MatrixType::RealScalar ColPivHouseholderQR::absDeterminant() const +{ + using std::abs; + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return abs(m_qr.diagonal().prod()); +} + +template +typename MatrixType::RealScalar ColPivHouseholderQR::logAbsDeterminant() const +{ + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return m_qr.diagonal().cwiseAbs().array().log().sum(); +} + +/** Performs the QR factorization of the given matrix \a matrix. The result of + * the factorization is stored into \c *this, and a reference to \c *this + * is returned. + * + * \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&) + */ +template +template +ColPivHouseholderQR& ColPivHouseholderQR::compute(const EigenBase& matrix) +{ + m_qr = matrix.derived(); + computeInPlace(); + return *this; +} + +template +void ColPivHouseholderQR::computeInPlace() +{ + check_template_parameters(); + + // the column permutation is stored as int indices, so just to be sure: + eigen_assert(m_qr.cols()<=NumTraits::highest()); + + using std::abs; + + Index rows = m_qr.rows(); + Index cols = m_qr.cols(); + Index size = m_qr.diagonalSize(); + + m_hCoeffs.resize(size); + + m_temp.resize(cols); + + m_colsTranspositions.resize(m_qr.cols()); + Index number_of_transpositions = 0; + + m_colNormsUpdated.resize(cols); + m_colNormsDirect.resize(cols); + for (Index k = 0; k < cols; ++k) { + // colNormsDirect(k) caches the most recent directly computed norm of + // column k. + m_colNormsDirect.coeffRef(k) = m_qr.col(k).norm(); + m_colNormsUpdated.coeffRef(k) = m_colNormsDirect.coeffRef(k); + } + + RealScalar threshold_helper = numext::abs2(m_colNormsUpdated.maxCoeff() * NumTraits::epsilon()) / RealScalar(rows); + RealScalar norm_downdate_threshold = numext::sqrt(NumTraits::epsilon()); + + m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) + m_maxpivot = RealScalar(0); + + for(Index k = 0; k < size; ++k) + { + // first, we look up in our table m_colNormsUpdated which column has the biggest norm + Index biggest_col_index; + RealScalar biggest_col_sq_norm = numext::abs2(m_colNormsUpdated.tail(cols-k).maxCoeff(&biggest_col_index)); + biggest_col_index += k; + + // Track the number of meaningful pivots but do not stop the decomposition to make + // sure that the initial matrix is properly reproduced. See bug 941. 
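+    // (clarifying note, not upstream: the first time the best remaining column
+    //  falls below the scaled threshold, the rank estimate m_nonzero_pivots is
+    //  frozen at k, but the elimination continues so the stored factors still
+    //  reproduce the full input matrix)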
+ if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k)) + m_nonzero_pivots = k; + + // apply the transposition to the columns + m_colsTranspositions.coeffRef(k) = biggest_col_index; + if(k != biggest_col_index) { + m_qr.col(k).swap(m_qr.col(biggest_col_index)); + std::swap(m_colNormsUpdated.coeffRef(k), m_colNormsUpdated.coeffRef(biggest_col_index)); + std::swap(m_colNormsDirect.coeffRef(k), m_colNormsDirect.coeffRef(biggest_col_index)); + ++number_of_transpositions; + } + + // generate the householder vector, store it below the diagonal + RealScalar beta; + m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); + + // apply the householder transformation to the diagonal coefficient + m_qr.coeffRef(k,k) = beta; + + // remember the maximum absolute value of diagonal coefficients + if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); + + // apply the householder transformation + m_qr.bottomRightCorner(rows-k, cols-k-1) + .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); + + // update our table of norms of the columns + for (Index j = k + 1; j < cols; ++j) { + // The following implements the stable norm downgrade step discussed in + // http://www.netlib.org/lapack/lawnspdf/lawn176.pdf + // and used in LAPACK routines xGEQPF and xGEQP3. + // See lines 278-297 in http://www.netlib.org/lapack/explore-html/dc/df4/sgeqpf_8f_source.html + if (m_colNormsUpdated.coeffRef(j) != RealScalar(0)) { + RealScalar temp = abs(m_qr.coeffRef(k, j)) / m_colNormsUpdated.coeffRef(j); + temp = (RealScalar(1) + temp) * (RealScalar(1) - temp); + temp = temp < RealScalar(0) ? RealScalar(0) : temp; + RealScalar temp2 = temp * numext::abs2(m_colNormsUpdated.coeffRef(j) / + m_colNormsDirect.coeffRef(j)); + if (temp2 <= norm_downdate_threshold) { + // The updated norm has become too inaccurate so re-compute the column + // norm directly. + m_colNormsDirect.coeffRef(j) = m_qr.col(j).tail(rows - k - 1).norm(); + m_colNormsUpdated.coeffRef(j) = m_colNormsDirect.coeffRef(j); + } else { + m_colNormsUpdated.coeffRef(j) *= numext::sqrt(temp); + } + } + } + } + + m_colsPermutation.setIdentity(PermIndexType(cols)); + for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k) + m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k))); + + m_det_pq = (number_of_transpositions%2) ? -1 : 1; + m_isInitialized = true; +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + eigen_assert(rhs.rows() == rows()); + + const Index nonzero_pivots = nonzeroPivots(); + + if(nonzero_pivots == 0) + { + dst.setZero(); + return; + } + + typename RhsType::PlainObject c(rhs); + + // Note that the matrix Q = H_0^* H_1^*... 
so its inverse is Q^* = (H_0 H_1 ...)^T + c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs) + .setLength(nonzero_pivots) + .transpose() + ); + + m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots) + .template triangularView() + .solveInPlace(c.topRows(nonzero_pivots)); + + for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i); + for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero(); +} +#endif + +namespace internal { + +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> +{ + typedef ColPivHouseholderQR QrType; + typedef Inverse SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); + } +}; + +} // end namespace internal + +/** \returns the matrix Q as a sequence of householder transformations. + * You can extract the meaningful part only by using: + * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/ +template +typename ColPivHouseholderQR::HouseholderSequenceType ColPivHouseholderQR + ::householderQ() const +{ + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); +} + +/** \return the column-pivoting Householder QR decomposition of \c *this. + * + * \sa class ColPivHouseholderQR + */ +template +const ColPivHouseholderQR::PlainObject> +MatrixBase::colPivHouseholderQr() const +{ + return ColPivHouseholderQR(eval()); +} + +} // end namespace Eigen + +#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h new file mode 100644 index 000000000..4e9651f83 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h @@ -0,0 +1,97 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * Householder QR decomposition of a matrix with column pivoting based on + * LAPACKE_?geqp3 function. + ******************************************************************************** +*/ + +#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H +#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H + +namespace Eigen { + +/** \internal Specialization for the data types supported by LAPACKe */ + +#define EIGEN_LAPACKE_QR_COLPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \ +template<> template inline \ +ColPivHouseholderQR >& \ +ColPivHouseholderQR >::compute( \ + const EigenBase& matrix) \ +\ +{ \ + using std::abs; \ + typedef Matrix MatrixType; \ + typedef MatrixType::RealScalar RealScalar; \ + Index rows = matrix.rows();\ + Index cols = matrix.cols();\ +\ + m_qr = matrix;\ + Index size = m_qr.diagonalSize();\ + m_hCoeffs.resize(size);\ +\ + m_colsTranspositions.resize(cols);\ + /*Index number_of_transpositions = 0;*/ \ +\ + m_nonzero_pivots = 0; \ + m_maxpivot = RealScalar(0);\ + m_colsPermutation.resize(cols); \ + m_colsPermutation.indices().setZero(); \ +\ + lapack_int lda = internal::convert_index(m_qr.outerStride()); \ + lapack_int matrix_order = LAPACKE_COLROW; \ + LAPACKE_##LAPACKE_PREFIX##geqp3( matrix_order, internal::convert_index(rows), internal::convert_index(cols), \ + (LAPACKE_TYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (LAPACKE_TYPE*)m_hCoeffs.data()); \ + m_isInitialized = true; \ + m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \ + m_hCoeffs.adjointInPlace(); \ + RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \ + lapack_int *perm = m_colsPermutation.indices().data(); \ + for(Index i=0;i premultiplied_threshold);\ + } \ + for(Index i=0;i +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H +#define EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits<_MatrixType> { + enum { Flags = 0 }; +}; + +} // end namespace internal + +/** \ingroup QR_Module + * + * \class CompleteOrthogonalDecomposition + * + * \brief Complete orthogonal decomposition (COD) of a matrix. + * + * \param MatrixType the type of the matrix of which we are computing the COD. + * + * This class performs a rank-revealing complete orthogonal decomposition of a + * matrix \b A into matrices \b P, \b Q, \b T, and \b Z such that + * \f[ + * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, + * \begin{bmatrix} \mathbf{T} & \mathbf{0} \\ + * \mathbf{0} & \mathbf{0} \end{bmatrix} \, \mathbf{Z} + * \f] + * by using Householder transformations. 
Here, \b P is a permutation matrix, + * \b Q and \b Z are unitary matrices and \b T an upper triangular matrix of + * size rank-by-rank. \b A may be rank deficient. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::completeOrthogonalDecomposition() + */ +template +class CompleteOrthogonalDecomposition { + public: + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef PermutationMatrix + PermutationType; + typedef typename internal::plain_row_type::type + IntRowVectorType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef typename internal::plain_row_type::type + RealRowVectorType; + typedef HouseholderSequence< + MatrixType, typename internal::remove_all< + typename HCoeffsType::ConjugateReturnType>::type> + HouseholderSequenceType; + typedef typename MatrixType::PlainObject PlainObject; + + private: + typedef typename PermutationType::Index PermIndexType; + + public: + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via + * \c CompleteOrthogonalDecomposition::compute(const* MatrixType&). + */ + CompleteOrthogonalDecomposition() : m_cpqr(), m_zCoeffs(), m_temp() {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa CompleteOrthogonalDecomposition() + */ + CompleteOrthogonalDecomposition(Index rows, Index cols) + : m_cpqr(rows, cols), m_zCoeffs((std::min)(rows, cols)), m_temp(cols) {} + + /** \brief Constructs a complete orthogonal decomposition from a given + * matrix. + * + * This constructor computes the complete orthogonal decomposition of the + * matrix \a matrix by calling the method compute(). The default + * threshold for rank determination will be used. It is a short cut for: + * + * \code + * CompleteOrthogonalDecomposition cod(matrix.rows(), + * matrix.cols()); + * cod.setThreshold(Default); + * cod.compute(matrix); + * \endcode + * + * \sa compute() + */ + template + explicit CompleteOrthogonalDecomposition(const EigenBase& matrix) + : m_cpqr(matrix.rows(), matrix.cols()), + m_zCoeffs((std::min)(matrix.rows(), matrix.cols())), + m_temp(matrix.cols()) + { + compute(matrix.derived()); + } + + /** \brief Constructs a complete orthogonal decomposition from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. + * + * \sa CompleteOrthogonalDecomposition(const EigenBase&) + */ + template + explicit CompleteOrthogonalDecomposition(EigenBase& matrix) + : m_cpqr(matrix.derived()), + m_zCoeffs((std::min)(matrix.rows(), matrix.cols())), + m_temp(matrix.cols()) + { + computeInPlace(); + } + + + /** This method computes the minimum-norm solution X to a least squares + * problem \f[\mathrm{minimize} \|A X - B\|, \f] where \b A is the matrix of + * which \c *this is the complete orthogonal decomposition. 
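+  * A minimal sketch (illustrative only; assumes MatrixXd A and VectorXd b with
+  * b.rows() == A.rows()):
+  * \code
+  * CompleteOrthogonalDecomposition<MatrixXd> cod(A);
+  * VectorXd x = cod.solve(b); // minimum-norm least-squares solution
+  * \endcode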
+ * + * \param b the right-hand sides of the problem to solve. + * + * \returns a solution. + * + */ + template + inline const Solve solve( + const MatrixBase& b) const { + eigen_assert(m_cpqr.m_isInitialized && + "CompleteOrthogonalDecomposition is not initialized."); + return Solve(*this, b.derived()); + } + + HouseholderSequenceType householderQ(void) const; + HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); } + + /** \returns the matrix \b Z. + */ + MatrixType matrixZ() const { + MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols()); + applyZAdjointOnTheLeftInPlace(Z); + return Z.adjoint(); + } + + /** \returns a reference to the matrix where the complete orthogonal + * decomposition is stored + */ + const MatrixType& matrixQTZ() const { return m_cpqr.matrixQR(); } + + /** \returns a reference to the matrix where the complete orthogonal + * decomposition is stored. + * \warning The strict lower part and \code cols() - rank() \endcode right + * columns of this matrix contains internal values. + * Only the upper triangular part should be referenced. To get it, use + * \code matrixT().template triangularView() \endcode + * For rank-deficient matrices, use + * \code + * matrixR().topLeftCorner(rank(), rank()).template triangularView() + * \endcode + */ + const MatrixType& matrixT() const { return m_cpqr.matrixQR(); } + + template + CompleteOrthogonalDecomposition& compute(const EigenBase& matrix) { + // Compute the column pivoted QR factorization A P = Q R. + m_cpqr.compute(matrix); + computeInPlace(); + return *this; + } + + /** \returns a const reference to the column permutation matrix */ + const PermutationType& colsPermutation() const { + return m_cpqr.colsPermutation(); + } + + /** \returns the absolute value of the determinant of the matrix of which + * *this is the complete orthogonal decomposition. It has only linear + * complexity (that is, O(n) where n is the dimension of the square matrix) + * as the complete orthogonal decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * One way to work around that is to use logAbsDeterminant() instead. + * + * \sa logAbsDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar absDeterminant() const; + + /** \returns the natural log of the absolute value of the determinant of the + * matrix of which *this is the complete orthogonal decomposition. It has + * only linear complexity (that is, O(n) where n is the dimension of the + * square matrix) as the complete orthogonal decomposition has already been + * computed. + * + * \note This is only for square matrices. + * + * \note This method is useful to work around the risk of overflow/underflow + * that's inherent to determinant computation. + * + * \sa absDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar logAbsDeterminant() const; + + /** \returns the rank of the matrix of which *this is the complete orthogonal + * decomposition. + * + * \note This method has to determine which pivots should be considered + * nonzero. For that, it uses the threshold value that you can control by + * calling setThreshold(const RealScalar&). + */ + inline Index rank() const { return m_cpqr.rank(); } + + /** \returns the dimension of the kernel of the matrix of which *this is the + * complete orthogonal decomposition. 
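+  * For example (illustrative; assumes MatrixXd A):
+  * \code
+  * CompleteOrthogonalDecomposition<MatrixXd> cod(A);
+  * Index nullity = cod.dimensionOfKernel(); // == A.cols() - cod.rank()
+  * \endcode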
+  *
+  * \note This method has to determine which pivots should be considered
+  * nonzero. For that, it uses the threshold value that you can control by
+  * calling setThreshold(const RealScalar&).
+  */
+  inline Index dimensionOfKernel() const { return m_cpqr.dimensionOfKernel(); }
+
+  /** \returns true if the matrix of which *this is the decomposition represents
+   * an injective linear map, i.e. has trivial kernel; false otherwise.
+   *
+   * \note This method has to determine which pivots should be considered
+   * nonzero. For that, it uses the threshold value that you can control by
+   * calling setThreshold(const RealScalar&).
+   */
+  inline bool isInjective() const { return m_cpqr.isInjective(); }
+
+  /** \returns true if the matrix of which *this is the decomposition represents
+   * a surjective linear map; false otherwise.
+   *
+   * \note This method has to determine which pivots should be considered
+   * nonzero. For that, it uses the threshold value that you can control by
+   * calling setThreshold(const RealScalar&).
+   */
+  inline bool isSurjective() const { return m_cpqr.isSurjective(); }
+
+  /** \returns true if the matrix of which *this is the complete orthogonal
+   * decomposition is invertible.
+   *
+   * \note This method has to determine which pivots should be considered
+   * nonzero. For that, it uses the threshold value that you can control by
+   * calling setThreshold(const RealScalar&).
+   */
+  inline bool isInvertible() const { return m_cpqr.isInvertible(); }
+
+  /** \returns the pseudo-inverse of the matrix of which *this is the complete
+   * orthogonal decomposition.
+   * \warning Do not compute \c this->pseudoInverse()*rhs to solve a linear system.
+   * It is more efficient and numerically stable to call \c this->solve(rhs).
+   */
+  inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const
+  {
+    return Inverse<CompleteOrthogonalDecomposition>(*this);
+  }
+
+  inline Index rows() const { return m_cpqr.rows(); }
+  inline Index cols() const { return m_cpqr.cols(); }
+
+  /** \returns a const reference to the vector of Householder coefficients used
+   * to represent the factor \c Q.
+   *
+   * For advanced uses only.
+   */
+  inline const HCoeffsType& hCoeffs() const { return m_cpqr.hCoeffs(); }
+
+  /** \returns a const reference to the vector of Householder coefficients
+   * used to represent the factor \c Z.
+   *
+   * For advanced uses only.
+   */
+  const HCoeffsType& zCoeffs() const { return m_zCoeffs; }
+
+  /** Allows to prescribe a threshold to be used by certain methods, such as
+   * rank(), which need to determine when pivots are to be considered nonzero.
+   * Must be called before calling compute().
+   *
+   * When it needs to get the threshold value, Eigen calls threshold(). By
+   * default, this uses a formula to automatically determine a reasonable
+   * threshold. Once you have called the present method
+   * setThreshold(const RealScalar&), your value is used instead.
+   *
+   * \param threshold The new value to use as the threshold.
+   *
+   * A pivot will be considered nonzero if its absolute value is strictly
+   * greater than \f$ threshold \times \vert maxpivot \vert \f$
+   * where maxpivot is the biggest pivot.
+   *
+   * If you want to come back to the default behavior, call
+   * setThreshold(Default_t)
+   */
+  CompleteOrthogonalDecomposition& setThreshold(const RealScalar& threshold) {
+    m_cpqr.setThreshold(threshold);
+    return *this;
+  }
+
+  /** Allows to come back to the default behavior, letting Eigen use its default
+   * formula for determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code qr.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ CompleteOrthogonalDecomposition& setThreshold(Default_t) {
+ m_cpqr.setThreshold(Default);
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const { return m_cpqr.threshold(); }
+
+ /** \returns the number of nonzero pivots in the complete orthogonal
+ * decomposition. Here nonzero is meant in the exact sense, not in a
+ * fuzzy sense. So that notion isn't really intrinsically interesting,
+ * but it is still useful when implementing algorithms.
+ *
+ * \sa rank()
+ */
+ inline Index nonzeroPivots() const { return m_cpqr.nonzeroPivots(); }
+
+ /** \returns the absolute value of the biggest pivot, i.e. the biggest
+ * diagonal coefficient of R.
+ */
+ inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
+
+ /** \brief Reports whether the complete orthogonal decomposition was
+ * successful.
+ *
+ * \note This function always returns \c Success. It is provided for
+ * compatibility with other factorization routines.
+ * \returns \c Success
+ */
+ ComputationInfo info() const {
+ eigen_assert(m_cpqr.m_isInitialized && "Decomposition is not initialized.");
+ return Success;
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ template <typename RhsType, typename DstType>
+ EIGEN_DEVICE_FUNC void _solve_impl(const RhsType& rhs, DstType& dst) const;
+#endif
+
+ protected:
+ static void check_template_parameters() {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
+ void computeInPlace();
+
+ /** Overwrites \b rhs with \f$ \mathbf{Z}^* \mathbf{rhs} \f$.
+ */
+ template <typename Rhs>
+ void applyZAdjointOnTheLeftInPlace(Rhs& rhs) const;
+
+ ColPivHouseholderQR<MatrixType> m_cpqr;
+ HCoeffsType m_zCoeffs;
+ RowVectorType m_temp;
+};
+
+template <typename MatrixType>
+typename MatrixType::RealScalar
+CompleteOrthogonalDecomposition<MatrixType>::absDeterminant() const {
+ return m_cpqr.absDeterminant();
+}
+
+template <typename MatrixType>
+typename MatrixType::RealScalar
+CompleteOrthogonalDecomposition<MatrixType>::logAbsDeterminant() const {
+ return m_cpqr.logAbsDeterminant();
+}
+
+/** Performs the complete orthogonal decomposition of the given matrix \a
+ * matrix. The result of the factorization is stored into \c *this, and a
+ * reference to \c *this is returned.
+ *
+ * \sa class CompleteOrthogonalDecomposition,
+ * CompleteOrthogonalDecomposition(const MatrixType&)
+ */
+template <typename MatrixType>
+void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
+{
+ check_template_parameters();
+
+ // the column permutation is stored as int indices, so just to be sure:
+ eigen_assert(m_cpqr.cols() <= NumTraits<int>::highest());
+
+ const Index rank = m_cpqr.rank();
+ const Index cols = m_cpqr.cols();
+ const Index rows = m_cpqr.rows();
+ m_zCoeffs.resize((std::min)(rows, cols));
+ m_temp.resize(cols);
+
+ if (rank < cols) {
+ // We have reduced the (permuted) matrix to the form
+ //   [R11 R12]
+ //   [ 0  R22]
+ // where R11 is r-by-r (r = rank) upper triangular, R12 is
+ // r-by-(n-r), and R22 is empty or the norm of R22 is negligible.
+ // We now compute the complete orthogonal decomposition by applying
+ // Householder transformations from the right to the upper trapezoidal
+ // matrix X = [R11 R12] to zero out R12 and obtain the factorization
+ // [R11 R12] = [T11 0] * Z, where T11 is r-by-r upper triangular and
+ // Z = Z(0) * Z(1) ... Z(r-1) is an n-by-n orthogonal matrix.
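+ // (Editorial aside, an illustrative shape check: with r = 2 and n = 4,
+ // each reflector Z(k) acts on the trailing n - r + 1 = 3 entries of
+ // row k, zeroing the n - r = 2 entries of R12 in that row.)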
+ // We store the data representing Z in R12 and m_zCoeffs. + for (Index k = rank - 1; k >= 0; --k) { + if (k != rank - 1) { + // Given the API for Householder reflectors, it is more convenient if + // we swap the leading parts of columns k and r-1 (zero-based) to form + // the matrix X_k = [X(0:k, k), X(0:k, r:n)] + m_cpqr.m_qr.col(k).head(k + 1).swap( + m_cpqr.m_qr.col(rank - 1).head(k + 1)); + } + // Construct Householder reflector Z(k) to zero out the last row of X_k, + // i.e. choose Z(k) such that + // [X(k, k), X(k, r:n)] * Z(k) = [beta, 0, .., 0]. + RealScalar beta; + m_cpqr.m_qr.row(k) + .tail(cols - rank + 1) + .makeHouseholderInPlace(m_zCoeffs(k), beta); + m_cpqr.m_qr(k, rank - 1) = beta; + if (k > 0) { + // Apply Z(k) to the first k rows of X_k + m_cpqr.m_qr.topRightCorner(k, cols - rank + 1) + .applyHouseholderOnTheRight( + m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k), + &m_temp(0)); + } + if (k != rank - 1) { + // Swap X(0:k,k) back to its proper location. + m_cpqr.m_qr.col(k).head(k + 1).swap( + m_cpqr.m_qr.col(rank - 1).head(k + 1)); + } + } + } +} + +template +template +void CompleteOrthogonalDecomposition::applyZAdjointOnTheLeftInPlace( + Rhs& rhs) const { + const Index cols = this->cols(); + const Index nrhs = rhs.cols(); + const Index rank = this->rank(); + Matrix temp((std::max)(cols, nrhs)); + for (Index k = 0; k < rank; ++k) { + if (k != rank - 1) { + rhs.row(k).swap(rhs.row(rank - 1)); + } + rhs.middleRows(rank - 1, cols - rank + 1) + .applyHouseholderOnTheLeft( + matrixQTZ().row(k).tail(cols - rank).adjoint(), zCoeffs()(k), + &temp(0)); + if (k != rank - 1) { + rhs.row(k).swap(rhs.row(rank - 1)); + } + } +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl( + const RhsType& rhs, DstType& dst) const { + eigen_assert(rhs.rows() == this->rows()); + + const Index rank = this->rank(); + if (rank == 0) { + dst.setZero(); + return; + } + + // Compute c = Q^* * rhs + // Note that the matrix Q = H_0^* H_1^*... so its inverse is + // Q^* = (H_0 H_1 ...)^T + typename RhsType::PlainObject c(rhs); + c.applyOnTheLeft( + householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose()); + + // Solve T z = c(1:rank, :) + dst.topRows(rank) = matrixT() + .topLeftCorner(rank, rank) + .template triangularView() + .solve(c.topRows(rank)); + + const Index cols = this->cols(); + if (rank < cols) { + // Compute y = Z^* * [ z ] + // [ 0 ] + dst.bottomRows(cols - rank).setZero(); + applyZAdjointOnTheLeftInPlace(dst); + } + + // Undo permutation to get x = P^{-1} * y. + dst = colsPermutation() * dst; +} +#endif + +namespace internal { + +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> +{ + typedef CompleteOrthogonalDecomposition CodType; + typedef Inverse SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows())); + } +}; + +} // end namespace internal + +/** \returns the matrix Q as a sequence of householder transformations */ +template +typename CompleteOrthogonalDecomposition::HouseholderSequenceType +CompleteOrthogonalDecomposition::householderQ() const { + return m_cpqr.householderQ(); +} + +/** \return the complete orthogonal decomposition of \c *this. 
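+ * (A hedged sketch of the typical call, with assumed dense \c A and \c b:
+ * \code
+ * MatrixXd A = MatrixXd::Random(6, 4);
+ * VectorXd b = VectorXd::Random(6);
+ * VectorXd x = A.completeOrthogonalDecomposition().solve(b); // minimum-norm least-squares solution
+ * \endcode )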
+ * + * \sa class CompleteOrthogonalDecomposition + */ +template +const CompleteOrthogonalDecomposition::PlainObject> +MatrixBase::completeOrthogonalDecomposition() const { + return CompleteOrthogonalDecomposition(eval()); +} + +} // end namespace Eigen + +#endif // EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/FullPivHouseholderQR.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/FullPivHouseholderQR.h new file mode 100644 index 000000000..e489bddc2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/FullPivHouseholderQR.h @@ -0,0 +1,676 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H +#define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H + +namespace Eigen { + +namespace internal { + +template struct traits > + : traits<_MatrixType> +{ + enum { Flags = 0 }; +}; + +template struct FullPivHouseholderQRMatrixQReturnType; + +template +struct traits > +{ + typedef typename MatrixType::PlainObject ReturnType; +}; + +} // end namespace internal + +/** \ingroup QR_Module + * + * \class FullPivHouseholderQR + * + * \brief Householder rank-revealing QR decomposition of a matrix with full pivoting + * + * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition + * + * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b P', \b Q and \b R + * such that + * \f[ + * \mathbf{P} \, \mathbf{A} \, \mathbf{P}' = \mathbf{Q} \, \mathbf{R} + * \f] + * by using Householder transformations. Here, \b P and \b P' are permutation matrices, \b Q a unitary matrix + * and \b R an upper triangular matrix. + * + * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal + * numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::fullPivHouseholderQr() + */ +template class FullPivHouseholderQR +{ + public: + + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + // FIXME should be int + typedef typename MatrixType::StorageIndex StorageIndex; + typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef Matrix IntDiagSizeVectorType; + typedef PermutationMatrix PermutationType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef typename internal::plain_col_type::type ColVectorType; + typedef typename MatrixType::PlainObject PlainObject; + + /** \brief Default Constructor. 
+ * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via FullPivHouseholderQR::compute(const MatrixType&). + */ + FullPivHouseholderQR() + : m_qr(), + m_hCoeffs(), + m_rows_transpositions(), + m_cols_transpositions(), + m_cols_permutation(), + m_temp(), + m_isInitialized(false), + m_usePrescribedThreshold(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa FullPivHouseholderQR() + */ + FullPivHouseholderQR(Index rows, Index cols) + : m_qr(rows, cols), + m_hCoeffs((std::min)(rows,cols)), + m_rows_transpositions((std::min)(rows,cols)), + m_cols_transpositions((std::min)(rows,cols)), + m_cols_permutation(cols), + m_temp(cols), + m_isInitialized(false), + m_usePrescribedThreshold(false) {} + + /** \brief Constructs a QR factorization from a given matrix + * + * This constructor computes the QR factorization of the matrix \a matrix by calling + * the method compute(). It is a short cut for: + * + * \code + * FullPivHouseholderQR qr(matrix.rows(), matrix.cols()); + * qr.compute(matrix); + * \endcode + * + * \sa compute() + */ + template + explicit FullPivHouseholderQR(const EigenBase& matrix) + : m_qr(matrix.rows(), matrix.cols()), + m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), + m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())), + m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())), + m_cols_permutation(matrix.cols()), + m_temp(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) + { + compute(matrix.derived()); + } + + /** \brief Constructs a QR factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. + * + * \sa FullPivHouseholderQR(const EigenBase&) + */ + template + explicit FullPivHouseholderQR(EigenBase& matrix) + : m_qr(matrix.derived()), + m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), + m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())), + m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())), + m_cols_permutation(matrix.cols()), + m_temp(matrix.cols()), + m_isInitialized(false), + m_usePrescribedThreshold(false) + { + computeInPlace(); + } + + /** This method finds a solution x to the equation Ax=b, where A is the matrix of which + * \c *this is the QR decomposition. + * + * \param b the right-hand-side of the equation to solve. + * + * \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A, + * and an arbitrary solution otherwise. 
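+ * (Sketch under assumptions — \c A is an m-by-n dense matrix, \c b has size m:
+ * \code
+ * FullPivHouseholderQR<MatrixXd> qr(A);
+ * VectorXd x = qr.solve(b); // least-squares solution when rank(A) == n
+ * \endcode )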
+ * + * \note_about_checking_solutions + * + * \note_about_arbitrary_choice_of_solution + * + * Example: \include FullPivHouseholderQR_solve.cpp + * Output: \verbinclude FullPivHouseholderQR_solve.out + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return Solve(*this, b.derived()); + } + + /** \returns Expression object representing the matrix Q + */ + MatrixQReturnType matrixQ(void) const; + + /** \returns a reference to the matrix where the Householder QR decomposition is stored + */ + const MatrixType& matrixQR() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return m_qr; + } + + template + FullPivHouseholderQR& compute(const EigenBase& matrix); + + /** \returns a const reference to the column permutation matrix */ + const PermutationType& colsPermutation() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return m_cols_permutation; + } + + /** \returns a const reference to the vector of indices representing the rows transpositions */ + const IntDiagSizeVectorType& rowsTranspositions() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return m_rows_transpositions; + } + + /** \returns the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * One way to work around that is to use logAbsDeterminant() instead. + * + * \sa logAbsDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar absDeterminant() const; + + /** \returns the natural log of the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \note This method is useful to work around the risk of overflow/underflow that's inherent + * to determinant computation. + * + * \sa absDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar logAbsDeterminant() const; + + /** \returns the rank of the matrix of which *this is the QR decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index rank() const + { + using std::abs; + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) + result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); + return result; + } + + /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). 
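+ *
+ * (Illustration, assuming \c qr is an initialized FullPivHouseholderQR<MatrixXd>:
+ * \code
+ * if (qr.dimensionOfKernel() > 0) {
+ *   // A is rank deficient under the current threshold
+ * }
+ * \endcode )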
+ */ + inline Index dimensionOfKernel() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return cols() - rank(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents an injective + * linear map, i.e. has trivial kernel; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInjective() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return rank() == cols(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents a surjective + * linear map; false otherwise. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isSurjective() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return rank() == rows(); + } + + /** \returns true if the matrix of which *this is the QR decomposition is invertible. + * + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline bool isInvertible() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return isInjective() && isSurjective(); + } + + /** \returns the inverse of the matrix of which *this is the QR decomposition. + * + * \note If this matrix is not invertible, the returned matrix has undefined coefficients. + * Use isInvertible() to first determine whether this matrix is invertible. + */ + inline const Inverse inverse() const + { + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return Inverse(*this); + } + + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return m_qr.cols(); } + + /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q. + * + * For advanced uses only. + */ + const HCoeffsType& hCoeffs() const { return m_hCoeffs; } + + /** Allows to prescribe a threshold to be used by certain methods, such as rank(), + * who need to determine when pivots are to be considered nonzero. This is not used for the + * QR decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). By default, this + * uses a formula to automatically determine a reasonable threshold. + * Once you have called the present method setThreshold(const RealScalar&), + * your value is used instead. + * + * \param threshold The new value to use as the threshold. + * + * A pivot will be considered nonzero if its absolute value is strictly greater than + * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ + * where maxpivot is the biggest pivot. + * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + FullPivHouseholderQR& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return *this; + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. 
+ * \code qr.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). + */ + FullPivHouseholderQR& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return *this; + } + + /** Returns the threshold that will be used by certain methods such as rank(). + * + * See the documentation of setThreshold(const RealScalar&). + */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + return m_usePrescribedThreshold ? m_prescribedThreshold + // this formula comes from experimenting (see "LU precision tuning" thread on the list) + // and turns out to be identical to Higham's formula used already in LDLt. + : NumTraits::epsilon() * RealScalar(m_qr.diagonalSize()); + } + + /** \returns the number of nonzero pivots in the QR decomposition. + * Here nonzero is meant in the exact sense, not in a fuzzy sense. + * So that notion isn't really intrinsically interesting, but it is + * still useful when implementing algorithms. + * + * \sa rank() + */ + inline Index nonzeroPivots() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_nonzero_pivots; + } + + /** \returns the absolute value of the biggest pivot, i.e. the biggest + * diagonal coefficient of U. + */ + RealScalar maxPivot() const { return m_maxpivot; } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void computeInPlace(); + + MatrixType m_qr; + HCoeffsType m_hCoeffs; + IntDiagSizeVectorType m_rows_transpositions; + IntDiagSizeVectorType m_cols_transpositions; + PermutationType m_cols_permutation; + RowVectorType m_temp; + bool m_isInitialized, m_usePrescribedThreshold; + RealScalar m_prescribedThreshold, m_maxpivot; + Index m_nonzero_pivots; + RealScalar m_precision; + Index m_det_pq; +}; + +template +typename MatrixType::RealScalar FullPivHouseholderQR::absDeterminant() const +{ + using std::abs; + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return abs(m_qr.diagonal().prod()); +} + +template +typename MatrixType::RealScalar FullPivHouseholderQR::logAbsDeterminant() const +{ + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return m_qr.diagonal().cwiseAbs().array().log().sum(); +} + +/** Performs the QR factorization of the given matrix \a matrix. The result of + * the factorization is stored into \c *this, and a reference to \c *this + * is returned. 
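+ *
+ * (Sketch of the reuse pattern — matrices and sizes are illustrative assumptions:
+ * \code
+ * FullPivHouseholderQR<MatrixXd> qr(100, 60); // preallocate once
+ * qr.compute(A1);
+ * qr.compute(A2); // reuses the internal storage
+ * \endcode )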
+ * + * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&) + */ +template +template +FullPivHouseholderQR& FullPivHouseholderQR::compute(const EigenBase& matrix) +{ + m_qr = matrix.derived(); + computeInPlace(); + return *this; +} + +template +void FullPivHouseholderQR::computeInPlace() +{ + check_template_parameters(); + + using std::abs; + Index rows = m_qr.rows(); + Index cols = m_qr.cols(); + Index size = (std::min)(rows,cols); + + + m_hCoeffs.resize(size); + + m_temp.resize(cols); + + m_precision = NumTraits::epsilon() * RealScalar(size); + + m_rows_transpositions.resize(size); + m_cols_transpositions.resize(size); + Index number_of_transpositions = 0; + + RealScalar biggest(0); + + m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) + m_maxpivot = RealScalar(0); + + for (Index k = 0; k < size; ++k) + { + Index row_of_biggest_in_corner, col_of_biggest_in_corner; + typedef internal::scalar_score_coeff_op Scoring; + typedef typename Scoring::result_type Score; + + Score score = m_qr.bottomRightCorner(rows-k, cols-k) + .unaryExpr(Scoring()) + .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner); + row_of_biggest_in_corner += k; + col_of_biggest_in_corner += k; + RealScalar biggest_in_corner = internal::abs_knowing_score()(m_qr(row_of_biggest_in_corner, col_of_biggest_in_corner), score); + if(k==0) biggest = biggest_in_corner; + + // if the corner is negligible, then we have less than full rank, and we can finish early + if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) + { + m_nonzero_pivots = k; + for(Index i = k; i < size; i++) + { + m_rows_transpositions.coeffRef(i) = i; + m_cols_transpositions.coeffRef(i) = i; + m_hCoeffs.coeffRef(i) = Scalar(0); + } + break; + } + + m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner; + m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner; + if(k != row_of_biggest_in_corner) { + m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k)); + ++number_of_transpositions; + } + if(k != col_of_biggest_in_corner) { + m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner)); + ++number_of_transpositions; + } + + RealScalar beta; + m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); + m_qr.coeffRef(k,k) = beta; + + // remember the maximum absolute value of diagonal coefficients + if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); + + m_qr.bottomRightCorner(rows-k, cols-k-1) + .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); + } + + m_cols_permutation.setIdentity(cols); + for(Index k = 0; k < size; ++k) + m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k)); + + m_det_pq = (number_of_transpositions%2) ? -1 : 1; + m_isInitialized = true; +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + eigen_assert(rhs.rows() == rows()); + const Index l_rank = rank(); + + // FIXME introduce nonzeroPivots() and use it here. and more generally, + // make the same improvements in this dec as in FullPivLU. 
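+ // (Editorial outline, inferred from the code below: form c = Q^* applied to
+ // the row-permuted rhs, back-solve the leading l_rank-by-l_rank triangular
+ // block, then scatter the result through the column permutation, zeroing
+ // the remaining rows.)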
+ if(l_rank==0) + { + dst.setZero(); + return; + } + + typename RhsType::PlainObject c(rhs); + + Matrix temp(rhs.cols()); + for (Index k = 0; k < l_rank; ++k) + { + Index remainingSize = rows()-k; + c.row(k).swap(c.row(m_rows_transpositions.coeff(k))); + c.bottomRightCorner(remainingSize, rhs.cols()) + .applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1), + m_hCoeffs.coeff(k), &temp.coeffRef(0)); + } + + m_qr.topLeftCorner(l_rank, l_rank) + .template triangularView() + .solveInPlace(c.topRows(l_rank)); + + for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i); + for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero(); +} +#endif + +namespace internal { + +template +struct Assignment >, internal::assign_op::Scalar>, Dense2Dense> +{ + typedef FullPivHouseholderQR QrType; + typedef Inverse SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); + } +}; + +/** \ingroup QR_Module + * + * \brief Expression type for return value of FullPivHouseholderQR::matrixQ() + * + * \tparam MatrixType type of underlying dense matrix + */ +template struct FullPivHouseholderQRMatrixQReturnType + : public ReturnByValue > +{ +public: + typedef typename FullPivHouseholderQR::IntDiagSizeVectorType IntDiagSizeVectorType; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef Matrix WorkVectorType; + + FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr, + const HCoeffsType& hCoeffs, + const IntDiagSizeVectorType& rowsTranspositions) + : m_qr(qr), + m_hCoeffs(hCoeffs), + m_rowsTranspositions(rowsTranspositions) + {} + + template + void evalTo(ResultType& result) const + { + const Index rows = m_qr.rows(); + WorkVectorType workspace(rows); + evalTo(result, workspace); + } + + template + void evalTo(ResultType& result, WorkVectorType& workspace) const + { + using numext::conj; + // compute the product H'_0 H'_1 ... H'_n-1, + // where H_k is the k-th Householder transformation I - h_k v_k v_k' + // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] + const Index rows = m_qr.rows(); + const Index cols = m_qr.cols(); + const Index size = (std::min)(rows, cols); + workspace.resize(rows); + result.setIdentity(rows, rows); + for (Index k = size-1; k >= 0; k--) + { + result.block(k, k, rows-k, rows-k) + .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k)); + result.row(k).swap(result.row(m_rowsTranspositions.coeff(k))); + } + } + + Index rows() const { return m_qr.rows(); } + Index cols() const { return m_qr.rows(); } + +protected: + typename MatrixType::Nested m_qr; + typename HCoeffsType::Nested m_hCoeffs; + typename IntDiagSizeVectorType::Nested m_rowsTranspositions; +}; + +// template +// struct evaluator > +// : public evaluator > > +// {}; + +} // end namespace internal + +template +inline typename FullPivHouseholderQR::MatrixQReturnType FullPivHouseholderQR::matrixQ() const +{ + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions); +} + +/** \return the full-pivoting Householder QR decomposition of \c *this. 
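+ * (A hedged one-liner, assuming dense \c A and \c b of matching sizes:
+ * \code
+ * VectorXd x = A.fullPivHouseholderQr().solve(b);
+ * \endcode )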
+ * + * \sa class FullPivHouseholderQR + */ +template +const FullPivHouseholderQR::PlainObject> +MatrixBase::fullPivHouseholderQr() const +{ + return FullPivHouseholderQR(eval()); +} + +} // end namespace Eigen + +#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR.h new file mode 100644 index 000000000..3513d995c --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR.h @@ -0,0 +1,409 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2010 Vincent Lejeune +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_QR_H +#define EIGEN_QR_H + +namespace Eigen { + +/** \ingroup QR_Module + * + * + * \class HouseholderQR + * + * \brief Householder QR decomposition of a matrix + * + * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition + * + * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R + * such that + * \f[ + * \mathbf{A} = \mathbf{Q} \, \mathbf{R} + * \f] + * by using Householder transformations. Here, \b Q a unitary matrix and \b R an upper triangular matrix. + * The result is stored in a compact way compatible with LAPACK. + * + * Note that no pivoting is performed. This is \b not a rank-revealing decomposition. + * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead. + * + * This Householder QR decomposition is faster, but less numerically stable and less feature-full than + * FullPivHouseholderQR or ColPivHouseholderQR. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * \sa MatrixBase::householderQr() + */ +template class HouseholderQR +{ + public: + + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + // FIXME should be int + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix MatrixQType; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef HouseholderSequence::type> HouseholderSequenceType; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via HouseholderQR::compute(const MatrixType&). + */ + HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. 
+ * \sa HouseholderQR() + */ + HouseholderQR(Index rows, Index cols) + : m_qr(rows, cols), + m_hCoeffs((std::min)(rows,cols)), + m_temp(cols), + m_isInitialized(false) {} + + /** \brief Constructs a QR factorization from a given matrix + * + * This constructor computes the QR factorization of the matrix \a matrix by calling + * the method compute(). It is a short cut for: + * + * \code + * HouseholderQR qr(matrix.rows(), matrix.cols()); + * qr.compute(matrix); + * \endcode + * + * \sa compute() + */ + template + explicit HouseholderQR(const EigenBase& matrix) + : m_qr(matrix.rows(), matrix.cols()), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), + m_temp(matrix.cols()), + m_isInitialized(false) + { + compute(matrix.derived()); + } + + + /** \brief Constructs a QR factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when + * \c MatrixType is a Eigen::Ref. + * + * \sa HouseholderQR(const EigenBase&) + */ + template + explicit HouseholderQR(EigenBase& matrix) + : m_qr(matrix.derived()), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), + m_temp(matrix.cols()), + m_isInitialized(false) + { + computeInPlace(); + } + + /** This method finds a solution x to the equation Ax=b, where A is the matrix of which + * *this is the QR decomposition, if any exists. + * + * \param b the right-hand-side of the equation to solve. + * + * \returns a solution. + * + * \note_about_checking_solutions + * + * \note_about_arbitrary_choice_of_solution + * + * Example: \include HouseholderQR_solve.cpp + * Output: \verbinclude HouseholderQR_solve.out + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + return Solve(*this, b.derived()); + } + + /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations. + * + * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object. + * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*: + * + * Example: \include HouseholderQR_householderQ.cpp + * Output: \verbinclude HouseholderQR_householderQ.out + */ + HouseholderSequenceType householderQ() const + { + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); + } + + /** \returns a reference to the matrix where the Householder QR decomposition is stored + * in a LAPACK-compatible way. + */ + const MatrixType& matrixQR() const + { + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + return m_qr; + } + + template + HouseholderQR& compute(const EigenBase& matrix) { + m_qr = matrix.derived(); + computeInPlace(); + return *this; + } + + /** \returns the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * One way to work around that is to use logAbsDeterminant() instead. 
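+ * (Illustration for a square MatrixXd \c A — an assumption, not from the source:
+ * \code
+ * HouseholderQR<MatrixXd> qr(A);
+ * double logAbsDet = qr.logAbsDeterminant(); // robust for large dimensions
+ * \endcode )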
+ * + * \sa logAbsDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar absDeterminant() const; + + /** \returns the natural log of the absolute value of the determinant of the matrix of which + * *this is the QR decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the QR decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \note This method is useful to work around the risk of overflow/underflow that's inherent + * to determinant computation. + * + * \sa absDeterminant(), MatrixBase::determinant() + */ + typename MatrixType::RealScalar logAbsDeterminant() const; + + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return m_qr.cols(); } + + /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q. + * + * For advanced uses only. + */ + const HCoeffsType& hCoeffs() const { return m_hCoeffs; } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const; + #endif + + protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void computeInPlace(); + + MatrixType m_qr; + HCoeffsType m_hCoeffs; + RowVectorType m_temp; + bool m_isInitialized; +}; + +template +typename MatrixType::RealScalar HouseholderQR::absDeterminant() const +{ + using std::abs; + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return abs(m_qr.diagonal().prod()); +} + +template +typename MatrixType::RealScalar HouseholderQR::logAbsDeterminant() const +{ + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return m_qr.diagonal().cwiseAbs().array().log().sum(); +} + +namespace internal { + +/** \internal */ +template +void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0) +{ + typedef typename MatrixQR::Scalar Scalar; + typedef typename MatrixQR::RealScalar RealScalar; + Index rows = mat.rows(); + Index cols = mat.cols(); + Index size = (std::min)(rows,cols); + + eigen_assert(hCoeffs.size() == size); + + typedef Matrix TempType; + TempType tempVector; + if(tempData==0) + { + tempVector.resize(cols); + tempData = tempVector.data(); + } + + for(Index k = 0; k < size; ++k) + { + Index remainingRows = rows - k; + Index remainingCols = cols - k - 1; + + RealScalar beta; + mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta); + mat.coeffRef(k,k) = beta; + + // apply H to remaining part of m_qr from the left + mat.bottomRightCorner(remainingRows, remainingCols) + .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1); + } +} + +/** \internal */ +template +struct householder_qr_inplace_blocked +{ + // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h + static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32, + typename MatrixQR::Scalar* tempData = 0) + { + typedef typename MatrixQR::Scalar Scalar; + typedef Block BlockType; + + Index rows = mat.rows(); + Index cols = mat.cols(); + Index size = (std::min)(rows, cols); + + typedef Matrix TempType; + TempType tempVector; + if(tempData==0) + { + 
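+      // No caller-supplied workspace: fall back to a local temporary of size cols.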
tempVector.resize(cols); + tempData = tempVector.data(); + } + + Index blockSize = (std::min)(maxBlockSize,size); + + Index k = 0; + for (k = 0; k < size; k += blockSize) + { + Index bs = (std::min)(size-k,blockSize); // actual size of the block + Index tcols = cols - k - bs; // trailing columns + Index brows = rows-k; // rows of the block + + // partition the matrix: + // A00 | A01 | A02 + // mat = A10 | A11 | A12 + // A20 | A21 | A22 + // and performs the qr dec of [A11^T A12^T]^T + // and update [A21^T A22^T]^T using level 3 operations. + // Finally, the algorithm continue on A22 + + BlockType A11_21 = mat.block(k,k,brows,bs); + Block hCoeffsSegment = hCoeffs.segment(k,bs); + + householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); + + if(tcols) + { + BlockType A21_22 = mat.block(k,k+bs,brows,tcols); + apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment, false); // false == backward + } + } + } +}; + +} // end namespace internal + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + const Index rank = (std::min)(rows(), cols()); + eigen_assert(rhs.rows() == rows()); + + typename RhsType::PlainObject c(rhs); + + // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T + c.applyOnTheLeft(householderSequence( + m_qr.leftCols(rank), + m_hCoeffs.head(rank)).transpose() + ); + + m_qr.topLeftCorner(rank, rank) + .template triangularView() + .solveInPlace(c.topRows(rank)); + + dst.topRows(rank) = c.topRows(rank); + dst.bottomRows(cols()-rank).setZero(); +} +#endif + +/** Performs the QR factorization of the given matrix \a matrix. The result of + * the factorization is stored into \c *this, and a reference to \c *this + * is returned. + * + * \sa class HouseholderQR, HouseholderQR(const MatrixType&) + */ +template +void HouseholderQR::computeInPlace() +{ + check_template_parameters(); + + Index rows = m_qr.rows(); + Index cols = m_qr.cols(); + Index size = (std::min)(rows,cols); + + m_hCoeffs.resize(size); + + m_temp.resize(cols); + + internal::householder_qr_inplace_blocked::run(m_qr, m_hCoeffs, 48, m_temp.data()); + + m_isInitialized = true; +} + +/** \return the Householder QR decomposition of \c *this. + * + * \sa class HouseholderQR + */ +template +const HouseholderQR::PlainObject> +MatrixBase::householderQr() const +{ + return HouseholderQR(eval()); +} + +} // end namespace Eigen + +#endif // EIGEN_QR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR_LAPACKE.h new file mode 100644 index 000000000..1dc7d5363 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/QR/HouseholderQR_LAPACKE.h @@ -0,0 +1,68 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * Householder QR decomposition of a matrix w/o pivoting based on + * LAPACKE_?geqrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_QR_LAPACKE_H +#define EIGEN_QR_LAPACKE_H + +namespace Eigen { + +namespace internal { + +/** \internal Specialization for the data types supported by LAPACKe */ + +#define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \ +template \ +struct householder_qr_inplace_blocked \ +{ \ + static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \ + typename MatrixQR::Scalar* = 0) \ + { \ + lapack_int m = (lapack_int) mat.rows(); \ + lapack_int n = (lapack_int) mat.cols(); \ + lapack_int lda = (lapack_int) mat.outerStride(); \ + lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \ + hCoeffs.adjointInPlace(); \ + } \ +}; + +EIGEN_LAPACKE_QR_NOPIV(double, double, d) +EIGEN_LAPACKE_QR_NOPIV(float, float, s) +EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z) +EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_QR_LAPACKE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h new file mode 100644 index 000000000..953d57c9d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -0,0 +1,313 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire Nuentsa +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+#ifndef EIGEN_SUITESPARSEQRSUPPORT_H
+#define EIGEN_SUITESPARSEQRSUPPORT_H
+
+namespace Eigen {
+
+  template<typename MatrixType> class SPQR;
+  template<typename SPQRType> struct SPQRMatrixQReturnType;
+  template<typename SPQRType> struct SPQRMatrixQTransposeReturnType;
+  template<typename SPQRType, typename Derived> struct SPQR_QProduct;
+  namespace internal {
+    template<typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> >
+    {
+      typedef typename SPQRType::MatrixType ReturnType;
+    };
+    template<typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> >
+    {
+      typedef typename SPQRType::MatrixType ReturnType;
+    };
+    template<typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> >
+    {
+      typedef typename Derived::PlainObject ReturnType;
+    };
+  } // End namespace internal
+
+/**
+ * \ingroup SPQRSupport_Module
+ * \class SPQR
+ * \brief Sparse QR factorization based on the SuiteSparseQR library
+ *
+ * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition
+ * of sparse matrices. The result is then used to solve sparse linear least-squares systems.
+ * A QR factorization is computed such that A*P = Q*R, where:
+ *
+ * P is the column permutation. Use colsPermutation() to get it.
+ *
+ * Q is the orthogonal matrix represented as Householder reflectors.
+ * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+ * You can then apply it to a vector.
+ *
+ * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix.
+ * NOTE: The index type of R is always SuiteSparse_long. You can get it with SPQR::StorageIndex.
+ *
+ * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
+ *
+ * \implsparsesolverconcept
+ *
+ *
+ */
+template<typename _MatrixType>
+class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
+{
+  protected:
+    typedef SparseSolverBase<SPQR<_MatrixType> > Base;
+    using Base::m_isInitialized;
+  public:
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef SuiteSparse_long StorageIndex;
+    typedef SparseMatrix<Scalar, ColMajor, StorageIndex> MatrixType;
+    typedef Map<PermutationMatrix<Dynamic, Dynamic, StorageIndex> > PermutationType;
+    enum {
+      ColsAtCompileTime = Dynamic,
+      MaxColsAtCompileTime = Dynamic
+    };
+  public:
+    SPQR()
+      : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+    {
+      cholmod_l_start(&m_cc);
+    }
+
+    explicit SPQR(const _MatrixType& matrix)
+      : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+    {
+      cholmod_l_start(&m_cc);
+      compute(matrix);
+    }
+
+    ~SPQR()
+    {
+      SPQR_free();
+      cholmod_l_finish(&m_cc);
+    }
+    void SPQR_free()
+    {
+      cholmod_l_free_sparse(&m_H, &m_cc);
+      cholmod_l_free_sparse(&m_cR, &m_cc);
+      cholmod_l_free_dense(&m_HTau, &m_cc);
+      std::free(m_E);
+      std::free(m_HPinv);
+    }
+
+    void compute(const _MatrixType& matrix)
+    {
+      if(m_isInitialized) SPQR_free();
+
+      MatrixType mat(matrix);
+
+      /* Compute the default threshold as in MatLab, see:
+       * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+       * Sparse QR Factorization, ACM Trans. on Math. Soft.
38(1), 2011, Page 8:3 + */ + RealScalar pivotThreshold = m_tolerance; + if(m_useDefaultThreshold) + { + RealScalar max2Norm = 0.0; + for (int j = 0; j < mat.cols(); j++) max2Norm = numext::maxi(max2Norm, mat.col(j).norm()); + if(max2Norm==RealScalar(0)) + max2Norm = RealScalar(1); + pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits::epsilon(); + } + cholmod_sparse A; + A = viewAsCholmod(mat); + m_rows = matrix.rows(); + Index col = matrix.cols(); + m_rank = SuiteSparseQR(m_ordering, pivotThreshold, col, &A, + &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc); + + if (!m_cR) + { + m_info = NumericalIssue; + m_isInitialized = false; + return; + } + m_info = Success; + m_isInitialized = true; + m_isRUpToDate = false; + } + /** + * Get the number of rows of the input matrix and the Q matrix + */ + inline Index rows() const {return m_rows; } + + /** + * Get the number of columns of the input matrix. + */ + inline Index cols() const { return m_cR->ncol; } + + template + void _solve_impl(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); + eigen_assert(b.cols()==1 && "This method is for vectors only"); + + //Compute Q^T * b + typename Dest::PlainObject y, y2; + y = matrixQ().transpose() * b; + + // Solves with the triangular matrix R + Index rk = this->rank(); + y2 = y; + y.resize((std::max)(cols(),Index(y.rows())),y.cols()); + y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView().solve(y2.topRows(rk)); + + // Apply the column permutation + // colsPermutation() performs a copy of the permutation, + // so let's apply it manually: + for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i); + for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero(); + +// y.bottomRows(y.rows()-rk).setZero(); +// dest = colsPermutation() * y.topRows(cols()); + + m_info = Success; + } + + /** \returns the sparse triangular factor R. It is a sparse matrix + */ + const MatrixType matrixR() const + { + eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); + if(!m_isRUpToDate) { + m_R = viewAsEigen(*m_cR); + m_isRUpToDate = true; + } + return m_R; + } + /// Get an expression of the matrix Q + SPQRMatrixQReturnType matrixQ() const + { + return SPQRMatrixQReturnType(*this); + } + /// Get the permutation that was applied to columns of A + PermutationType colsPermutation() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return PermutationType(m_E, m_cR->ncol); + } + /** + * Gets the rank of the matrix. + * It should be equal to matrixQR().cols if the matrix is full-rank + */ + Index rank() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_cc.SPQR_istat[4]; + } + /// Set the fill-reducing ordering method to be used + void setSPQROrdering(int ord) { m_ordering = ord;} + /// Set the tolerance tol to treat columns with 2-norm < =tol as zero + void setPivotThreshold(const RealScalar& tol) + { + m_useDefaultThreshold = false; + m_tolerance = tol; + } + + /** \returns a pointer to the SPQR workspace */ + cholmod_common *cholmodCommon() const { return &m_cc; } + + + /** \brief Reports whether previous computation was successful. 
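+ *
+ * (Hedged usage sketch — \c solver and \c A are assumed names, not from this file:
+ * \code
+ * SPQR<SparseMatrix<double> > solver;
+ * solver.compute(A);
+ * if (solver.info() != Success) {
+ *   // numerical issue during the sparse QR factorization
+ * }
+ * \endcode )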
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the sparse QR factorization could not be computed
+ */
+ ComputationInfo info() const
+ {
+   eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+   return m_info;
+ }
+ protected:
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
+ mutable bool m_isRUpToDate;
+ mutable ComputationInfo m_info;
+ int m_ordering;                 // Ordering method to use, see SPQR's manual
+ int m_allow_tol;                // Allow to use some tolerance during numerical factorization.
+ RealScalar m_tolerance;         // treat columns with 2-norm below this tolerance as zero
+ mutable cholmod_sparse *m_cR;   // The sparse R factor in cholmod format
+ mutable MatrixType m_R;         // The sparse matrix R in Eigen format
+ mutable StorageIndex *m_E;      // The permutation applied to columns
+ mutable cholmod_sparse *m_H;    // The Householder vectors
+ mutable StorageIndex *m_HPinv;  // The row permutation of H
+ mutable cholmod_dense *m_HTau;  // The Householder coefficients
+ mutable Index m_rank;           // The rank of the matrix
+ mutable cholmod_common m_cc;    // Workspace and parameters
+ bool m_useDefaultThreshold;     // Use default threshold
+ Index m_rows;
+ template<typename , typename > friend struct SPQR_QProduct;
+};
+
+template <typename SPQRType, typename Derived>
+struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType, Derived> >
+{
+  typedef typename SPQRType::Scalar Scalar;
+  typedef typename SPQRType::StorageIndex StorageIndex;
+  // Define the constructor to get references to the argument types
+  SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}
+
+  inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
+  inline Index cols() const { return m_other.cols(); }
+  // Assign to a vector
+  template<typename ResType>
+  void evalTo(ResType& res) const
+  {
+    cholmod_dense y_cd;
+    cholmod_dense *x_cd;
+    int method = m_transpose ? SPQR_QTX : SPQR_QX;
+    cholmod_common *cc = m_spqr.cholmodCommon();
+    y_cd = viewAsCholmod(m_other.const_cast_derived());
+    x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
+    res = Matrix<Scalar, Dynamic, Dynamic>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
+    cholmod_l_free_dense(&x_cd, cc);
+  }
+  const SPQRType& m_spqr;
+  const Derived& m_other;
+  bool m_transpose;
+
+};
+template<typename SPQRType>
+struct SPQRMatrixQReturnType{
+
+  SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+  template<typename Derived>
+  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SPQR_QProduct<SPQRType, Derived>(m_spqr, other.derived(), false);
+  }
+  SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const
+  {
+    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+  }
+  // To use for operations with the transpose of Q
+  SPQRMatrixQTransposeReturnType<SPQRType> transpose() const
+  {
+    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+  }
+  const SPQRType& m_spqr;
+};
+
+template<typename SPQRType>
+struct SPQRMatrixQTransposeReturnType{
+  SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+  template<typename Derived>
+  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SPQR_QProduct<SPQRType, Derived>(m_spqr, other.derived(), true);
+  }
+  const SPQRType& m_spqr;
+};
+
+}// End namespace Eigen
+#endif
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/BDCSVD.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/BDCSVD.h
new file mode 100644
index 000000000..1134d66e7
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/BDCSVD.h
@@ -0,0 +1,1246 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
+// research report written by Ming Gu and Stanley C. Eisenstat.
+// The code variable names correspond to the names they used in their
+// report.
+//
+// Copyright (C) 2013 Gauthier Brun
+// Copyright (C) 2013 Nicolas Carre
+// Copyright (C) 2013 Jean Ceccato
+// Copyright (C) 2013 Pierre Zoppitelli
+// Copyright (C) 2013 Jitse Niesen
+// Copyright (C) 2014-2017 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BDCSVD_H
+#define EIGEN_BDCSVD_H
+// #define EIGEN_BDCSVD_DEBUG_VERBOSE
+// #define EIGEN_BDCSVD_SANITY_CHECKS
+
+namespace Eigen {
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+IOFormat bdcsvdfmt(8, 0, ", ", "\n", "  [", "]");
+#endif
+
+template<typename _MatrixType> class BDCSVD;
+
+namespace internal {
+
+template<typename _MatrixType>
+struct traits<BDCSVD<_MatrixType> >
+{
+  typedef _MatrixType MatrixType;
+};
+
+} // end namespace internal
+
+
+/** \ingroup SVD_Module
+ *
+ *
+ * \class BDCSVD
+ *
+ * \brief class Bidiagonal Divide and Conquer SVD
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
+ * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
+ * You can control the switching size with the setSwitchSize() method, default is 16.
+ * For small matrices (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly
+ * recommended and can be several orders of magnitude faster.
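+ *
+ * (A hedged usage sketch, with an assumed large dense matrix \c A and right-hand side \c b:
+ * \code
+ * BDCSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
+ * VectorXd x = svd.solve(b); // least-squares solve via the SVD
+ * \endcode )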
+ *
+ * \warning this algorithm is unlikely to provide accurate results when compiled with unsafe math optimizations.
+ * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
+ * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
+ * significantly degrade the accuracy.
+ *
+ * \sa class JacobiSVD
+ */
+template<typename _MatrixType>
+class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
+{
+  typedef SVDBase<BDCSVD> Base;
+
+public:
+  using Base::rows;
+  using Base::cols;
+  using Base::computeU;
+  using Base::computeV;
+
+  typedef _MatrixType MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+  typedef typename NumTraits<RealScalar>::Literal Literal;
+  enum {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+    MatrixOptions = MatrixType::Options
+  };
+
+  typedef typename Base::MatrixUType MatrixUType;
+  typedef typename Base::MatrixVType MatrixVType;
+  typedef typename Base::SingularValuesType SingularValuesType;
+
+  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
+  typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
+  typedef Matrix<RealScalar, Dynamic, 1> VectorType;
+  typedef Array<RealScalar, Dynamic, 1> ArrayXr;
+  typedef Array<Index,1,Dynamic> ArrayXi;
+  typedef Ref<ArrayXr> ArrayRef;
+  typedef Ref<ArrayXi> IndicesRef;
+
+  /** \brief Default Constructor.
+   *
+   * The default constructor is useful in cases in which the user intends to
+   * perform decompositions via BDCSVD::compute(const MatrixType&).
+   */
+  BDCSVD() : m_algoswap(16), m_numIters(0)
+  {}
+
+
+  /** \brief Default Constructor with memory preallocation
+   *
+   * Like the default constructor but with preallocation of the internal data
+   * according to the specified problem size.
+   * \sa BDCSVD()
+   */
+  BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+    : m_algoswap(16), m_numIters(0)
+  {
+    allocate(rows, cols, computationOptions);
+  }
+
+  /** \brief Constructor performing the decomposition of given matrix.
+   *
+   * \param matrix the matrix to decompose
+   * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+   *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+   *                           #ComputeFullV, #ComputeThinV.
+   *
+   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+   * available with the (non-default) FullPivHouseholderQR preconditioner.
+   */
+  BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+    : m_algoswap(16), m_numIters(0)
+  {
+    compute(matrix, computationOptions);
+  }
+
+  ~BDCSVD()
+  {
+  }
+
+  /** \brief Method performing the decomposition of given matrix using custom options.
+   *
+   * \param matrix the matrix to decompose
+   * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+   *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+   *                           #ComputeFullV, #ComputeThinV.
+   *
+   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+   * available with the (non-default) FullPivHouseholderQR preconditioner.
+   */
+  BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+  /** \brief Method performing the decomposition of given matrix using current options.
+   *
+   * \param matrix the matrix to decompose
+   *
+   * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+   */
+  BDCSVD& compute(const MatrixType& matrix)
+  {
+    return compute(matrix, this->m_computationOptions);
+  }
+
+  void setSwitchSize(int s)
+  {
+    eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
+    m_algoswap = s;
+  }
+
+private:
+  void allocate(Index rows, Index cols, unsigned int computationOptions);
+  void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
+  void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
+  void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
+  void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
+  void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
+  void deflation43(Index firstCol, Index shift, Index i, Index size);
+  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
+  void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
+  template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
+  void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
+  void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
+  static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
+
+protected:
+  MatrixXr m_naiveU, m_naiveV;
+  MatrixXr m_computed;
+  Index m_nRec;
+  ArrayXr m_workspace;
+  ArrayXi m_workspaceI;
+  int m_algoswap;
+  bool m_isTranspose, m_compU, m_compV;
+
+  using Base::m_singularValues;
+  using Base::m_diagSize;
+  using Base::m_computeFullU;
+  using Base::m_computeFullV;
+  using Base::m_computeThinU;
+  using Base::m_computeThinV;
+  using Base::m_matrixU;
+  using Base::m_matrixV;
+  using Base::m_isInitialized;
+  using Base::m_nonzeroSingularValues;
+
+public:
+  int m_numIters;
+}; //end class BDCSVD
+
+
+// Method to allocate and initialize matrix and attributes
+template<typename MatrixType>
+void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+  m_isTranspose = (cols > rows);
+
+  if (Base::allocate(rows, cols, computationOptions))
+    return;
+
+  m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
+  m_compU = computeV();
+  m_compV = computeU();
+  if (m_isTranspose)
+    std::swap(m_compU, m_compV);
+
+  if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
+  else         m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
+
+  if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
+
+  m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
+  m_workspaceI.resize(3*m_diagSize);
+}// end allocate
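+// Note on the sizing above (explanatory, inferred from how the buffers are used below):
+// m_workspace provides three (m_diagSize+1) x (m_diagSize+1) scratch blocks for
+// structured_update(), and computeSVDofM() re-uses the same storage for the
+// diag/shifts/mus/zhat/diagShifted working arrays; m_workspaceI holds the integer
+// permutations built during deflation.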
+template<typename MatrixType>
+BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  std::cout << "\n\n\n======================================================================================================================\n\n\n";
+#endif
+  allocate(matrix.rows(), matrix.cols(), computationOptions);
+  using std::abs;
+
+  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+
+  //**** step -1 - If the problem is too small, directly fall back to JacobiSVD and return
+  if(matrix.cols() < m_algoswap)
+  {
+    // FIXME this line involves temporaries
+    JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
+    if(computeU()) m_matrixU = jsvd.matrixU();
+    if(computeV()) m_matrixV = jsvd.matrixV();
+    m_singularValues = jsvd.singularValues();
+    m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
+    m_isInitialized = true;
+    return *this;
+  }
+
+  //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
+  RealScalar scale = matrix.cwiseAbs().maxCoeff();
+  if(scale==Literal(0)) scale = Literal(1);
+  MatrixX copy;
+  if (m_isTranspose) copy = matrix.adjoint()/scale;
+  else               copy = matrix/scale;
+
+  //**** step 1 - Bidiagonalization
+  // FIXME this line involves temporaries
+  internal::UpperBidiagonalization<MatrixX> bid(copy);
+
+  //**** step 2 - Divide & Conquer
+  m_naiveU.setZero();
+  m_naiveV.setZero();
+  // FIXME this line involves a temporary matrix
+  m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
+  m_computed.template bottomRows<1>().setZero();
+  divide(0, m_diagSize - 1, 0, 0, 0);
+
+  //**** step 3 - Copy singular values and vectors
+  for (int i=0; i<m_diagSize; i++)
+  {
+    RealScalar a = abs(m_computed.coeff(i, i));
+    m_singularValues.coeffRef(i) = a * scale;
+    if (a<considerZero)
+    {
+      m_nonzeroSingularValues = i;
+      m_singularValues.tail(m_diagSize - i - 1).setZero();
+      break;
+    }
+    else if (i == m_diagSize - 1)
+    {
+      m_nonzeroSingularValues = i + 1;
+      break;
+    }
+  }
+
+  if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
+  else              copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
+  m_isInitialized = true;
+  return *this;
+}// end compute
+
+
+// Copy the computed singular values and vectors
+template<typename MatrixType>
+template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
+void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
+{
+  // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
+  if (computeU())
+  {
+    Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
+    m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
+    m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
+    householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
+  }
+  if (computeV())
+  {
+    Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
+    m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
+    m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
+    householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
+  }
+}
+
+/** \internal
+ * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
+ *  A = [A1]
+ *      [A2]
+ * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
+ * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
+ * enough.
+ */
+template<typename MatrixType>
+void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
+{
+  Index n = A.rows();
+  if(n>100)
+  {
+    // If the matrices are large enough, let's exploit the sparse structure of A by
+    // splitting it in half (wrt n1), and packing the non-zero columns.
+    Index n2 = n - n1;
+    Map<MatrixXr> A1(m_workspace.data()      , n1, n);
+    Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
+    Map<MatrixXr> B1(m_workspace.data()+  n*n, n, n);
+    Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
+    Index k1=0, k2=0;
+    for(Index j=0; j<n; ++j)
+    {
+      if( (A.col(j).head(n1).array()!=Literal(0)).any() )
+      {
+        A1.col(k1) = A.col(j).head(n1);
+        B1.row(k1) = B.row(j);
+        ++k1;
+      }
+      if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
+      {
+        A2.col(k2) = A.col(j).tail(n2);
+        B2.row(k2) = B.row(j);
+        ++k2;
+      }
+    }
+
+    A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);
+    A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
+  }
+  else
+  {
+    Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
+    tmp.noalias() = A*B;
+    A = tmp;
+  }
+}
+
+// The divide algorithm is done "in place": we are always working on subsets of the same matrix. The divide method takes as arguments
+// the place of the submatrix we are currently working on.
+
+//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
+//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
+// lastCol + 1 - firstCol is the size of the submatrix.
+//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
+//@param firstColW : Same as firstRowW, but for the columns of W.
+//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
+// to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
+template<typename MatrixType>
+void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
+{
+  // requires rows = cols + 1;
+  using std::pow;
+  using std::sqrt;
+  using std::abs;
+  const Index n = lastCol - firstCol + 1;
+  const Index k = n/2;
+  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+  RealScalar alphaK;
+  RealScalar betaK;
+  RealScalar r0;
+  RealScalar lambda, phi, c0, s0;
+  VectorType l, f;
+  // We use the other algorithm which is more efficient for small
+  // matrices.
+  if (n < m_algoswap)
+  {
+    // FIXME this line involves temporaries
+    JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
+    if (m_compU)
+      m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
+    else
+    {
+      m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
+      m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
+    }
+    if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
+    m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
+    m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
+    return;
+  }
+  // We use the divide and conquer algorithm
+  alphaK = m_computed(firstCol + k, firstCol + k);
+  betaK  = m_computed(firstCol + k + 1, firstCol + k);
+  // The divide must be done in that order in order to have good results. divide() changes the data inside the submatrices,
+  // and the divide of the right submatrix reads one column of the left submatrix. That is why we need to treat the
+  // right submatrix before the left one.
+  divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+  divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+
+  if (m_compU)
+  {
+    lambda = m_naiveU(firstCol + k, firstCol + k);
+    phi    = m_naiveU(firstCol + k + 1, lastCol + 1);
+  }
+  else
+  {
+    lambda = m_naiveU(1, firstCol + k);
+    phi    = m_naiveU(0, lastCol + 1);
+  }
+  r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
+  if (m_compU)
+  {
+    l = m_naiveU.row(firstCol + k).segment(firstCol, k);
+    f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
+  }
+  else
+  {
+    l = m_naiveU.row(1).segment(firstCol, k);
+    f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
+  }
+  if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
+  if (r0<considerZero)
+  {
+    c0 = Literal(1);
+    s0 = Literal(0);
+  }
+  else
+  {
+    c0 = alphaK * lambda / r0;
+    s0 = betaK * phi / r0;
+  }
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(m_naiveU.allFinite());
+  assert(m_naiveV.allFinite());
+  assert(m_computed.allFinite());
+#endif
+
+  if (m_compU)
+  {
+    MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
+    // we shift Q1 to the right
+    for (Index i = firstCol + k - 1; i >= firstCol; i--)
+      m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
+    // we shift q1 at the left with a factor c0
+    m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
+    // last column = q1 * - s0
+    m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
+    // first column = q2 * s0
+    m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
+    // q2 *= c0
+    m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
+  }
+  else
+  {
+    RealScalar q1 = m_naiveU(0, firstCol + k);
+    // we shift Q1 to the right
+    for (Index i = firstCol + k - 1; i >= firstCol; i--)
+      m_naiveU(0, i + 1) = m_naiveU(0, i);
+    // we shift q1 at the left with a factor c0
+    m_naiveU(0, firstCol) = (q1 * c0);
+    // last column = q1 * - s0
+    m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
+    // first column = q2 * s0
+    m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) * s0;
+    // q2 *= c0
+    m_naiveU(1, lastCol + 1) *= c0;
+    m_naiveU.row(1).segment(firstCol + 1, k).setZero();
+    m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
+  }
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(m_naiveU.allFinite());
+  assert(m_naiveV.allFinite());
+  assert(m_computed.allFinite());
+#endif
+
+  m_computed(firstCol + shift, firstCol + shift) = r0;
+  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
+  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
+#endif
+  // Second part: try to deflate singular values in combined matrix
+  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
+  std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
+  std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
+  std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
+  static int count = 0;
+  std::cout << "# " << ++count << "\n\n";
+  assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
+//  assert(count<681);
+//  assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
+#endif
+
+  // Third part: compute SVD of combined matrix
+  MatrixXr UofSVD, VofSVD;
+  VectorType singVals;
+  computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(UofSVD.allFinite());
+  assert(VofSVD.allFinite());
+#endif
+
+  if (m_compU)
+    structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
+  else
+  {
+    Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
+    tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
+    m_naiveU.middleCols(firstCol, n + 1) = tmp;
+  }
+
+  if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(m_naiveU.allFinite());
+  assert(m_naiveV.allFinite());
+  assert(m_computed.allFinite());
+#endif
+
+  m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
+  m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
+}// end divide
+
+// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
+// the first column and on the diagonal and has undergone deflation, so the diagonal is in increasing
+// order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
+// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
+//
+// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
+// handling of round-off errors, be consistent in ordering
+// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+template <typename MatrixType>
+void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
+{
+  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+  using std::abs;
+  ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
+  m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
+  ArrayRef diag = m_workspace.head(n);
+  diag(0) = Literal(0);
+
+  // Allocate space for singular values and vectors
+  singVals.resize(n);
+  U.resize(n+1, n+1);
+  if (m_compV) V.resize(n, n);
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  if (col0.hasNaN() || diag.hasNaN())
+    std::cout << "\n\nHAS NAN\n\n";
+#endif
+
+  // Many singular values might have been deflated, the zero ones have been moved to the end,
+  // but others are interleaved and we must ignore them at this stage.
+  // To this end, let's compute a permutation skipping them:
+  Index actual_n = n;
+  while(actual_n>1 && diag(actual_n-1)==Literal(0)) --actual_n;
+  Index m = 0; // size of the deflated problem
+  for(Index k=0;k<actual_n;++k)
+    if(abs(col0(k))>considerZero)
+      m_workspaceI(m++) = k;
+  Map<ArrayXi> perm(m_workspaceI.data(),m);
+
+  Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
+  Map<ArrayXr> mus(m_workspace.data()+2*n, n);
+  Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  std::cout << "computeSVDofM using:\n";
+  std::cout << "  z: " << col0.transpose() << "\n";
+  std::cout << "  d: " << diag.transpose() << "\n";
+#endif
+
+  // Compute singVals, shifts, and mus
+  computeSingVals(col0, diag, perm, singVals, shifts, mus);
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  std::cout << "  j:        " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
+  std::cout << "  sing-val: " << singVals.transpose() << "\n";
+  std::cout << "  mu:       " << mus.transpose() << "\n";
+  std::cout << "  shift:    " << shifts.transpose() << "\n";
+
+  {
+    Index actual_n = n;
+    while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;
+    std::cout << "\n\n    mus:    " << mus.head(actual_n).transpose() << "\n\n";
+    std::cout << "    check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
+    std::cout << "    check2 (>0)      : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
+    std::cout << "    check3 (>0)      : " << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << "\n\n\n";
+    std::cout << "    check4 (>0)      : " << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << "\n\n\n";
+  }
+#endif
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(singVals.allFinite());
+  assert(mus.allFinite());
+  assert(shifts.allFinite());
+#endif
+
+  // Compute zhat
+  perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  std::cout << "  zhat: " << zhat.transpose() << "\n";
+#endif
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(zhat.allFinite());
+#endif
+
+  computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
+  std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
+#endif
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+  assert(U.allFinite());
+  assert(V.allFinite());
+  assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);
+  assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);
+  assert(m_naiveU.allFinite());
+  assert(m_naiveV.allFinite());
+  assert(m_computed.allFinite());
+#endif
+
+  // Because of deflation, the singular values might not be completely sorted.
+  // Fortunately, reordering them is an O(n) problem
+  for(Index i=0; i<actual_n-1; ++i)
+  {
+    if(singVals(i)>singVals(i+1))
+    {
+      using std::swap;
+      swap(singVals(i),singVals(i+1));
+      U.col(i).swap(U.col(i+1));
+      if(m_compV) V.col(i).swap(V.col(i+1));
+    }
+  }
+
+  // Reverse order so that the singular values are in increasing order
+  // Because of deflation, the zero singular values are already at the end
+  singVals.head(actual_n).reverseInPlace();
+  U.leftCols(actual_n).rowwise().reverseInPlace();
+  if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+  JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
+  std::cout << "  * j:        " << jsvd.singularValues().transpose() << "\n\n";
+  std::cout << "  * sing-val: " << singVals.transpose() << "\n";
+//  std::cout << "  * err:      " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
+#endif
+}
+
+template <typename MatrixType>
+typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
+{
+  Index m = perm.size();
+  RealScalar res = Literal(1);
+  for(Index i=0; i<m; ++i)
+  {
+    Index j = perm(i);
+    // The following expression could be rewritten to involve only a single division,
+    // but this would make the expression more sensitive to overflow.
+    res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
+  }
+  return res;
+}
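+// The secular function evaluated above is
+//   f(mu) = 1 + sum_{j in perm} col0(j)^2 / ((diag(j) - shift - mu) * (diag(j) + shift + mu)),
+// with each term split into two divisions using the pre-shifted diagonal
+// (diagShifted = diag - shift) to limit cancellation and overflow; the roots of f,
+// which interlace the deflated diagonal entries, are the singular values that
+// computeSingVals() below brackets and refines.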
+
+template <typename MatrixType>
+void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
+                                         VectorType& singVals, ArrayRef shifts, ArrayRef mus)
+{
+  using std::abs;
+  using std::swap;
+  using std::sqrt;
+
+  Index n = col0.size();
+  Index actual_n = n;
+  // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
+  // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
+  while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
+
+  for (Index k = 0; k < n; ++k)
+  {
+    if (col0(k) == Literal(0) || actual_n==1)
+    {
+      // if col0(k) == 0, then entry is deflated, so singular value is on diagonal
+      // if actual_n==1, then the deflated problem is already diagonalized
+      singVals(k) = k==0 ? col0(0) : diag(k);
+      mus(k) = Literal(0);
+      shifts(k) = k==0 ? col0(0) : diag(k);
+      continue;
+    }
+
+    // otherwise, use secular equation to find singular value
+    RealScalar left = diag(k);
+    RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
+    if(k==actual_n-1)
+      right = (diag(actual_n-1) + col0.matrix().norm());
+    else
+    {
+      // Skip deflated singular values,
+      // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
+      // This should be equivalent to using perm[]
+      Index l = k+1;
+      while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
+      right = diag(l);
+    }
+
+    // first decide whether it's closer to the left end or the right end
+    RealScalar mid = left + (right-left) / Literal(2);
+    RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
+
+    RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
+
+    // measure everything relative to shift
+    Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
+    diagShifted = diag - shift;
+
+    // initial guess
+    RealScalar muPrev, muCur;
+    if (shift == left)
+    {
+      muPrev = (right - left) * RealScalar(0.1);
+      if (k == actual_n-1) muCur = right - left;
+      else                 muCur = (right - left) * RealScalar(0.5);
+    }
+    else
+    {
+      muPrev = -(right - left) * RealScalar(0.1);
+      muCur  = -(right - left) * RealScalar(0.5);
+    }
+
+    RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
+    RealScalar fCur  = secularEq(muCur, col0, diag, perm, diagShifted, shift);
+    if (abs(fPrev) < abs(fCur))
+    {
+      swap(fPrev, fCur);
+      swap(muPrev, muCur);
+    }
+
+    // rational interpolation: fit a function of the form a / mu + b through the two previous
+    // iterates and use its zero to compute the next iterate
+    bool useBisection = fPrev*fCur>Literal(0);
+    while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
+    {
+      ++m_numIters;
+
+      // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
+      RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
+      RealScalar b = fCur - a / muCur;
+      // And find mu such that f(mu)==0:
+      RealScalar muZero = -a/b;
+      RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
+
+      muPrev = muCur;
+      fPrev  = fCur;
+      muCur  = muZero;
+      fCur   = fZero;
+
+      if (shift == left  && (muCur < Literal(0) || muCur > right - left))    useBisection = true;
+      if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
+      if (abs(fCur)>abs(fPrev)) useBisection = true;
+    }
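+    // (The fit is exact: a = (fCur - fPrev) / (1/muCur - 1/muPrev) and b = fCur - a/muCur
+    // reproduce both samples of f(mu) = a/mu + b, whose unique zero mu = -a/b is the muZero
+    // used as the next iterate above.)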
+
+    // fall back on bisection method if rational interpolation did not work
+    if (useBisection)
+    {
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+      std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
+#endif
+      RealScalar leftShifted, rightShifted;
+      if (shift == left)
+      {
+        // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
+        // the factor 2 is to be more conservative
+        leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+
+        // check that we did it right:
+        eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
+        // I don't understand why the case k==0 would be special there:
+        // if (k == 0) rightShifted = right - left; else
+        rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
+      }
+      else
+      {
+        leftShifted = -(right - left) * RealScalar(0.51);
+        if(k+1<actual_n)
+          rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+        else
+          rightShifted = -(std::numeric_limits<RealScalar>::min)();
+      }
+
+      RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
+
+#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE
+      RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
+#endif
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+      if(!(fLeft * fRight<0))
+      {
+        std::cout << "fLeft: " << leftShifted << " - " << diagShifted.head(10).transpose() << "\n ; " << bool(left==shift) << " " << (left-shift) << "\n";
+        std::cout << k << " : " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << left << " - " << right << " -> " << leftShifted << " " << rightShifted << " shift=" << shift << "\n";
+      }
+#endif
+      eigen_internal_assert(fLeft * fRight < Literal(0));
+
+      while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
+      {
+        RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
+        fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
+        if (fLeft * fMid < Literal(0))
+        {
+          rightShifted = midShifted;
+        }
+        else
+        {
+          leftShifted = midShifted;
+          fLeft = fMid;
+        }
+      }
+
+      muCur = (leftShifted + rightShifted) / Literal(2);
+    }
+
+    singVals[k] = shift + muCur;
+    shifts[k] = shift;
+    mus[k] = muCur;
+
+    // perturb singular value slightly if it equals diagonal entry to avoid division by zero later
+    // (deflation is supposed to avoid this from happening)
+    // - this does not seem to be necessary anymore -
+//    if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
+//    if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
+  }
+}
+
+
+// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
+template <typename MatrixType>
+void BDCSVD<MatrixType>::perturbCol0
+   (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
+    const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
+{
+  using std::sqrt;
+  Index n = col0.size();
+  Index m = perm.size();
+  if(m==0)
+  {
+    zhat.setZero();
+    return;
+  }
+  Index last = perm(m-1);
+  // The offset permits to skip deflated entries while computing zhat
+  for (Index k = 0; k < n; ++k)
+  {
+    if (col0(k) == Literal(0)) // deflated
+      zhat(k) = Literal(0);
+    else
+    {
+      // see equation (3.6)
+      RealScalar dk = diag(k);
+      RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));
+
+      for(Index l = 0; l<m; ++l)
+      {
+        Index i = perm(l);
+        if(i!=k)
+        {
+          Index j = i<k ? i : perm(l-1);
+          prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+          if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
+            std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
+                      << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
+#endif
+        }
+      }
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+      std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n";
+#endif
+      RealScalar tmp = sqrt(prod);
+      zhat(k) = col0(k) > Literal(0) ?
tmp : -tmp; + } + } +} + +// compute singular vectors +template +void BDCSVD::computeSingVecs + (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, + const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V) +{ + Index n = zhat.size(); + Index m = perm.size(); + + for (Index k = 0; k < n; ++k) + { + if (zhat(k) == Literal(0)) + { + U.col(k) = VectorType::Unit(n+1, k); + if (m_compV) V.col(k) = VectorType::Unit(n, k); + } + else + { + U.col(k).setZero(); + for(Index l=0;l= 1, di almost null and zi non null. +// We use a rotation to zero out zi applied to the left of M +template +void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size) +{ + using std::abs; + using std::sqrt; + using std::pow; + Index start = firstCol + shift; + RealScalar c = m_computed(start, start); + RealScalar s = m_computed(start+i, start); + RealScalar r = numext::hypot(c,s); + if (r == Literal(0)) + { + m_computed(start+i, start+i) = Literal(0); + return; + } + m_computed(start,start) = r; + m_computed(start+i, start) = Literal(0); + m_computed(start+i, start+i) = Literal(0); + + JacobiRotation J(c/r,-s/r); + if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J); + else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J); +}// end deflation 43 + + +// page 13 +// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M) +// We apply two rotations to have zj = 0; +// TODO deflation44 is still broken and not properly tested +template +void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size) +{ + using std::abs; + using std::sqrt; + using std::conj; + using std::pow; + RealScalar c = m_computed(firstColm+i, firstColm); + RealScalar s = m_computed(firstColm+j, firstColm); + RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s)); +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; " + << m_computed(firstColm + i-1, firstColm) << " " + << m_computed(firstColm + i, firstColm) << " " + << m_computed(firstColm + i+1, firstColm) << " " + << m_computed(firstColm + i+2, firstColm) << "\n"; + std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " " + << m_computed(firstColm + i, firstColm+i) << " " + << m_computed(firstColm + i+1, firstColm+i+1) << " " + << m_computed(firstColm + i+2, firstColm+i+2) << "\n"; +#endif + if (r==Literal(0)) + { + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + return; + } + c/=r; + s/=r; + m_computed(firstColm + i, firstColm) = r; + m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i); + m_computed(firstColm + j, firstColm) = Literal(0); + + JacobiRotation J(c,-s); + if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J); + else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J); + if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J); +}// end deflation 44 + + +// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] +template +void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift) +{ + using std::sqrt; + using std::abs; + const Index length = lastCol + 1 - firstCol; + + Block col0(m_computed, firstCol+shift, firstCol+shift, length, 1); + Diagonal fulldiag(m_computed); + 
VectorBlock,Dynamic> diag(fulldiag, firstCol+shift, length); + + const RealScalar considerZero = (std::numeric_limits::min)(); + RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff(); + RealScalar epsilon_strict = numext::maxi(considerZero,NumTraits::epsilon() * maxDiag); + RealScalar epsilon_coarse = Literal(8) * NumTraits::epsilon() * numext::maxi(col0.cwiseAbs().maxCoeff(), maxDiag); + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(m_naiveU.allFinite()); + assert(m_naiveV.allFinite()); + assert(m_computed.allFinite()); +#endif + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n"; +#endif + + //condition 4.1 + if (diag(0) < epsilon_coarse) + { +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n"; +#endif + diag(0) = epsilon_coarse; + } + + //condition 4.2 + for (Index i=1;i k) permutation[p] = j++; + else if (j >= length) permutation[p] = i++; + else if (diag(i) < diag(j)) permutation[p] = j++; + else permutation[p] = i++; + } + } + + // If we have a total deflation, then we have to insert diag(0) at the right place + if(total_deflation) + { + for(Index i=1; i0 && (abs(diag(i))1;--i) + if( (diag(i) - diag(i-1)) < NumTraits::epsilon()*maxDiag ) + { +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.4 with i = " << i << " because " << (diag(i) - diag(i-1)) << " < " << NumTraits::epsilon()*diag(i) << "\n"; +#endif + eigen_internal_assert(abs(diag(i) - diag(i-1)) +BDCSVD::PlainObject> +MatrixBase::bdcSvd(unsigned int computationOptions) const +{ + return BDCSVD(*this, computationOptions); +} +#endif + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD.h new file mode 100644 index 000000000..43488b1e0 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD.h @@ -0,0 +1,804 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_JACOBISVD_H +#define EIGEN_JACOBISVD_H + +namespace Eigen { + +namespace internal { +// forward declaration (needed by ICC) +// the empty body is required by MSVC +template::IsComplex> +struct svd_precondition_2x2_block_to_be_real {}; + +/*** QR preconditioners (R-SVD) + *** + *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. + *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for + *** JacobiSVD which by itself is only able to work on square matrices. 
+ ***/ + +enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; + +template +struct qr_preconditioner_should_do_anything +{ + enum { a = MatrixType::RowsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, + b = MatrixType::RowsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime != Dynamic && + MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, + ret = !( (QRPreconditioner == NoQRPreconditioner) || + (Case == PreconditionIfMoreColsThanRows && bool(a)) || + (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) + }; +}; + +template::ret +> struct qr_preconditioner_impl {}; + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD&) {} + bool run(JacobiSVD&, const MatrixType&) + { + return false; + } +}; + +/*** preconditioner using FullPivHouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime + }; + typedef Matrix WorkspaceType; + + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); + return true; + } + return false; + } +private: + typedef FullPivHouseholderQR QRType; + QRType m_qr; + WorkspaceType m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor)) + : ColsAtCompileTime==1 ? 
(MatrixType::Options | RowMajor) + : MatrixType::Options + }; + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + m_adjoint.resize(svd.cols(), svd.rows()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); + return true; + } + else return false; + } +private: + typedef FullPivHouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; +}; + +/*** preconditioner using ColPivHouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { + svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); + } + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); + return true; + } + return false; + } + +private: + typedef ColPivHouseholderQR QRType; + QRType m_qr; + typename internal::plain_col_type::type m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor)) + : ColsAtCompileTime==1 ? 
(MatrixType::Options | RowMajor) + : MatrixType::Options + }; + + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { + svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); + } + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); + return true; + } + else return false; + } + +private: + typedef ColPivHouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; +}; + +/*** preconditioner using HouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { + svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); + } + if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); + return true; + } + return false; + } +private: + typedef HouseholderQR QRType; + QRType m_qr; + typename internal::plain_col_type::type m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { + 
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); + } + if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); + return true; + } + else return false; + } + +private: + typedef HouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; +}; + +/*** 2x2 SVD implementation + *** + *** JacobiSVD consists in performing a series of 2x2 SVD subproblems + ***/ + +template +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename MatrixType::RealScalar RealScalar; + static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; } +}; + +template +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry) + { + using std::sqrt; + using std::abs; + Scalar z; + JacobiRotation rot; + RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); + + const RealScalar considerAsZero = (std::numeric_limits::min)(); + const RealScalar precision = NumTraits::epsilon(); + + if(n==0) + { + // make sure first column is zero + work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0); + + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) + { + // work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.row(p) *= z; + if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); + } + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + // otherwise the second row is already zero, so we have nothing to do. 
+ } + else + { + rot.c() = conj(work_matrix.coeff(p,p)) / n; + rot.s() = work_matrix.coeff(q,p) / n; + work_matrix.applyOnTheLeft(p,q,rot); + if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.col(q) *= z; + if(svd.computeV()) svd.m_matrixV.col(q) *= z; + } + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + } + + // update largest diagonal entry + maxDiagEntry = numext::maxi(maxDiagEntry,numext::maxi(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q)))); + // and check whether the 2x2 block is already diagonal + RealScalar threshold = numext::maxi(considerAsZero, precision * maxDiagEntry); + return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold; + } +}; + +template +struct traits > +{ + typedef _MatrixType MatrixType; +}; + +} // end namespace internal + +/** \ingroup SVD_Module + * + * + * \class JacobiSVD + * + * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix + * + * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition + * \tparam QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally + * for the R-SVD step for non-square matrices. See discussion of possible values below. + * + * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product + * \f[ A = U S V^* \f] + * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; + * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left + * and right \em singular \em vectors of \a A respectively. + * + * Singular values are always sorted in decreasing order. + * + * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. + * + * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the + * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual + * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, + * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. + * + * Here's an example demonstrating basic usage: + * \include JacobiSVD_basic.cpp + * Output: \verbinclude JacobiSVD_basic.out + * + * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than + * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and + * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. + * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. 
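+ *
+ * A minimal least-squares sketch (illustrative only, not part of the upstream documentation):
+ * \code
+ * Eigen::MatrixXf A = Eigen::MatrixXf::Random(30, 10);
+ * Eigen::VectorXf b = Eigen::VectorXf::Random(30);
+ * Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+ * Eigen::VectorXf x = svd.solve(b); // minimizes |A*x - b| using the thin unitaries
+ * \endcode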
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+ *     Contrary to other QRs, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ *     is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ *     process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner lets you skip the QR preconditioner entirely. This is useful if you know that you will only be computing
+ *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+ *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
+ *     if QR preconditioning is needed before applying it anyway.
+ *
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+ : public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
+{
+    typedef SVDBase<JacobiSVD> Base;
+  public:
+
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+      MatrixOptions = MatrixType::Options
+    };
+
+    typedef typename Base::MatrixUType MatrixUType;
+    typedef typename Base::MatrixVType MatrixVType;
+    typedef typename Base::SingularValuesType SingularValuesType;
+
+    typedef typename internal::plain_row_type<MatrixType>::type RowType;
+    typedef typename internal::plain_col_type<MatrixType>::type ColType;
+    typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime, MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+            WorkMatrixType;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via JacobiSVD::compute(const MatrixType&).
+      */
+    JacobiSVD()
+    {}
+
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem size.
+      * \sa JacobiSVD()
+      */
+    JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+    {
+      allocate(rows, cols, computationOptions);
+    }
+
+    /** \brief Constructor performing the decomposition of given matrix.
+     *
+     * \param matrix the matrix to decompose
+     * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+     *                           #ComputeFullV, #ComputeThinV.
+ * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. + */ + explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + { + compute(matrix, computationOptions); + } + + /** \brief Method performing the decomposition of given matrix using custom options. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. + */ + JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). + */ + JacobiSVD& compute(const MatrixType& matrix) + { + return compute(matrix, m_computationOptions); + } + + using Base::computeU; + using Base::computeV; + using Base::rows; + using Base::cols; + using Base::rank; + + private: + void allocate(Index rows, Index cols, unsigned int computationOptions); + + protected: + using Base::m_matrixU; + using Base::m_matrixV; + using Base::m_singularValues; + using Base::m_isInitialized; + using Base::m_isAllocated; + using Base::m_usePrescribedThreshold; + using Base::m_computeFullU; + using Base::m_computeThinU; + using Base::m_computeFullV; + using Base::m_computeThinV; + using Base::m_computationOptions; + using Base::m_nonzeroSingularValues; + using Base::m_rows; + using Base::m_cols; + using Base::m_diagSize; + using Base::m_prescribedThreshold; + WorkMatrixType m_workMatrix; + + template + friend struct internal::svd_precondition_2x2_block_to_be_real; + template + friend struct internal::qr_preconditioner_impl; + + internal::qr_preconditioner_impl m_qr_precond_morecols; + internal::qr_preconditioner_impl m_qr_precond_morerows; + MatrixType m_scaledMatrix; +}; + +template +void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return; + } + + m_rows = rows; + m_cols = cols; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns."); + if (QRPreconditioner == 
FullPivHouseholderQRPreconditioner) + { + eigen_assert(!(m_computeThinU || m_computeThinV) && + "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " + "Use the ColPivHouseholderQR preconditioner instead."); + } + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows + : m_computeThinU ? m_diagSize + : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols + : m_computeThinV ? m_diagSize + : 0); + m_workMatrix.resize(m_diagSize, m_diagSize); + + if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); + if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); + if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols); +} + +template +JacobiSVD& +JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) +{ + using std::abs; + allocate(matrix.rows(), matrix.cols(), computationOptions); + + // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, + // only worsening the precision of U and V as we accumulate more rotations + const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); + + // limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) + const RealScalar considerAsZero = (std::numeric_limits::min)(); + + // Scaling factor to reduce over/under-flows + RealScalar scale = matrix.cwiseAbs().maxCoeff(); + if(scale==RealScalar(0)) scale = RealScalar(1); + + /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ + + if(m_rows!=m_cols) + { + m_scaledMatrix = matrix / scale; + m_qr_precond_morecols.run(*this, m_scaledMatrix); + m_qr_precond_morerows.run(*this, m_scaledMatrix); + } + else + { + m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale; + if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); + if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); + if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); + if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); + } + + /*** step 2. The main Jacobi SVD iteration. ***/ + RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff(); + + bool finished = false; + while(!finished) + { + finished = true; + + // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix + + for(Index p = 1; p < m_diagSize; ++p) + { + for(Index q = 0; q < p; ++q) + { + // if this 2x2 sub-matrix is not diagonal already... + // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't + // keep us iterating forever. Similarly, small denormal numbers are considered zero. 
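An aside on the sweep that follows: it repeatedly diagonalizes 2x2 sub-blocks with a left/right pair of Jacobi rotations (via the internal helper real_2x2_jacobi_svd). A minimal self-contained sketch of that building block, using the public JacobiRotation API on a symmetric block (the general non-symmetric case needs the left/right pair used below; the matrix values here are illustrative only):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d M;
      M << 3.0, 1.0,
           1.0, 2.0;                        // a symmetric 2x2 "sub-matrix"
      Eigen::JacobiRotation<double> J;
      J.makeJacobi(M, 0, 1);                // choose c,s annihilating the (0,1) entry
      M.applyOnTheLeft(0, 1, J.adjoint());  // J^* M
      M.applyOnTheRight(0, 1, J);           // (J^* M) J is now diagonal
      std::cout << M << "\n";               // off-diagonal entries ~ 0
    }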
+        RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
+        if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
+        {
+          finished = false;
+          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+          // the complex to real operation returns true if the updated 2x2 block is not already diagonal
+          if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
+          {
+            JacobiRotation<RealScalar> j_left, j_right;
+            internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+            // accumulate resulting Jacobi rotations
+            m_workMatrix.applyOnTheLeft(p,q,j_left);
+            if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+            m_workMatrix.applyOnTheRight(p,q,j_right);
+            if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+
+            // keep track of the largest diagonal coefficient
+            maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
+          }
+        }
+      }
+    }
+  }
+
+  /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
+
+  for(Index i = 0; i < m_diagSize; ++i)
+  {
+    // For a complex matrix, some diagonal coefficients might not have been
+    // treated by svd_precondition_2x2_block_to_be_real, and the imaginary part
+    // of some diagonal entry might not be null.
+    if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)
+    {
+      RealScalar a = abs(m_workMatrix.coeff(i,i));
+      m_singularValues.coeffRef(i) = abs(a);
+      if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+    }
+    else
+    {
+      // m_workMatrix.coeff(i,i) is already real, no difficulty:
+      RealScalar a = numext::real(m_workMatrix.coeff(i,i));
+      m_singularValues.coeffRef(i) = abs(a);
+      if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);
+    }
+  }
+
+  m_singularValues *= scale;
+
+  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+  m_nonzeroSingularValues = m_diagSize;
+  for(Index i = 0; i < m_diagSize; i++)
+  {
+    Index pos;
+    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
+    if(maxRemainingSingularValue == RealScalar(0))
+    {
+      m_nonzeroSingularValues = i;
+      break;
+    }
+    if(pos)
+    {
+      pos += i;
+      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
+      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
+    }
+  }
+
+  m_isInitialized = true;
+  return *this;
+}
+
+/** \svd_module
+  *
+  * \return the singular value decomposition of \c *this computed by two-sided
+  * Jacobi transformations.
+  *
+  * \sa class JacobiSVD
+  */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+  return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD_LAPACKE.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD_LAPACKE.h
new file mode 100644
index 000000000..ff0516f61
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/JacobiSVD_LAPACKE.h
@@ -0,0 +1,91 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * Singular Value Decomposition - SVD. + ******************************************************************************** +*/ + +#ifndef EIGEN_JACOBISVD_LAPACKE_H +#define EIGEN_JACOBISVD_LAPACKE_H + +namespace Eigen { + +/** \internal Specialization for the data types supported by LAPACKe */ + +#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \ +template<> inline \ +JacobiSVD, ColPivHouseholderQRPreconditioner>& \ +JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ +{ \ + typedef Matrix MatrixType; \ + /*typedef MatrixType::Scalar Scalar;*/ \ + /*typedef MatrixType::RealScalar RealScalar;*/ \ + allocate(matrix.rows(), matrix.cols(), computationOptions); \ +\ + /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ + m_nonzeroSingularValues = m_diagSize; \ +\ + lapack_int lda = internal::convert_index(matrix.outerStride()), ldu, ldvt; \ + lapack_int matrix_order = LAPACKE_COLROW; \ + char jobu, jobvt; \ + LAPACKE_TYPE *u, *vt, dummy; \ + jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \ + jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \ + if (computeU()) { \ + ldu = internal::convert_index(m_matrixU.outerStride()); \ + u = (LAPACKE_TYPE*)m_matrixU.data(); \ + } else { ldu=1; u=&dummy; }\ + MatrixType localV; \ + lapack_int vt_rows = (m_computeFullV) ? internal::convert_index(m_cols) : (m_computeThinV) ? internal::convert_index(m_diagSize) : 1; \ + if (computeV()) { \ + localV.resize(vt_rows, m_cols); \ + ldvt = internal::convert_index(localV.outerStride()); \ + vt = (LAPACKE_TYPE*)localV.data(); \ + } else { ldvt=1; vt=&dummy; }\ + Matrix superb; superb.resize(m_diagSize, 1); \ + MatrixType m_temp; m_temp = matrix; \ + LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index(m_rows), internal::convert_index(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \ + if (computeV()) m_matrixV = localV.adjoint(); \ + /* for(int i=0;i +// Copyright (C) 2014 Gael Guennebaud +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
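The two files above define JacobiSVD's public interface (and, when EIGEN_USE_LAPACKE is defined, a LAPACKE-backed ?gesvd specialization of compute() for the default ColPivHouseholderQRPreconditioner). A minimal usage sketch of the computationOptions bit-field and the preconditioner template argument, assuming the Eigen 3.3-era API vendored here:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Thin U/V require a Dynamic number of columns (e.g. MatrixXf) and are
      // not available with the FullPivHouseholderQR preconditioner, per the
      // assertions above.
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
      Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values:\n" << svd.singularValues() << "\n";

      // The preconditioner is the second template argument; the default is
      // ColPivHouseholderQRPreconditioner.
      Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::FullPivHouseholderQRPreconditioner>
          safest(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
      std::cout << "U is " << safest.matrixU().rows() << "x"
                << safest.matrixU().cols() << "\n";   // 5x5 (full U)
    }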
+ +#ifndef EIGEN_SVDBASE_H +#define EIGEN_SVDBASE_H + +namespace Eigen { +/** \ingroup SVD_Module + * + * + * \class SVDBase + * + * \brief Base class of SVD algorithms + * + * \tparam Derived the type of the actual SVD decomposition + * + * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product + * \f[ A = U S V^* \f] + * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; + * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left + * and right \em singular \em vectors of \a A respectively. + * + * Singular values are always sorted in decreasing order. + * + * + * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the + * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual + * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, + * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. + * + * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to + * terminate in finite (and reasonable) time. + * \sa class BDCSVD, class JacobiSVD + */ +template +class SVDBase +{ + +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; + typedef typename internal::plain_diag_type::type SingularValuesType; + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** \returns the \a U matrix. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, + * the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink. + * + * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. + * + * This method asserts that you asked for \a U to be computed. + */ + const MatrixUType& matrixU() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return m_matrixU; + } + + /** \returns the \a V matrix. 
+  *
+  * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+  * the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink.
+  *
+  * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
+  *
+  * This method asserts that you asked for \a V to be computed.
+  */
+  const MatrixVType& matrixV() const
+  {
+    eigen_assert(m_isInitialized && "SVD is not initialized.");
+    eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+    return m_matrixV;
+  }
+
+  /** \returns the vector of singular values.
+    *
+    * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+    * returned vector has size \a m. Singular values are always sorted in decreasing order.
+    */
+  const SingularValuesType& singularValues() const
+  {
+    eigen_assert(m_isInitialized && "SVD is not initialized.");
+    return m_singularValues;
+  }
+
+  /** \returns the number of singular values that are not exactly 0 */
+  Index nonzeroSingularValues() const
+  {
+    eigen_assert(m_isInitialized && "SVD is not initialized.");
+    return m_nonzeroSingularValues;
+  }
+
+  /** \returns the rank of the matrix of which \c *this is the SVD.
+    *
+    * \note This method has to determine which singular values should be considered nonzero.
+    *       For that, it uses the threshold value that you can control by calling
+    *       setThreshold(const RealScalar&).
+    */
+  inline Index rank() const
+  {
+    using std::abs;
+    eigen_assert(m_isInitialized && "SVD is not initialized.");
+    if(m_singularValues.size()==0) return 0;
+    RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
+    Index i = m_nonzeroSingularValues-1;
+    while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+    return i+1;
+  }
+
+  /** Allows you to prescribe a threshold to be used by certain methods, such as rank() and solve(),
+    * which need to determine when singular values are to be considered nonzero.
+    * This is not used for the SVD decomposition itself.
+    *
+    * When it needs to get the threshold value, Eigen calls threshold().
+    * The default is \c NumTraits<Scalar>::epsilon()
+    *
+    * \param threshold The new value to use as the threshold.
+    *
+    * A singular value will be considered nonzero if it is strictly greater than
+    * \f$ threshold \times \vert max singular value \vert \f$.
+    *
+    * If you want to come back to the default behavior, call setThreshold(Default_t).
+    */
+  Derived& setThreshold(const RealScalar& threshold)
+  {
+    m_usePrescribedThreshold = true;
+    m_prescribedThreshold = threshold;
+    return derived();
+  }
+
+  /** Allows you to come back to the default behavior, letting Eigen use its default formula for
+    * determining the threshold.
+    *
+    * You should pass the special object Eigen::Default as parameter here.
+    * \code svd.setThreshold(Eigen::Default); \endcode
+    *
+    * See the documentation of setThreshold(const RealScalar&).
+    */
+  Derived& setThreshold(Default_t)
+  {
+    m_usePrescribedThreshold = false;
+    return derived();
+  }
+
+  /** Returns the threshold that will be used by certain methods such as rank().
+    *
+    * See the documentation of setThreshold(const RealScalar&).
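To make the threshold semantics above concrete, a hedged sketch (any decomposition derived from SVDBase behaves the same way; the matrix values are illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // A 4x4 matrix of rank at most 3: one column repeated.
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      A.col(3) = A.col(2);

      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);   // singular values only; rank() needs no U/V
      std::cout << "rank with default threshold: " << svd.rank() << "\n";  // typically 3

      // From now on, sigma_i counts as nonzero only if sigma_i > 1e-4 * sigma_max.
      // The SVD itself is not recomputed.
      svd.setThreshold(1e-4);
      std::cout << "rank with threshold 1e-4: " << svd.rank() << "\n";
    }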
+ */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + // this temporary is needed to workaround a MSVC issue + Index diagSize = (std::max)(1,m_diagSize); + return m_usePrescribedThreshold ? m_prescribedThreshold + : diagSize*NumTraits::epsilon(); + } + + /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ + inline bool computeU() const { return m_computeFullU || m_computeThinU; } + /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ + inline bool computeV() const { return m_computeFullV || m_computeThinV; } + + inline Index rows() const { return m_rows; } + inline Index cols() const { return m_cols; } + + /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * + * \param b the right-hand-side of the equation to solve. + * + * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. + * + * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. + * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); + return Solve(derived(), b.derived()); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC + void _solve_impl(const RhsType &rhs, DstType &dst) const; + #endif + +protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + // return true if already allocated + bool allocate(Index rows, Index cols, unsigned int computationOptions) ; + + MatrixUType m_matrixU; + MatrixVType m_matrixV; + SingularValuesType m_singularValues; + bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; + bool m_computeFullU, m_computeThinU; + bool m_computeFullV, m_computeThinV; + unsigned int m_computationOptions; + Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; + RealScalar m_prescribedThreshold; + + /** \brief Default Constructor. 
+ * + * Default constructor of SVDBase + */ + SVDBase() + : m_isInitialized(false), + m_isAllocated(false), + m_usePrescribedThreshold(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1), m_diagSize(0) + { + check_template_parameters(); + } + + +}; + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void SVDBase::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + eigen_assert(rhs.rows() == rows()); + + // A = U S V^* + // So A^{-1} = V S^{-1} U^* + + Matrix tmp; + Index l_rank = rank(); + tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs; + tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp; + dst = m_matrixV.leftCols(l_rank) * tmp; +} +#endif + +template +bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return true; + } + + m_rows = rows; + m_cols = cols; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); + + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0); + + return false; +} + +}// end namespace + +#endif // EIGEN_SVDBASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/UpperBidiagonalization.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/UpperBidiagonalization.h new file mode 100644 index 000000000..11ac847e1 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SVD/UpperBidiagonalization.h @@ -0,0 +1,414 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BIDIAGONALIZATION_H +#define EIGEN_BIDIAGONALIZATION_H + +namespace Eigen { + +namespace internal { +// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. +// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. 
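Referring back to SVDBase::_solve_impl just above (at the end of SVDBase.h): it computes x = V * S^-1 * U^* * b restricted to the numerical rank. A minimal sketch checking that solve() matches the spelled-out formula, assuming the vendored Eigen 3.3-era API:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);

      // Thin factors are sufficient for (least squares) solving, as documented above.
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::VectorXd x = svd.solve(b);

      // The same result, spelled out as in _solve_impl, truncated to the rank r.
      Eigen::Index r = svd.rank();
      Eigen::VectorXd tmp = svd.matrixU().leftCols(r).adjoint() * b;
      tmp = svd.singularValues().head(r).asDiagonal().inverse() * tmp;
      Eigen::VectorXd x2 = svd.matrixV().leftCols(r) * tmp;

      std::cout << (x - x2).norm() << "\n";  // ~0 up to rounding
    }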
+ +template class UpperBidiagonalization +{ + public: + + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + ColsAtCompileTimeMinusOne = internal::decrement_size::ret + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + typedef Matrix RowVectorType; + typedef Matrix ColVectorType; + typedef BandMatrix BidiagonalType; + typedef Matrix DiagVectorType; + typedef Matrix SuperDiagVectorType; + typedef HouseholderSequence< + const MatrixType, + const typename internal::remove_all::ConjugateReturnType>::type + > HouseholderUSequenceType; + typedef HouseholderSequence< + const typename internal::remove_all::type, + Diagonal, + OnTheRight + > HouseholderVSequenceType; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via Bidiagonalization::compute(const MatrixType&). + */ + UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {} + + explicit UpperBidiagonalization(const MatrixType& matrix) + : m_householder(matrix.rows(), matrix.cols()), + m_bidiagonal(matrix.cols(), matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + UpperBidiagonalization& compute(const MatrixType& matrix); + UpperBidiagonalization& computeUnblocked(const MatrixType& matrix); + + const MatrixType& householder() const { return m_householder; } + const BidiagonalType& bidiagonal() const { return m_bidiagonal; } + + const HouseholderUSequenceType householderU() const + { + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate()); + } + + const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy + { + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>()) + .setLength(m_householder.cols()-1) + .setShift(1); + } + + protected: + MatrixType m_householder; + BidiagonalType m_bidiagonal; + bool m_isInitialized; +}; + +// Standard upper bidiagonalization without fancy optimizations +// This version should be faster for small matrix size +template +void upperbidiagonalization_inplace_unblocked(MatrixType& mat, + typename MatrixType::RealScalar *diagonal, + typename MatrixType::RealScalar *upper_diagonal, + typename MatrixType::Scalar* tempData = 0) +{ + typedef typename MatrixType::Scalar Scalar; + + Index rows = mat.rows(); + Index cols = mat.cols(); + + typedef Matrix TempType; + TempType tempVector; + if(tempData==0) + { + tempVector.resize(rows); + tempData = tempVector.data(); + } + + for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k) + { + Index remainingRows = rows - k; + Index remainingCols = cols - k - 1; + + // construct left householder transform in-place in A + mat.col(k).tail(remainingRows) + .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]); + // apply householder transform to remaining part of A on the left + mat.bottomRightCorner(remainingRows, remainingCols) + .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData); + + if(k == cols-1) break; + + // construct right householder transform in-place in mat + mat.row(k).tail(remainingCols) + 
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]); + // apply householder transform to remaining part of mat on the left + mat.bottomRightCorner(remainingRows-1, remainingCols) + .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData); + } +} + +/** \internal + * Helper routine for the block reduction to upper bidiagonal form. + * + * Let's partition the matrix A: + * + * | A00 A01 | + * A = | | + * | A10 A11 | + * + * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10] + * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11 + * is updated using matrix-matrix products: + * A22 -= V * Y^T - X * U^T + * where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01 + * respectively, and the update matrices X and Y are computed during the reduction. + * + */ +template +void upperbidiagonalization_blocked_helper(MatrixType& A, + typename MatrixType::RealScalar *diagonal, + typename MatrixType::RealScalar *upper_diagonal, + Index bs, + Ref::Flags & RowMajorBit> > X, + Ref::Flags & RowMajorBit> > Y) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename NumTraits::Literal Literal; + enum { StorageOrder = traits::Flags & RowMajorBit }; + typedef InnerStride ColInnerStride; + typedef InnerStride RowInnerStride; + typedef Ref, 0, ColInnerStride> SubColumnType; + typedef Ref, 0, RowInnerStride> SubRowType; + typedef Ref > SubMatType; + + Index brows = A.rows(); + Index bcols = A.cols(); + + Scalar tau_u, tau_u_prev(0), tau_v; + + for(Index k = 0; k < bs; ++k) + { + Index remainingRows = brows - k; + Index remainingCols = bcols - k - 1; + + SubMatType X_k1( X.block(k,0, remainingRows,k) ); + SubMatType V_k1( A.block(k,0, remainingRows,k) ); + + // 1 - update the k-th column of A + SubColumnType v_k = A.col(k).tail(remainingRows); + v_k -= V_k1 * Y.row(k).head(k).adjoint(); + if(k) v_k -= X_k1 * A.col(k).head(k); + + // 2 - construct left Householder transform in-place + v_k.makeHouseholderInPlace(tau_v, diagonal[k]); + + if(k+10) A.coeffRef(k-1,k) = tau_u_prev; + tau_u_prev = tau_u; + } + else + A.coeffRef(k-1,k) = tau_u_prev; + + A.coeffRef(k,k) = tau_v; + } + + if(bsbs && brows>bs) + { + SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) ); + SubMatType A10( A.block(bs,0, brows-bs,bs) ); + SubMatType A01( A.block(0,bs, bs,bcols-bs) ); + Scalar tmp = A01(bs-1,0); + A01(bs-1,0) = Literal(1); + A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint(); + A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01; + A01(bs-1,0) = tmp; + } +} + +/** \internal + * + * Implementation of a block-bidiagonal reduction. + * It is based on the following paper: + * The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form. + * by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. 
(1995)
+  * section 3.3
+  */
+template<typename MatrixType, typename BidiagType>
+void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
+                                            Index maxBlockSize=32,
+                                            typename MatrixType::Scalar* /*tempData*/ = 0)
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+
+  Index rows = A.rows();
+  Index cols = A.cols();
+  Index size = (std::min)(rows, cols);
+
+  // X and Y are work space
+  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
+  Matrix<Scalar, MatrixType::RowsAtCompileTime, Dynamic, StorageOrder, MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
+  Matrix<Scalar, MatrixType::ColsAtCompileTime, Dynamic, StorageOrder, MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
+  Index blockSize = (std::min)(maxBlockSize,size);
+
+  Index k = 0;
+  for(k = 0; k < size; k += blockSize)
+  {
+    Index bs = (std::min)(size-k,blockSize); // actual size of the block
+    Index brows = rows - k;                  // rows of the block
+    Index bcols = cols - k;                  // columns of the block
+
+    // partition the matrix A:
+    //
+    //      | A00 A01 A02 |
+    //      |             |
+    //  A = | A10 A11 A12 |
+    //      |             |
+    //      | A20 A21 A22 |
+    //
+    // where A11 is a bs x bs diagonal block,
+    // and let:
+    //      | A11 A12 |
+    //  B = |         |
+    //      | A21 A22 |
+
+    BlockType B = A.block(k,k,brows,bcols);
+
+    // This stage performs the bidiagonalization of A11, A21, A12, and updating of A22.
+    // Finally, the algorithm continues on the updated A22.
+    //
+    // However, if B is too small, or A22 empty, then let's use an unblocked strategy
+    if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
+    {
+      upperbidiagonalization_inplace_unblocked(B,
+                                               &(bidiagonal.template diagonal<0>().coeffRef(k)),
+                                               &(bidiagonal.template diagonal<1>().coeffRef(k)),
+                                               X.data()
+                                              );
+      break; // We're done
+    }
+    else
+    {
+      upperbidiagonalization_blocked_helper( B,
+                                             &(bidiagonal.template diagonal<0>().coeffRef(k)),
+                                             &(bidiagonal.template diagonal<1>().coeffRef(k)),
+                                             bs,
+                                             X.topLeftCorner(brows,bs),
+                                             Y.topLeftCorner(bcols,bs)
+                                           );
+    }
+  }
+}
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
+{
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  EIGEN_ONLY_USED_FOR_DEBUG(cols);
+
+  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+
+  m_householder = matrix;
+
+  ColVectorType temp(rows);
+
+  upperbidiagonalization_inplace_unblocked(m_householder,
+                                           &(m_bidiagonal.template diagonal<0>().coeffRef(0)),
+                                           &(m_bidiagonal.template diagonal<1>().coeffRef(0)),
+                                           temp.data());
+
+  m_isInitialized = true;
+  return *this;
+}
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
+{
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  EIGEN_ONLY_USED_FOR_DEBUG(rows);
+  EIGEN_ONLY_USED_FOR_DEBUG(cols);
+
+  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+
+  m_householder = matrix;
+  upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
+
+  m_isInitialized = true;
+  return *this;
+}
+
+#if 0
+/** \return the Householder QR decomposition of \c *this.
+ * + * \sa class Bidiagonalization + */ +template +const UpperBidiagonalization::PlainObject> +MatrixBase::bidiagonalization() const +{ + return UpperBidiagonalization(eval()); +} +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BIDIAGONALIZATION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h new file mode 100644 index 000000000..2907f6529 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -0,0 +1,689 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H +#define EIGEN_SIMPLICIAL_CHOLESKY_H + +namespace Eigen { + +enum SimplicialCholeskyMode { + SimplicialCholeskyLLT, + SimplicialCholeskyLDLT +}; + +namespace internal { + template + struct simplicial_cholesky_grab_input { + typedef CholMatrixType const * ConstCholMatrixPtr; + static void run(const InputMatrixType& input, ConstCholMatrixPtr &pmat, CholMatrixType &tmp) + { + tmp = input; + pmat = &tmp; + } + }; + + template + struct simplicial_cholesky_grab_input { + typedef MatrixType const * ConstMatrixPtr; + static void run(const MatrixType& input, ConstMatrixPtr &pmat, MatrixType &/*tmp*/) + { + pmat = &input; + } + }; +} // end namespace internal + +/** \ingroup SparseCholesky_Module + * \brief A base class for direct sparse Cholesky factorizations + * + * This is a base class for LL^T and LDL^T Cholesky factorizations of sparse matrices that are + * selfadjoint and positive definite. These factorizations allow for solving A.X = B where + * X and B can be either dense or sparse. + * + * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization + * such that the factorized matrix is P A P^-1. + * + * \tparam Derived the type of the derived class, that is the actual factorization type. 
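Before the class definition resumes, a hedged end-to-end sketch of the workflow this base class implements (symbolic analysis plus numeric factorization of the permuted matrix P A P^-1, then a dense solve), using the SimplicialLDLT class defined later in this file; the matrix values are illustrative only:

    #include <Eigen/Sparse>
    #include <iostream>
    #include <vector>

    int main()
    {
      // Build a small SPD tridiagonal matrix.
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 5;
      std::vector<Eigen::Triplet<double>> coeffs;
      for (int i = 0; i < n; ++i) {
        coeffs.emplace_back(i, i, 4.0);
        if (i + 1 < n) {
          coeffs.emplace_back(i, i + 1, -1.0);
          coeffs.emplace_back(i + 1, i, -1.0);
        }
      }
      SpMat A(n, n);
      A.setFromTriplets(coeffs.begin(), coeffs.end());

      Eigen::SimplicialLDLT<SpMat> solver(A);     // analyzePattern + factorize
      if (solver.info() != Eigen::Success) return 1;
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::VectorXd x = solver.solve(b);        // permuted L D L^T solve
      std::cout << (A * x - b).norm() << "\n";    // ~0
    }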
+  *
+  */
+template<typename Derived>
+class SimplicialCholeskyBase : public SparseSolverBase<Derived>
+{
+    typedef SparseSolverBase<Derived> Base;
+    using Base::m_isInitialized;
+
+  public:
+    typedef typename internal::traits<Derived>::MatrixType MatrixType;
+    typedef typename internal::traits<Derived>::OrderingType OrderingType;
+    enum { UpLo = internal::traits<Derived>::UpLo };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
+    typedef CholMatrixType const * ConstCholMatrixPtr;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+
+    enum {
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+  public:
+
+    using Base::derived;
+
+    /** Default constructor */
+    SimplicialCholeskyBase()
+      : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+    {}
+
+    explicit SimplicialCholeskyBase(const MatrixType& matrix)
+      : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+    {
+      derived().compute(matrix);
+    }
+
+    ~SimplicialCholeskyBase()
+    {
+    }
+
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index rows() const { return m_matrix.rows(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** \returns the permutation P
+      * \sa permutationPinv() */
+    const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationP() const
+    { return m_P; }
+
+    /** \returns the inverse P^-1 of the permutation P
+      * \sa permutationP() */
+    const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationPinv() const
+    { return m_Pinv; }
+
+    /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization.
+      *
+      * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n
+      * \c d_ii = \a offset + \a scale * \c d_ii
+      *
+      * The default is the identity transformation with \a offset=0, and \a scale=1.
+      *
+      * \returns a reference to \c *this.
+ */ + Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1) + { + m_shiftOffset = offset; + m_shiftScale = scale; + return derived(); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void dumpMemory(Stream& s) + { + int total = 0; + s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n"; + s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n"; + s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " TOTAL: " << (total>> 20) << "Mb" << "\n"; + } + + /** \internal */ + template + void _solve_impl(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + eigen_assert(m_matrix.rows()==b.rows()); + + if(m_info!=Success) + return; + + if(m_P.size()>0) + dest = m_P * b; + else + dest = b; + + if(m_matrix.nonZeros()>0) // otherwise L==I + derived().matrixL().solveInPlace(dest); + + if(m_diag.size()>0) + dest = m_diag.asDiagonal().inverse() * dest; + + if (m_matrix.nonZeros()>0) // otherwise U==I + derived().matrixU().solveInPlace(dest); + + if(m_P.size()>0) + dest = m_Pinv * dest; + } + + template + void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const + { + internal::solve_sparse_through_dense_panels(derived(), b, dest); + } + +#endif // EIGEN_PARSED_BY_DOXYGEN + + protected: + + /** Computes the sparse Cholesky decomposition of \a matrix */ + template + void compute(const MatrixType& matrix) + { + eigen_assert(matrix.rows()==matrix.cols()); + Index size = matrix.cols(); + CholMatrixType tmp(size,size); + ConstCholMatrixPtr pmat; + ordering(matrix, pmat, tmp); + analyzePattern_preordered(*pmat, DoLDLT); + factorize_preordered(*pmat); + } + + template + void factorize(const MatrixType& a) + { + eigen_assert(a.rows()==a.cols()); + Index size = a.cols(); + CholMatrixType tmp(size,size); + ConstCholMatrixPtr pmat; + + if(m_P.size()==0 && (UpLo&Upper)==Upper) + { + // If there is no ordering, try to directly use the input matrix without any copy + internal::simplicial_cholesky_grab_input::run(a, pmat, tmp); + } + else + { + tmp.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); + pmat = &tmp; + } + + factorize_preordered(*pmat); + } + + template + void factorize_preordered(const CholMatrixType& a); + + void analyzePattern(const MatrixType& a, bool doLDLT) + { + eigen_assert(a.rows()==a.cols()); + Index size = a.cols(); + CholMatrixType tmp(size,size); + ConstCholMatrixPtr pmat; + ordering(a, pmat, tmp); + analyzePattern_preordered(*pmat,doLDLT); + } + void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT); + + void ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap); + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + + mutable ComputationInfo m_info; + bool m_factorizationIsOk; + bool m_analysisIsOk; + + CholMatrixType m_matrix; + VectorType m_diag; // the diagonal coefficients (LDLT mode) + 
VectorI m_parent; // elimination tree + VectorI m_nonZerosPerCol; + PermutationMatrix m_P; // the permutation + PermutationMatrix m_Pinv; // the inverse permutation + + RealScalar m_shiftOffset; + RealScalar m_shiftScale; +}; + +template > class SimplicialLLT; +template > class SimplicialLDLT; +template > class SimplicialCholesky; + +namespace internal { + +template struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; + typedef TriangularView MatrixL; + typedef TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } +}; + +template struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; + typedef TriangularView MatrixL; + typedef TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } +}; + +template struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; + enum { UpLo = _UpLo }; +}; + +} + +/** \ingroup SparseCholesky_Module + * \class SimplicialLLT + * \brief A direct sparse LLT Cholesky factorizations + * + * This class provides a LL^T Cholesky factorizations of sparse matrices that are + * selfadjoint and positive definite. The factorization allows for solving A.X = B where + * X and B can be either dense or sparse. + * + * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization + * such that the factorized matrix is P A P^-1. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. 
Default is AMDOrdering<>
+  *
+  * \implsparsesolverconcept
+  *
+  * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialLLT> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialLLT> Traits;
+    typedef typename Traits::MatrixL MatrixL;
+    typedef typename Traits::MatrixU MatrixU;
+public:
+    /** Default constructor */
+    SimplicialLLT() : Base() {}
+    /** Constructs and performs the LLT factorization of \a matrix */
+    explicit SimplicialLLT(const MatrixType& matrix)
+        : Base(matrix) {}
+
+    /** \returns an expression of the factor L */
+    inline const MatrixL matrixL() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+        return Traits::getL(Base::m_matrix);
+    }
+
+    /** \returns an expression of the factor U (= L^*) */
+    inline const MatrixU matrixU() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+        return Traits::getU(Base::m_matrix);
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialLLT& compute(const MatrixType& matrix)
+    {
+      Base::template compute<false>(matrix);
+      return *this;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, false);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      Base::template factorize<false>(a);
+    }
+
+    /** \returns the determinant of the underlying matrix from the current factorization */
+    Scalar determinant() const
+    {
+      Scalar detL = Base::m_matrix.diagonal().prod();
+      return numext::abs2(detL);
+    }
+};
+
+/** \ingroup SparseCholesky_Module
+  * \class SimplicialLDLT
+  * \brief A direct sparse LDLT Cholesky factorization without square root.
+  *
+  * This class provides an LDL^T Cholesky factorization without square root of sparse matrices that are
+  * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+  * X and B can be either dense or sparse.
+  *
+  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
+  * such that the factorized matrix is P A P^-1.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>.
Default is AMDOrdering<>
+  *
+  * \implsparsesolverconcept
+  *
+  * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialLDLT> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialLDLT> Traits;
+    typedef typename Traits::MatrixL MatrixL;
+    typedef typename Traits::MatrixU MatrixU;
+public:
+    /** Default constructor */
+    SimplicialLDLT() : Base() {}
+
+    /** Constructs and performs the LDLT factorization of \a matrix */
+    explicit SimplicialLDLT(const MatrixType& matrix)
+        : Base(matrix) {}
+
+    /** \returns a vector expression of the diagonal D */
+    inline const VectorType vectorD() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Base::m_diag;
+    }
+    /** \returns an expression of the factor L */
+    inline const MatrixL matrixL() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Traits::getL(Base::m_matrix);
+    }
+
+    /** \returns an expression of the factor U (= L^*) */
+    inline const MatrixU matrixU() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Traits::getU(Base::m_matrix);
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialLDLT& compute(const MatrixType& matrix)
+    {
+      Base::template compute<true>(matrix);
+      return *this;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, true);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
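As the analyzePattern()/factorize() documentation here describes, the symbolic step can be reused across matrices sharing one sparsity pattern. A minimal sketch, with an illustrative diagonal SPD matrix:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 4;
      SpMat A1(n, n);
      for (int i = 0; i < n; ++i) A1.insert(i, i) = 2.0 + i;  // diagonal SPD example
      A1.makeCompressed();
      SpMat A2 = 3.0 * A1;  // same sparsity pattern, different values

      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::SimplicialLDLT<SpMat> solver;
      solver.analyzePattern(A1);             // symbolic step, done once per pattern
      solver.factorize(A1);                  // numeric step for A1
      Eigen::VectorXd x1 = solver.solve(b);
      solver.factorize(A2);                  // reuse the symbolic analysis for A2
      Eigen::VectorXd x2 = solver.solve(b);
      std::cout << x1.sum() << " " << x2.sum() << "\n";
    }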
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      Base::template factorize<true>(a);
+    }
+
+    /** \returns the determinant of the underlying matrix from the current factorization */
+    Scalar determinant() const
+    {
+      return Base::m_diag.prod();
+    }
+};
+
+/** \deprecated use SimplicialLDLT or class SimplicialLLT
+  * \ingroup SparseCholesky_Module
+  * \class SimplicialCholesky
+  *
+  * \sa class SimplicialLDLT, class SimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialCholesky> Traits;
+    typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;
+    typedef internal::traits<SimplicialLLT<MatrixType,UpLo>  > LLTTraits;
+  public:
+    SimplicialCholesky() : Base(), m_LDLT(true) {}
+
+    explicit SimplicialCholesky(const MatrixType& matrix)
+      : Base(), m_LDLT(true)
+    {
+      compute(matrix);
+    }
+
+    SimplicialCholesky& setMode(SimplicialCholeskyMode mode)
+    {
+      switch(mode)
+      {
+      case SimplicialCholeskyLLT:
+        m_LDLT = false;
+        break;
+      case SimplicialCholeskyLDLT:
+        m_LDLT = true;
+        break;
+      default:
+        break;
+      }
+
+      return *this;
+    }
+
+    inline const VectorType vectorD() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+        return Base::m_diag;
+    }
+    inline const CholMatrixType rawMatrix() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+        return Base::m_matrix;
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialCholesky& compute(const MatrixType& matrix)
+    {
+      if(m_LDLT)
+        Base::template compute<true>(matrix);
+      else
+        Base::template compute<false>(matrix);
+      return *this;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, m_LDLT);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      if(m_LDLT)
+        Base::template factorize<true>(a);
+      else
+        Base::template factorize<false>(a);
+    }
+
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      eigen_assert(Base::m_matrix.rows()==b.rows());
+
+      if(Base::m_info!=Success)
+        return;
+
+      if(Base::m_P.size()>0)
+        dest = Base::m_P * b;
+      else
+        dest = b;
+
+      if(Base::m_matrix.nonZeros()>0) // otherwise L==I
+      {
+        if(m_LDLT)
+          LDLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+        else
+          LLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+      }
+
+      if(Base::m_diag.size()>0)
+        dest = Base::m_diag.asDiagonal().inverse() * dest;
+
+      if (Base::m_matrix.nonZeros()>0) // otherwise U==I
+      {
+        if(m_LDLT)
+          LDLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+        else
+          LLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+      }
+
+      if(Base::m_P.size()>0)
+        dest = Base::m_Pinv * dest;
+    }
+
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
+    {
+      internal::solve_sparse_through_dense_panels(*this, b, dest);
+    }
+
+    Scalar determinant() const
+    {
+      if(m_LDLT)
+      {
+        return Base::m_diag.prod();
+      }
+      else
+      {
+        Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
+        return numext::abs2(detL);
+      }
+    }
+
+  protected:
+    bool m_LDLT;
+};
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap)
+{
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  pmat = &ap;
+  // Note that ordering methods compute the inverse permutation
+  if(!internal::is_same<OrderingType, NaturalOrdering<Index> >::value)
+  {
+    {
+      CholMatrixType C;
+      C = a.template selfadjointView<UpLo>();
+
+      OrderingType ordering;
+      ordering(C,m_Pinv);
+    }
+
+    if(m_Pinv.size()>0) m_P = m_Pinv.inverse();
+    else                m_P.resize(0);
+
+    ap.resize(size,size);
+    ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
+  }
+  else
+  {
+    m_Pinv.resize(0);
+    m_P.resize(0);
+    if(int(UpLo)==int(Lower) || MatrixType::IsRowMajor)
+    {
+      // we have to transpose the lower part to the upper one
+      ap.resize(size,size);
+      ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>();
+    }
+    else
+      internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, ap);
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
new file mode 100644
index 000000000..31e06995b
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2012 Gael Guennebaud
+
+/*
+
+NOTE: these functions have been adapted from the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+    Your use or distribution of LDL or any modified version of
+    LDL implies that you agree to this License.
+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 + USA + + Permission is hereby granted to use or copy this program under the + terms of the GNU LGPL, provided that the Copyright, this License, + and the Availability of the original version is retained on all copies. + User documentation of any code that uses this code or any modified + version of this code must cite the Copyright, this License, the + Availability note, and "Used by permission." Permission to modify + the code and to distribute modified code is granted, provided the + Copyright, this License, and the Availability note are retained, + and a notice that the code was modified is included. + */ + +#include "../Core/util/NonMPL2.h" + +#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H +#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H + +namespace Eigen { + +template +void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) +{ + const StorageIndex size = StorageIndex(ap.rows()); + m_matrix.resize(size, size); + m_parent.resize(size); + m_nonZerosPerCol.resize(size); + + ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); + + for(StorageIndex k = 0; k < size; ++k) + { + /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ + m_parent[k] = -1; /* parent of k is not yet known */ + tags[k] = k; /* mark node k as visited */ + m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ + for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) + { + StorageIndex i = it.index(); + if(i < k) + { + /* follow path from i to root of etree, stop at flagged node */ + for(; tags[i] != k; i = m_parent[i]) + { + /* find parent of i if not yet determined */ + if (m_parent[i] == -1) + m_parent[i] = k; + m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + } + } + } + + /* construct Lp index array from m_nonZerosPerCol column counts */ + StorageIndex* Lp = m_matrix.outerIndexPtr(); + Lp[0] = 0; + for(StorageIndex k = 0; k < size; ++k) + Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1); + + m_matrix.resizeNonZeros(Lp[size]); + + m_isInitialized = true; + m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; +} + + +template +template +void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& ap) +{ + using std::sqrt; + + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + eigen_assert(ap.rows()==ap.cols()); + eigen_assert(m_parent.size()==ap.rows()); + eigen_assert(m_nonZerosPerCol.size()==ap.rows()); + + const StorageIndex size = StorageIndex(ap.rows()); + const StorageIndex* Lp = m_matrix.outerIndexPtr(); + StorageIndex* Li = m_matrix.innerIndexPtr(); + Scalar* Lx = m_matrix.valuePtr(); + + ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); + + bool ok = true; + m_diag.resize(DoLDLT ? size : 0); + + for(StorageIndex k = 0; k < size; ++k) + { + // compute nonzero pattern of kth row of L, in topological order + y[k] = 0.0; // Y(0:k) is now all zero + StorageIndex top = size; // stack for pattern is empty + tags[k] = k; // mark node k as visited + m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L + for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) + { + StorageIndex i = it.index(); + if(i <= k) + { + y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ + Index len; + for(len = 0; tags[i] != k; i = m_parent[i]) + { + pattern[len++] = i; /* L(k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + while(len > 0) + pattern[--top] = pattern[--len]; + } + } + + /* compute numerical values kth row of L (a sparse triangular solve) */ + + RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k) + y[k] = 0.0; + for(; top < size; ++top) + { + Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + Scalar yi = y[i]; /* get and clear Y(i) */ + y[i] = 0.0; + + /* the nonzero entry L(k,i) */ + Scalar l_ki; + if(DoLDLT) + l_ki = yi / m_diag[i]; + else + yi = l_ki = yi / Lx[Lp[i]]; + + Index p2 = Lp[i] + m_nonZerosPerCol[i]; + Index p; + for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) + y[Li[p]] -= numext::conj(Lx[p]) * yi; + d -= numext::real(l_ki * numext::conj(yi)); + Li[p] = k; /* store L(k,i) in column form of L */ + Lx[p] = l_ki; + ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ + } + if(DoLDLT) + { + m_diag[k] = d; + if(d == RealScalar(0)) + { + ok = false; /* failure, D(k,k) is zero */ + break; + } + } + else + { + Index p = Lp[k] + m_nonZerosPerCol[k]++; + Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ + if(d <= RealScalar(0)) { + ok = false; /* failure, matrix is not positive definite */ + break; + } + Lx[p] = sqrt(d) ; + } + } + + m_info = ok ? 
Success : NumericalIssue; + m_factorizationIsOk = true; +} + +} // end namespace Eigen + +#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/AmbiVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/AmbiVector.h new file mode 100644 index 000000000..2cb7747cc --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/AmbiVector.h @@ -0,0 +1,378 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_AMBIVECTOR_H +#define EIGEN_AMBIVECTOR_H + +namespace Eigen { + +namespace internal { + +/** \internal + * Hybrid sparse/dense vector class designed for intensive read-write operations. + * + * See BasicSparseLLT and SparseProduct for usage examples. + */ +template +class AmbiVector +{ + public: + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef typename NumTraits::Real RealScalar; + + explicit AmbiVector(Index size) + : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) + { + resize(size); + } + + void init(double estimatedDensity); + void init(int mode); + + Index nonZeros() const; + + /** Specifies a sub-vector to work on */ + void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); } + + void setZero(); + + void restart(); + Scalar& coeffRef(Index i); + Scalar& coeff(Index i); + + class Iterator; + + ~AmbiVector() { delete[] m_buffer; } + + void resize(Index size) + { + if (m_allocatedSize < size) + reallocate(size); + m_size = convert_index(size); + } + + StorageIndex size() const { return m_size; } + + protected: + StorageIndex convert_index(Index idx) + { + return internal::convert_index(idx); + } + + void reallocate(Index size) + { + // if the size of the matrix is not too large, let's allocate a bit more than needed such + // that we can handle dense vector even in sparse mode. 
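+      // A worked example of the two sizing branches below (a sketch assuming
+      // Scalar==double and StorageIndex==int, i.e. sizeof(Scalar)==8 and
+      // sizeof(ListEl)==16 on a typical LP64 target):
+      //   size==100  (<1000): allocSize = (100*16 + 7)/8 = 200 Scalars and
+      //     m_allocatedElements = 200*8/16 = 100, so the buffer can hold the
+      //     full vector in either dense or linked-list (sparse) mode without
+      //     a further reallocation;
+      //   size==10000 (>=1000): exactly 10000 Scalars are allocated, which is
+      //     only 10000*8/16 = 5000 ListEl nodes; sparse mode then grows on
+      //     demand through reallocateSparse().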
+ delete[] m_buffer; + if (size<1000) + { + Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar); + m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl)); + m_buffer = new Scalar[allocSize]; + } + else + { + m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl)); + m_buffer = new Scalar[size]; + } + m_size = convert_index(size); + m_start = 0; + m_end = m_size; + } + + void reallocateSparse() + { + Index copyElements = m_allocatedElements; + m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size); + Index allocSize = m_allocatedElements * sizeof(ListEl); + allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar); + Scalar* newBuffer = new Scalar[allocSize]; + std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); + delete[] m_buffer; + m_buffer = newBuffer; + } + + protected: + // element type of the linked list + struct ListEl + { + StorageIndex next; + StorageIndex index; + Scalar value; + }; + + // used to store data in both mode + Scalar* m_buffer; + Scalar m_zero; + StorageIndex m_size; + StorageIndex m_start; + StorageIndex m_end; + StorageIndex m_allocatedSize; + StorageIndex m_allocatedElements; + StorageIndex m_mode; + + // linked list mode + StorageIndex m_llStart; + StorageIndex m_llCurrent; + StorageIndex m_llSize; +}; + +/** \returns the number of non zeros in the current sub vector */ +template +Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const +{ + if (m_mode==IsSparse) + return m_llSize; + else + return m_end - m_start; +} + +template +void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity) +{ + if (estimatedDensity>0.1) + init(IsDense); + else + init(IsSparse); +} + +template +void AmbiVector<_Scalar,_StorageIndex>::init(int mode) +{ + m_mode = mode; + // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings + // if (m_mode==IsSparse) + { + m_llSize = 0; + m_llStart = -1; + } +} + +/** Must be called whenever we might perform a write access + * with an index smaller than the previous one. + * + * Don't worry, this function is extremely cheap. + */ +template +void AmbiVector<_Scalar,_StorageIndex>::restart() +{ + m_llCurrent = m_llStart; +} + +/** Set all coefficients of current subvector to zero */ +template +void AmbiVector<_Scalar,_StorageIndex>::setZero() +{ + if (m_mode==IsDense) + { + for (Index i=m_start; i +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i) +{ + if (m_mode==IsDense) + return m_buffer[i]; + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); + // TODO factorize the following code to reduce code generation + eigen_assert(m_mode==IsSparse); + if (m_llSize==0) + { + // this is the first element + m_llStart = 0; + m_llCurrent = 0; + ++m_llSize; + llElements[0].value = Scalar(0); + llElements[0].index = convert_index(i); + llElements[0].next = -1; + return llElements[0].value; + } + else if (i=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); + while (nextel >= 0 && llElements[nextel].index<=i) + { + m_llCurrent = nextel; + nextel = llElements[nextel].next; + } + + if (llElements[m_llCurrent].index==i) + { + // the coefficient already exists and we found it ! 
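+      // (A note on the search above, with a hypothetical usage sketch: the
+      // linked-list walk stays cheap when writers scatter non-decreasing
+      // indices between restart() calls, e.g.
+      //   AmbiVector<double,int> v(n);
+      //   v.init(0.05);            // estimated density <= 0.1 -> sparse mode
+      //   for(Index pass = 0; pass < npasses; ++pass) {
+      //     v.restart();           // rewind before each increasing sweep
+      //     for(Index i : increasing_indices) v.coeffRef(i) += x;
+      //   }
+      // so m_llCurrent only moves forward within each sweep.)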
+ return llElements[m_llCurrent].value; + } + else + { + if (m_llSize>=m_allocatedElements) + { + reallocateSparse(); + llElements = reinterpret_cast(m_buffer); + } + eigen_internal_assert(m_llSize +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i) +{ + if (m_mode==IsDense) + return m_buffer[i]; + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); + eigen_assert(m_mode==IsSparse); + if ((m_llSize==0) || (i= 0 && llElements[elid].index +class AmbiVector<_Scalar,_StorageIndex>::Iterator +{ + public: + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + /** Default constructor + * \param vec the vector on which we iterate + * \param epsilon the minimal value used to prune zero coefficients. + * In practice, all coefficients having a magnitude smaller than \a epsilon + * are skipped. + */ + explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0) + : m_vector(vec) + { + using std::abs; + m_epsilon = epsilon; + m_isDense = m_vector.m_mode==IsDense; + if (m_isDense) + { + m_currentEl = 0; // this is to avoid a compilation warning + m_cachedValue = 0; // this is to avoid a compilation warning + m_cachedIndex = m_vector.m_start-1; + ++(*this); + } + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); + m_currentEl = m_vector.m_llStart; + while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon) + m_currentEl = llElements[m_currentEl].next; + if (m_currentEl<0) + { + m_cachedValue = 0; // this is to avoid a compilation warning + m_cachedIndex = -1; + } + else + { + m_cachedIndex = llElements[m_currentEl].index; + m_cachedValue = llElements[m_currentEl].value; + } + } + } + + StorageIndex index() const { return m_cachedIndex; } + Scalar value() const { return m_cachedValue; } + + operator bool() const { return m_cachedIndex>=0; } + + Iterator& operator++() + { + using std::abs; + if (m_isDense) + { + do { + ++m_cachedIndex; + } while (m_cachedIndex(m_vector.m_buffer); + do { + m_currentEl = llElements[m_currentEl].next; + } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon); + if (m_currentEl<0) + { + m_cachedIndex = -1; + } + else + { + m_cachedIndex = llElements[m_currentEl].index; + m_cachedValue = llElements[m_currentEl].value; + } + } + return *this; + } + + protected: + const AmbiVector& m_vector; // the target vector + StorageIndex m_currentEl; // the current element in sparse/linked-list mode + RealScalar m_epsilon; // epsilon used to prune zero coefficients + StorageIndex m_cachedIndex; // current coordinate + Scalar m_cachedValue; // current value + bool m_isDense; // mode of the vector +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_AMBIVECTOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/CompressedStorage.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/CompressedStorage.h new file mode 100644 index 000000000..d89fa0dae --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/CompressedStorage.h @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPRESSED_STORAGE_H +#define EIGEN_COMPRESSED_STORAGE_H + +namespace Eigen { + +namespace internal { + +/** \internal + * Stores a sparse set of values as a list of values and a list of indices. + * + */ +template +class CompressedStorage +{ + public: + + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + + protected: + + typedef typename NumTraits::Real RealScalar; + + public: + + CompressedStorage() + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + {} + + explicit CompressedStorage(Index size) + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + { + resize(size); + } + + CompressedStorage(const CompressedStorage& other) + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + { + *this = other; + } + + CompressedStorage& operator=(const CompressedStorage& other) + { + resize(other.size()); + if(other.size()>0) + { + internal::smart_copy(other.m_values, other.m_values + m_size, m_values); + internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); + } + return *this; + } + + void swap(CompressedStorage& other) + { + std::swap(m_values, other.m_values); + std::swap(m_indices, other.m_indices); + std::swap(m_size, other.m_size); + std::swap(m_allocatedSize, other.m_allocatedSize); + } + + ~CompressedStorage() + { + delete[] m_values; + delete[] m_indices; + } + + void reserve(Index size) + { + Index newAllocatedSize = m_size + size; + if (newAllocatedSize > m_allocatedSize) + reallocate(newAllocatedSize); + } + + void squeeze() + { + if (m_allocatedSize>m_size) + reallocate(m_size); + } + + void resize(Index size, double reserveSizeFactor = 0) + { + if (m_allocatedSize)(NumTraits::highest(), size + Index(reserveSizeFactor*double(size))); + if(realloc_size(i); + } + + inline Index size() const { return m_size; } + inline Index allocatedSize() const { return m_allocatedSize; } + inline void clear() { m_size = 0; } + + const Scalar* valuePtr() const { return m_values; } + Scalar* valuePtr() { return m_values; } + const StorageIndex* indexPtr() const { return m_indices; } + StorageIndex* indexPtr() { return m_indices; } + + inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; } + inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; } + + inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; } + inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; } + + /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ + inline Index searchLowerIndex(Index key) const + { + return searchLowerIndex(0, m_size, key); + } + + /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ + inline Index searchLowerIndex(Index start, Index end, Index key) const + { + while(end>start) + { + Index mid = (end+start)>>1; + if (m_indices[mid]=end) + return defaultValue; + else if (end>start && key==m_indices[end-1]) + return m_values[end-1]; + // ^^ optimization: let's first check if it is the last coefficient + // (very common in high level algorithms) + const Index id = searchLowerIndex(start,end-1,key); + return ((id=m_size || m_indices[id]!=key) + { + if (m_allocatedSize newValues(m_allocatedSize); + internal::scoped_array newIndices(m_allocatedSize); + + // copy first chunk + 
internal::smart_copy(m_values, m_values +id, newValues.ptr()); + internal::smart_copy(m_indices, m_indices+id, newIndices.ptr()); + + // copy the rest + if(m_size>id) + { + internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1); + internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1); + } + std::swap(m_values,newValues.ptr()); + std::swap(m_indices,newIndices.ptr()); + } + else if(m_size>id) + { + internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1); + internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); + } + m_size++; + m_indices[id] = internal::convert_index(key); + m_values[id] = defaultValue; + } + return m_values[id]; + } + + void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) + { + Index k = 0; + Index n = size(); + for (Index i=0; i newValues(size); + internal::scoped_array newIndices(size); + Index copySize = (std::min)(size, m_size); + if (copySize>0) { + internal::smart_copy(m_values, m_values+copySize, newValues.ptr()); + internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr()); + } + std::swap(m_values,newValues.ptr()); + std::swap(m_indices,newIndices.ptr()); + m_allocatedSize = size; + } + + protected: + Scalar* m_values; + StorageIndex* m_indices; + Index m_size; + Index m_allocatedSize; + +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPRESSED_STORAGE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h new file mode 100644 index 000000000..9db119b67 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -0,0 +1,352 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H +#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H + +namespace Eigen { + +namespace internal { + +template +static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false) +{ + typedef typename remove_all::type::Scalar LhsScalar; + typedef typename remove_all::type::Scalar RhsScalar; + typedef typename remove_all::type::Scalar ResScalar; + + // make sure to call innerSize/outerSize since we fake the storage order. 
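+  // (Both operands are addressed below through innerSize()/outerSize(), so a
+  // single kernel serves all storage-order combinations: the selector
+  // specializations further down pass transposed or copied views such that
+  // the outer dimension is the one iterated vector by vector. E.g. for
+  // column-major lhs (m x k) and rhs (k x n), rows==m and cols==n, and each
+  // iteration of the j-loop accumulates one result column into the dense
+  // scratch arrays values/mask before pushing its nonzeros into res.)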
+ Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); + eigen_assert(lhs.outerSize() == rhs.innerSize()); + + ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0); + ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0); + ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0); + + std::memset(mask,0,sizeof(bool)*rows); + + evaluator lhsEval(lhs); + evaluator rhsEval(rhs); + + // estimate the number of non zero entries + // given a rhs column containing Y non zeros, we assume that the respective Y columns + // of the lhs differs in average of one non zeros, thus the number of non zeros for + // the product of a rhs column with the lhs is X+Y where X is the average number of non zero + // per column of the lhs. + // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) + Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate(); + + res.setZero(); + res.reserve(Index(estimated_nnz_prod)); + // we compute each column of the result, one after the other + for (Index j=0; j::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) + { + RhsScalar y = rhsIt.value(); + Index k = rhsIt.index(); + for (typename evaluator::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt) + { + Index i = lhsIt.index(); + LhsScalar x = lhsIt.value(); + if(!mask[i]) + { + mask[i] = true; + values[i] = x * y; + indices[nnz] = i; + ++nnz; + } + else + values[i] += x * y; + } + } + if(!sortedInsertion) + { + // unordered insertion + for(Index k=0; k use a quick sort + // otherwise => loop through the entire vector + // In order to avoid to perform an expensive log2 when the + // result is clearly very sparse we use a linear bound up to 200. + if((nnz<200 && nnz1) std::sort(indices,indices+nnz); + for(Index k=0; k::Flags&RowMajorBit) ? RowMajor : ColMajor, + int RhsStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor, + int ResStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor> +struct conservative_sparse_sparse_product_selector; + +template +struct conservative_sparse_sparse_product_selector +{ + typedef typename remove_all::type LhsCleaned; + typedef typename LhsCleaned::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrixAux; + typedef typename sparse_eval::type ColMajorMatrix; + + // If the result is tall and thin (in the extreme case a column vector) + // then it is faster to sort the coefficients inplace instead of transposing twice. + // FIXME, the following heuristic is probably not very good. 
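+    // Illustration of the heuristic (hypothetical sizes): for a tall-thin
+    // product such as a (100000 x 4) result, the branch below sorts each
+    // result column's indices in place during insertion; otherwise, e.g. for
+    // a (1000 x 1000) result, it builds the columns unsorted and lets the two
+    // storage-order conversions (col-major -> row-major -> col-major) sort
+    // the inner vectors as a side effect.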
+ if(lhs.rows()>rhs.cols()) + { + ColMajorMatrix resCol(lhs.rows(),rhs.cols()); + // perform sorted insertion + internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol, true); + res = resCol.markAsRValue(); + } + else + { + ColMajorMatrixAux resCol(lhs.rows(),rhs.cols()); + // ressort to transpose to sort the entries + internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol, false); + RowMajorMatrix resRow(resCol); + res = resRow.markAsRValue(); + } + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorRhs; + typedef SparseMatrix RowMajorRes; + RowMajorRhs rhsRow = rhs; + RowMajorRes resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); + res = resRow; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorLhs; + typedef SparseMatrix RowMajorRes; + RowMajorLhs lhsRow = lhs; + RowMajorRes resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); + res = resRow; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + RowMajorMatrix resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); + res = resRow; + } +}; + + +template +struct conservative_sparse_sparse_product_selector +{ + typedef typename traits::type>::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorLhs; + typedef SparseMatrix ColMajorRes; + ColMajorLhs lhsCol = lhs; + ColMajorRes resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorRhs; + typedef SparseMatrix ColMajorRes; + ColMajorRhs rhsCol = rhs; + ColMajorRes resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; + RowMajorMatrix resRow(lhs.rows(),rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); + // sort the non zeros: + ColMajorMatrix resCol(resRow); + res = resCol; + } +}; + +} // end namespace internal + + +namespace internal { + +template +static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) +{ + typedef typename remove_all::type::Scalar LhsScalar; + typedef typename remove_all::type::Scalar RhsScalar; + Index cols = rhs.outerSize(); + eigen_assert(lhs.outerSize() == rhs.innerSize()); + + evaluator lhsEval(lhs); + evaluator rhsEval(rhs); + + for 
(Index j=0; j::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) + { + RhsScalar y = rhsIt.value(); + Index k = rhsIt.index(); + for (typename evaluator::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt) + { + Index i = lhsIt.index(); + LhsScalar x = lhsIt.value(); + res.coeffRef(i,j) += x * y; + } + } + } +} + + +} // end namespace internal + +namespace internal { + +template::Flags&RowMajorBit) ? RowMajor : ColMajor, + int RhsStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor> +struct sparse_sparse_to_dense_product_selector; + +template +struct sparse_sparse_to_dense_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + internal::sparse_sparse_to_dense_product_impl(lhs, rhs, res); + } +}; + +template +struct sparse_sparse_to_dense_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorLhs; + ColMajorLhs lhsCol(lhs); + internal::sparse_sparse_to_dense_product_impl(lhsCol, rhs, res); + } +}; + +template +struct sparse_sparse_to_dense_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorRhs; + ColMajorRhs rhsCol(rhs); + internal::sparse_sparse_to_dense_product_impl(lhs, rhsCol, res); + } +}; + +template +struct sparse_sparse_to_dense_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + Transpose trRes(res); + internal::sparse_sparse_to_dense_product_impl >(rhs, lhs, trRes); + } +}; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h new file mode 100644 index 000000000..67718c85b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -0,0 +1,67 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAPPED_SPARSEMATRIX_H +#define EIGEN_MAPPED_SPARSEMATRIX_H + +namespace Eigen { + +/** \deprecated Use Map > + * \class MappedSparseMatrix + * + * \brief Sparse matrix + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. 
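+ *
+ * A minimal usage sketch (hypothetical buffers describing a 3x3 identity in
+ * compressed column storage; the buffers must outlive the map):
+ * \code
+ * double vals[3]  = {1.0, 1.0, 1.0};
+ * int    inner[3] = {0, 1, 2};
+ * int    outer[4] = {0, 1, 2, 3};
+ * MappedSparseMatrix<double> M(3, 3, 3, outer, inner, vals);
+ * // equivalent, non-deprecated spelling:
+ * // Map<SparseMatrix<double> > M2(3, 3, 3, outer, inner, vals);
+ * \endcode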
+ * + */ +namespace internal { +template +struct traits > : traits > +{}; +} // end namespace internal + +template +class MappedSparseMatrix + : public Map > +{ + typedef Map > Base; + + public: + + typedef typename Base::StorageIndex StorageIndex; + typedef typename Base::Scalar Scalar; + + inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0) + : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr) + {} + + /** Empty destructor */ + inline ~MappedSparseMatrix() {} +}; + +namespace internal { + +template +struct evaluator > + : evaluator > > +{ + typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType; + typedef evaluator > Base; + + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_MAPPED_SPARSEMATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseAssign.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseAssign.h new file mode 100644 index 000000000..18352a847 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseAssign.h @@ -0,0 +1,216 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
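+
+// What this header implements, in user-visible terms (a standalone sketch;
+// the matrices and values are hypothetical):
+//
+//   #include <Eigen/SparseCore>
+//   #include <Eigen/Dense>
+//   using namespace Eigen;
+//   SparseMatrix<double> A(3,3), B;
+//   A.insert(0,0) = 1.0; A.insert(2,1) = 2.0; A.makeCompressed();
+//   B = A.transpose();         // sparse-to-sparse assignment; the flipped
+//                              // storage order is resolved via a temporary
+//   MatrixXd D = MatrixXd(A);  // sparse-to-dense: nonzeros are scattered
+//                              // into a zero-initialized dense matrix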
+ +#ifndef EIGEN_SPARSEASSIGN_H +#define EIGEN_SPARSEASSIGN_H + +namespace Eigen { + +template +template +Derived& SparseMatrixBase::operator=(const EigenBase &other) +{ + internal::call_assignment_no_alias(derived(), other.derived()); + return derived(); +} + +template +template +Derived& SparseMatrixBase::operator=(const ReturnByValue& other) +{ + // TODO use the evaluator mechanism + other.evalTo(derived()); + return derived(); +} + +template +template +inline Derived& SparseMatrixBase::operator=(const SparseMatrixBase& other) +{ + // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine + internal::Assignment > + ::run(derived(), other.derived(), internal::assign_op()); + return derived(); +} + +template +inline Derived& SparseMatrixBase::operator=(const Derived& other) +{ + internal::call_assignment_no_alias(derived(), other.derived()); + return derived(); +} + +namespace internal { + +template<> +struct storage_kind_to_evaluator_kind { + typedef IteratorBased Kind; +}; + +template<> +struct storage_kind_to_shape { + typedef SparseShape Shape; +}; + +struct Sparse2Sparse {}; +struct Sparse2Dense {}; + +template<> struct AssignmentKind { typedef Sparse2Sparse Kind; }; +template<> struct AssignmentKind { typedef Sparse2Sparse Kind; }; +template<> struct AssignmentKind { typedef Sparse2Dense Kind; }; +template<> struct AssignmentKind { typedef Sparse2Dense Kind; }; + + +template +void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src) +{ + typedef typename DstXprType::Scalar Scalar; + typedef internal::evaluator DstEvaluatorType; + typedef internal::evaluator SrcEvaluatorType; + + SrcEvaluatorType srcEvaluator(src); + + const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit); + const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols(); + if ((!transpose) && src.isRValue()) + { + // eval without temporary + dst.resize(src.rows(), src.cols()); + dst.setZero(); + dst.reserve((std::max)(src.rows(),src.cols())*2); + for (Index j=0; j::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) || + (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) && + "the transpose operation is supposed to be handled in SparseMatrix::operator="); + + enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) }; + + + DstXprType temp(src.rows(), src.cols()); + + temp.reserve((std::max)(src.rows(),src.cols())*2); + for (Index j=0; j +struct Assignment +{ + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + assign_sparse_to_sparse(dst.derived(), src.derived()); + } +}; + +// Generic Sparse to Dense assignment +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) + { + if(internal::is_same >::value) + dst.setZero(); + + internal::evaluator srcEval(src); + resize_if_allowed(dst, src, func); + internal::evaluator dstEval(dst); + + const Index outerEvaluationSize = (internal::evaluator::Flags&RowMajorBit) ? 
src.rows() : src.cols(); + for (Index j=0; j::InnerIterator i(srcEval,j); i; ++i) + func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value()); + } +}; + +// Specialization for "dst = dec.solve(rhs)" +// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error +template +struct Assignment, internal::assign_op, Sparse2Sparse> +{ + typedef Solve SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + src.dec()._solve_impl(src.rhs(), dst); + } +}; + +struct Diagonal2Sparse {}; + +template<> struct AssignmentKind { typedef Diagonal2Sparse Kind; }; + +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + typedef typename DstXprType::StorageIndex StorageIndex; + typedef typename DstXprType::Scalar Scalar; + typedef Array ArrayXI; + typedef Array ArrayXS; + template + static void run(SparseMatrix &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + Index size = src.diagonal().size(); + dst.makeCompressed(); + dst.resizeNonZeros(size); + Map(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1); + Map(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size)); + Map(dst.valuePtr(), size) = src.diagonal(); + } + + template + static void run(SparseMatrixBase &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + dst.diagonal() = src.diagonal(); + } + + static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &/*func*/) + { dst.diagonal() += src.diagonal(); } + + static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &/*func*/) + { dst.diagonal() -= src.diagonal(); } +}; +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSEASSIGN_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseBlock.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseBlock.h new file mode 100644 index 000000000..511e92b2f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseBlock.h @@ -0,0 +1,603 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_BLOCK_H +#define EIGEN_SPARSE_BLOCK_H + +namespace Eigen { + +// Subset of columns or rows +template +class BlockImpl + : public SparseMatrixBase > +{ + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef Block BlockType; +public: + enum { IsRowMajor = internal::traits::IsRowMajor }; +protected: + enum { OuterSize = IsRowMajor ? 
BlockRows : BlockCols }; + typedef SparseMatrixBase Base; + using Base::convert_index; +public: + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) + + inline BlockImpl(XprType& xpr, Index i) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) + {} + + inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) + {} + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + Index nonZeros() const + { + typedef internal::evaluator EvaluatorType; + EvaluatorType matEval(m_matrix); + Index nnz = 0; + Index end = m_outerStart + m_outerSize.value(); + for(Index j=m_outerStart; j::non_const_type m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + + protected: + // Disable assignment with clear error message. + // Note that simply removing operator= yields compilation errors with ICC+MSVC + template + BlockImpl& operator=(const T&) + { + EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY); + return *this; + } +}; + + +/*************************************************************************** +* specialization for SparseMatrix +***************************************************************************/ + +namespace internal { + +template +class sparse_matrix_block_impl + : public SparseCompressedBase > +{ + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef Block BlockType; + typedef SparseCompressedBase > Base; + using Base::convert_index; +public: + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) +protected: + typedef typename Base::IndexVector IndexVector; + enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; +public: + + inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) + {} + + inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) + {} + + template + inline BlockType& operator=(const SparseMatrixBase& other) + { + typedef typename internal::remove_all::type _NestedMatrixType; + _NestedMatrixType& matrix = m_matrix; + // This assignment is slow if this vector set is not empty + // and/or it is not at the end of the nonzeros of the underlying matrix. + + // 1 - eval to a temporary to avoid transposition and/or aliasing issues + Ref > tmp(other.derived()); + eigen_internal_assert(tmp.outerSize()==m_outerSize.value()); + + // 2 - let's check whether there is enough allocated memory + Index nnz = tmp.nonZeros(); + Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block + Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block + Index block_size = end - start; // available room in the current block + Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; + + Index free_size = m_matrix.isCompressed() + ? 
Index(matrix.data().allocatedSize()) + block_size + : block_size; + + Index tmp_start = tmp.outerIndexPtr()[0]; + + bool update_trailing_pointers = false; + if(nnz>free_size) + { + // realloc manually to reduce copies + typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); + + internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr()); + internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr()); + + internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start); + internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start); + + internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz); + internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz); + + newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); + + matrix.data().swap(newdata); + + update_trailing_pointers = true; + } + else + { + if(m_matrix.isCompressed()) + { + // no need to realloc, simply copy the tail at its respective position and insert tmp + matrix.data().resize(start + nnz + tail_size); + + internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz); + internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz); + + update_trailing_pointers = true; + } + + internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start); + internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start); + } + + // update outer index pointers and innerNonZeros + if(IsVectorAtCompileTime) + { + if(!m_matrix.isCompressed()) + matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz); + matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start); + } + else + { + StorageIndex p = StorageIndex(start); + for(Index k=0; k(tmp.innerVector(k).nonZeros()); + if(!m_matrix.isCompressed()) + matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k; + matrix.outerIndexPtr()[m_outerStart+k] = p; + p += nnz_k; + } + } + + if(update_trailing_pointers) + { + StorageIndex offset = internal::convert_index(nnz - block_size); + for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k) + { + matrix.outerIndexPtr()[k] += offset; + } + } + + return derived(); + } + + inline BlockType& operator=(const BlockType& other) + { + return operator=(other); + } + + inline const Scalar* valuePtr() const + { return m_matrix.valuePtr(); } + inline Scalar* valuePtr() + { return m_matrix.valuePtr(); } + + inline const StorageIndex* innerIndexPtr() const + { return m_matrix.innerIndexPtr(); } + inline StorageIndex* innerIndexPtr() + { return m_matrix.innerIndexPtr(); } + + inline const StorageIndex* outerIndexPtr() const + { return m_matrix.outerIndexPtr() + m_outerStart; } + inline StorageIndex* outerIndexPtr() + { return m_matrix.outerIndexPtr() + m_outerStart; } + + inline const StorageIndex* innerNonZeroPtr() const + { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); } + inline StorageIndex* innerNonZeroPtr() + { return isCompressed() ? 
0 : (m_matrix.innerNonZeroPtr()+m_outerStart); } + + bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(Index row, Index col) const + { + return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(Index index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } + + const Scalar& lastCoeff() const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl); + eigen_assert(Base::nonZeros()>0); + if(m_matrix.isCompressed()) + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; + else + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; + } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + inline const SparseMatrixType& nestedExpression() const { return m_matrix; } + inline SparseMatrixType& nestedExpression() { return m_matrix; } + Index startRow() const { return IsRowMajor ? m_outerStart : 0; } + Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } + Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + typename internal::ref_selector::non_const_type m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + +}; + +} // namespace internal + +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +{ +public: + typedef _StorageIndex StorageIndex; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; + typedef internal::sparse_matrix_block_impl Base; + inline BlockImpl(SparseMatrixType& xpr, Index i) + : Base(xpr, i) + {} + + inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : Base(xpr, startRow, startCol, blockRows, blockCols) + {} + + using Base::operator=; +}; + +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +{ +public: + typedef _StorageIndex StorageIndex; + typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; + typedef internal::sparse_matrix_block_impl Base; + inline BlockImpl(SparseMatrixType& xpr, Index i) + : Base(xpr, i) + {} + + inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : Base(xpr, startRow, startCol, blockRows, blockCols) + {} + + using Base::operator=; +private: + template BlockImpl(const SparseMatrixBase& xpr, Index i); + template BlockImpl(const SparseMatrixBase& xpr); +}; + +//---------- + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +typename SparseMatrixBase::InnerVectorReturnType SparseMatrixBase::innerVector(Index outer) +{ return InnerVectorReturnType(derived(), outer); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. 
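+ *
+ * For instance (hypothetical sketch on a column-major matrix):
+ * \code
+ * SparseMatrix<double> A(4,4);
+ * A.insert(1,2) = 5.0;
+ * const SparseMatrix<double>& cA = A;
+ * double s = cA.innerVector(2).sum(); // reads column 2, here 5.0
+ * \endcode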
+ */ +template +const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatrixBase::innerVector(Index outer) const +{ return ConstInnerVectorReturnType(derived(), outer); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +typename SparseMatrixBase::InnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) +{ + return Block(derived(), + IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, + IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); + +} + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. + */ +template +const typename SparseMatrixBase::ConstInnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const +{ + return Block(derived(), + IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, + IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); + +} + +/** Generic implementation of sparse Block expression. + * Real-only. + */ +template +class BlockImpl + : public SparseMatrixBase >, internal::no_assignment_operator +{ + typedef Block BlockType; + typedef SparseMatrixBase Base; + using Base::convert_index; +public: + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) + + typedef typename internal::remove_all::type _MatrixTypeNested; + + /** Column or Row constructor + */ + inline BlockImpl(XprType& xpr, Index i) + : m_matrix(xpr), + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0), + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ? 1 : xpr.cols()) + {} + + /** Dynamic-size constructor + */ + inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) + : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols)) + {} + + inline Index rows() const { return m_blockRows.value(); } + inline Index cols() const { return m_blockCols.value(); } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + inline const Scalar coeff(Index row, Index col) const + { + return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); + } + + inline Scalar& coeffRef(Index index) + { + return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const Scalar coeff(Index index) const + { + return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); + } + + inline const XprType& nestedExpression() const { return m_matrix; } + inline XprType& nestedExpression() { return m_matrix; } + Index startRow() const { return m_startRow.value(); } + Index startCol() const { return m_startCol.value(); } + Index blockRows() const { return m_blockRows.value(); } + Index blockCols() const { return m_blockCols.value(); } + + protected: +// friend class internal::GenericSparseBlockInnerIteratorImpl; + friend struct internal::unary_evaluator, internal::IteratorBased, Scalar >; + + Index nonZeros() const { return Dynamic; } + + typename internal::ref_selector::non_const_type m_matrix; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; + + protected: + // Disable assignment with clear error message. + // Note that simply removing operator= yields compilation errors with ICC+MSVC + template + BlockImpl& operator=(const T&) + { + EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY); + return *this; + } + +}; + +namespace internal { + +template +struct unary_evaluator, IteratorBased > + : public evaluator_base > +{ + class InnerVectorInnerIterator; + class OuterVectorInnerIterator; + public: + typedef Block XprType; + typedef typename XprType::StorageIndex StorageIndex; + typedef typename XprType::Scalar Scalar; + + enum { + IsRowMajor = XprType::IsRowMajor, + + OuterVector = (BlockCols==1 && ArgType::IsRowMajor) + | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&". + // revert to || as soon as not needed anymore. + (BlockRows==1 && !ArgType::IsRowMajor), + + CoeffReadCost = evaluator::CoeffReadCost, + Flags = XprType::Flags + }; + + typedef typename internal::conditional::type InnerIterator; + + explicit unary_evaluator(const XprType& op) + : m_argImpl(op.nestedExpression()), m_block(op) + {} + + inline Index nonZerosEstimate() const { + Index nnz = m_block.nonZeros(); + if(nnz<0) + return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size(); + return nnz; + } + + protected: + typedef typename evaluator::InnerIterator EvalIterator; + + evaluator m_argImpl; + const XprType &m_block; +}; + +template +class unary_evaluator, IteratorBased>::InnerVectorInnerIterator + : public EvalIterator +{ + enum { IsRowMajor = unary_evaluator::IsRowMajor }; + const XprType& m_block; + Index m_end; +public: + + EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer) + : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())), + m_block(aEval.m_block), + m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()) + { + while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) ) + EvalIterator::operator++(); + } + + inline StorageIndex index() const { return EvalIterator::index() - convert_index(IsRowMajor ? m_block.startCol() : m_block.startRow()); } + inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? 
m_block.startRow() : m_block.startCol()); } + inline Index row() const { return EvalIterator::row() - m_block.startRow(); } + inline Index col() const { return EvalIterator::col() - m_block.startCol(); } + + inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; } +}; + +template +class unary_evaluator, IteratorBased>::OuterVectorInnerIterator +{ + enum { IsRowMajor = unary_evaluator::IsRowMajor }; + const unary_evaluator& m_eval; + Index m_outerPos; + const Index m_innerIndex; + Index m_end; + EvalIterator m_it; +public: + + EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer) + : m_eval(aEval), + m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ), + m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()), + m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()), + m_it(m_eval.m_argImpl, m_outerPos) + { + EIGEN_UNUSED_VARIABLE(outer); + eigen_assert(outer==0); + + while(m_it && m_it.index() < m_innerIndex) ++m_it; + if((!m_it) || (m_it.index()!=m_innerIndex)) + ++(*this); + } + + inline StorageIndex index() const { return convert_index(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); } + inline Index outer() const { return 0; } + inline Index row() const { return IsRowMajor ? 0 : index(); } + inline Index col() const { return IsRowMajor ? index() : 0; } + + inline Scalar value() const { return m_it.value(); } + inline Scalar& valueRef() { return m_it.valueRef(); } + + inline OuterVectorInnerIterator& operator++() + { + // search next non-zero entry + while(++m_outerPos +struct unary_evaluator,BlockRows,BlockCols,true>, IteratorBased> + : evaluator,BlockRows,BlockCols,true> > > +{ + typedef Block,BlockRows,BlockCols,true> XprType; + typedef evaluator > Base; + explicit unary_evaluator(const XprType &xpr) : Base(xpr) {} +}; + +template +struct unary_evaluator,BlockRows,BlockCols,true>, IteratorBased> + : evaluator,BlockRows,BlockCols,true> > > +{ + typedef Block,BlockRows,BlockCols,true> XprType; + typedef evaluator > Base; + explicit unary_evaluator(const XprType &xpr) : Base(xpr) {} +}; + +} // end namespace internal + + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_BLOCK_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseColEtree.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseColEtree.h new file mode 100644 index 000000000..ebe02d1ab --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseColEtree.h @@ -0,0 +1,206 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +/* + + * NOTE: This file is the modified version of sp_coletree.c file in SuperLU + + * -- SuperLU routine (version 3.1) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * August 1, 2008 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. 
+ * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSE_COLETREE_H +#define SPARSE_COLETREE_H + +namespace Eigen { + +namespace internal { + +/** Find the root of the tree/set containing the vertex i : Use Path halving */ +template +Index etree_find (Index i, IndexVector& pp) +{ + Index p = pp(i); // Parent + Index gp = pp(p); // Grand parent + while (gp != p) + { + pp(i) = gp; // Parent pointer on find path is changed to former grand parent + i = gp; + p = pp(i); + gp = pp(p); + } + return p; +} + +/** Compute the column elimination tree of a sparse matrix + * \param mat The matrix in column-major format. + * \param parent The elimination tree + * \param firstRowElt The column index of the first element in each row + * \param perm The permutation to apply to the column of \b mat + */ +template +int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) +{ + typedef typename MatrixType::StorageIndex StorageIndex; + StorageIndex nc = convert_index(mat.cols()); // Number of columns + StorageIndex m = convert_index(mat.rows()); + StorageIndex diagSize = (std::min)(nc,m); + IndexVector root(nc); // root of subtree of etree + root.setZero(); + IndexVector pp(nc); // disjoint sets + pp.setZero(); // Initialize disjoint sets + parent.resize(mat.cols()); + //Compute first nonzero column in each row + firstRowElt.resize(m); + firstRowElt.setConstant(nc); + firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); + bool found_diag; + for (StorageIndex col = 0; col < nc; col++) + { + StorageIndex pcol = col; + if(perm) pcol = perm[col]; + for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it) + { + Index row = it.row(); + firstRowElt(row) = (std::min)(firstRowElt(row), col); + } + } + /* Compute etree by Liu's algorithm for symmetric matrices, + except use (firstRowElt[r],c) in place of an edge (r,c) of A. + Thus each row clique in A'*A is replaced by a star + centered at its first vertex, which has the same fill. */ + StorageIndex rset, cset, rroot; + for (StorageIndex col = 0; col < nc; col++) + { + found_diag = col>=m; + pp(col) = col; + cset = col; + root(cset) = col; + parent(col) = nc; + /* The diagonal element is treated here even if it does not exist in the matrix + * hence the loop is executed once more */ + StorageIndex pcol = col; + if(perm) pcol = perm[col]; + for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it) + { // A sequence of interleaved find and union is performed + Index i = col; + if(it) i = it.index(); + if (i == col) found_diag = true; + + StorageIndex row = firstRowElt(i); + if (row >= col) continue; + rset = internal::etree_find(row, pp); // Find the name of the set containing row + rroot = root(rset); + if (rroot != col) + { + parent(rroot) = col; + pp(cset) = rset; + cset = rset; + root(cset) = col; + } + } + } + return 0; +} + +/** + * Depth-first search from vertex n. No recursion. + * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. 
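+ *
+ * Worked example (hypothetical): with n==3 and parent = [2, 2, 3]
+ * (vertices 0 and 1 hang below 2, and 2 below the dummy root 3),
+ * treePostorder() builds first_kid(3)=2, first_kid(2)=0, next_kid(0)=1,
+ * and this traversal then yields post = [0, 1, 2] for the real vertices:
+ * every child is numbered before its parent, as a postorder requires.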
+*/ +template +void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum) +{ + typedef typename IndexVector::Scalar StorageIndex; + StorageIndex current = n, first, next; + while (postnum != n) + { + // No kid for the current node + first = first_kid(current); + + // no kid for the current node + if (first == -1) + { + // Numbering this node because it has no kid + post(current) = postnum++; + + // looking for the next kid + next = next_kid(current); + while (next == -1) + { + // No more kids : back to the parent node + current = parent(current); + // numbering the parent node + post(current) = postnum++; + + // Get the next kid + next = next_kid(current); + } + // stopping criterion + if (postnum == n+1) return; + + // Updating current node + current = next; + } + else + { + current = first; + } + } +} + + +/** + * \brief Post order a tree + * \param n the number of nodes + * \param parent Input tree + * \param post postordered tree + */ +template +void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post) +{ + typedef typename IndexVector::Scalar StorageIndex; + IndexVector first_kid, next_kid; // Linked list of children + StorageIndex postnum; + // Allocate storage for working arrays and results + first_kid.resize(n+1); + next_kid.setZero(n+1); + post.setZero(n+1); + + // Set up structure describing children + first_kid.setConstant(-1); + for (StorageIndex v = n-1; v >= 0; v--) + { + StorageIndex dad = parent(v); + next_kid(v) = first_kid(dad); + first_kid(dad) = v; + } + + // Depth-first search from dummy root vertex #n + postnum = 0; + internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // SPARSE_COLETREE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCompressedBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCompressedBase.h new file mode 100644 index 000000000..5ccb46656 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCompressedBase.h @@ -0,0 +1,341 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H +#define EIGEN_SPARSE_COMPRESSED_BASE_H + +namespace Eigen { + +template class SparseCompressedBase; + +namespace internal { + +template +struct traits > : traits +{}; + +} // end namespace internal + +/** \ingroup SparseCore_Module + * \class SparseCompressedBase + * \brief Common base class for sparse [compressed]-{row|column}-storage format. 
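+ *
+ * Concretely, the storage consists of three arrays; a hypothetical sketch
+ * for a 3x3 column-major matrix with nonzeros (0,0)=1, (2,0)=2, (1,2)=3:
+ * \code
+ * SparseMatrix<double> A(3,3);
+ * A.insert(0,0) = 1; A.insert(2,0) = 2; A.insert(1,2) = 3;
+ * A.makeCompressed();
+ * // A.outerIndexPtr(): {0, 2, 2, 3}  (start of each column)
+ * // A.innerIndexPtr(): {0, 2, 1}     (row index of each value)
+ * // A.valuePtr():      {1, 2, 3}
+ * \endcode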
+ * + * This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as: + * - SparseMatrix + * - Ref + * - Map + * + */ +template +class SparseCompressedBase + : public SparseMatrixBase +{ + public: + typedef SparseMatrixBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase) + using Base::operator=; + using Base::IsRowMajor; + + class InnerIterator; + class ReverseInnerIterator; + + protected: + typedef typename Base::IndexVector IndexVector; + Eigen::Map innerNonZeros() { return Eigen::Map(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); } + const Eigen::Map innerNonZeros() const { return Eigen::Map(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); } + + public: + + /** \returns the number of non zero coefficients */ + inline Index nonZeros() const + { + if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0) + return derived().nonZeros(); + else if(isCompressed()) + return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0]; + else if(derived().outerSize()==0) + return 0; + else + return innerNonZeros().sum(); + } + + /** \returns a const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline const Scalar* valuePtr() const { return derived().valuePtr(); } + /** \returns a non-const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline Scalar* valuePtr() { return derived().valuePtr(); } + + /** \returns a const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); } + /** \returns a non-const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); } + + /** \returns a const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 for SparseVector + * \sa valuePtr(), innerIndexPtr() */ + inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); } + /** \returns a non-const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 for SparseVector + * \sa valuePtr(), innerIndexPtr() */ + inline StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); } + + /** \returns a const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); } + /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); } + + /** \returns whether \c *this is in compressed form. 
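// [Editor's illustration, not part of this patch.] The raw-pointer accessors
// above expose the three compressed-storage arrays for interoperability, and
// InnerIterator (defined further below) is the idiomatic way to walk stored
// entries. A usage sketch against the public API:
#include <Eigen/Sparse>

static double sum_stored(const Eigen::SparseMatrix<double>& A)
{
    // Raw interop view (valid in compressed mode): outerIndexPtr() holds
    // outerSize()+1 offsets into the valuePtr()/innerIndexPtr() arrays.
    const double* vals = A.valuePtr();
    const int*    offs = A.outerIndexPtr();
    double s1 = 0;
    for (int k = 0; k < offs[A.outerSize()]; ++k)
        s1 += vals[k];

    // Equivalent iterator-based traversal:
    double s2 = 0;
    for (int j = 0; j < A.outerSize(); ++j)
        for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
            s2 += it.value(); // it.row(), it.col(), it.index() are also available

    return (s1 + s2) / 2; // the two sums agree whenever A.isCompressed()
}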
*/ + inline bool isCompressed() const { return innerNonZeroPtr()==0; } + + /** \returns a read-only view of the stored coefficients as a 1D array expression. + * + * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise. + * + * \sa valuePtr(), isCompressed() */ + const Map > coeffs() const { eigen_assert(isCompressed()); return Array::Map(valuePtr(),nonZeros()); } + + /** \returns a read-write view of the stored coefficients as a 1D array expression + * + * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise. + * + * Here is an example: + * \include SparseMatrix_coeffs.cpp + * and the output is: + * \include SparseMatrix_coeffs.out + * + * \sa valuePtr(), isCompressed() */ + Map > coeffs() { eigen_assert(isCompressed()); return Array::Map(valuePtr(),nonZeros()); } + + protected: + /** Default constructor. Do nothing. */ + SparseCompressedBase() {} + private: + template explicit SparseCompressedBase(const SparseCompressedBase&); +}; + +template +class SparseCompressedBase::InnerIterator +{ + public: + InnerIterator() + : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0) + {} + + InnerIterator(const InnerIterator& other) + : m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end) + {} + + InnerIterator& operator=(const InnerIterator& other) + { + m_values = other.m_values; + m_indices = other.m_indices; + const_cast(m_outer).setValue(other.m_outer.value()); + m_id = other.m_id; + m_end = other.m_end; + return *this; + } + + InnerIterator(const SparseCompressedBase& mat, Index outer) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer) + { + if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0) + { + m_id = 0; + m_end = mat.nonZeros(); + } + else + { + m_id = mat.outerIndexPtr()[outer]; + if(mat.isCompressed()) + m_end = mat.outerIndexPtr()[outer+1]; + else + m_end = m_id + mat.innerNonZeroPtr()[outer]; + } + } + + explicit InnerIterator(const SparseCompressedBase& mat) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros()) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + } + + explicit InnerIterator(const internal::CompressedStorage& data) + : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size()) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + } + + inline InnerIterator& operator++() { m_id++; return *this; } + + inline const Scalar& value() const { return m_values[m_id]; } + inline Scalar& valueRef() { return const_cast(m_values[m_id]); } + + inline StorageIndex index() const { return m_indices[m_id]; } + inline Index outer() const { return m_outer.value(); } + inline Index row() const { return IsRowMajor ? m_outer.value() : index(); } + inline Index col() const { return IsRowMajor ? 
index() : m_outer.value(); } + + inline operator bool() const { return (m_id < m_end); } + + protected: + const Scalar* m_values; + const StorageIndex* m_indices; + typedef internal::variable_if_dynamic OuterType; + const OuterType m_outer; + Index m_id; + Index m_end; + private: + // If you get here, then you're not using the right InnerIterator type, e.g.: + // SparseMatrix A; + // SparseMatrix::InnerIterator it(A,0); + template InnerIterator(const SparseMatrixBase&, Index outer); +}; + +template +class SparseCompressedBase::ReverseInnerIterator +{ + public: + ReverseInnerIterator(const SparseCompressedBase& mat, Index outer) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer) + { + if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0) + { + m_start = 0; + m_id = mat.nonZeros(); + } + else + { + m_start = mat.outerIndexPtr()[outer]; + if(mat.isCompressed()) + m_id = mat.outerIndexPtr()[outer+1]; + else + m_id = m_start + mat.innerNonZeroPtr()[outer]; + } + } + + explicit ReverseInnerIterator(const SparseCompressedBase& mat) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros()) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + } + + explicit ReverseInnerIterator(const internal::CompressedStorage& data) + : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size()) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + } + + inline ReverseInnerIterator& operator--() { --m_id; return *this; } + + inline const Scalar& value() const { return m_values[m_id-1]; } + inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } + + inline StorageIndex index() const { return m_indices[m_id-1]; } + inline Index outer() const { return m_outer.value(); } + inline Index row() const { return IsRowMajor ? m_outer.value() : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer.value(); } + + inline operator bool() const { return (m_id > m_start); } + + protected: + const Scalar* m_values; + const StorageIndex* m_indices; + typedef internal::variable_if_dynamic OuterType; + const OuterType m_outer; + Index m_start; + Index m_id; +}; + +namespace internal { + +template +struct evaluator > + : evaluator_base +{ + typedef typename Derived::Scalar Scalar; + typedef typename Derived::InnerIterator InnerIterator; + + enum { + CoeffReadCost = NumTraits::ReadCost, + Flags = Derived::Flags + }; + + evaluator() : m_matrix(0), m_zero(0) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_matrix->nonZeros(); + } + + operator Derived&() { return m_matrix->const_cast_derived(); } + operator const Derived&() const { return *m_matrix; } + + typedef typename DenseCoeffsBase::CoeffReturnType CoeffReturnType; + const Scalar& coeff(Index row, Index col) const + { + Index p = find(row,col); + + if(p==Dynamic) + return m_zero; + else + return m_matrix->const_cast_derived().valuePtr()[p]; + } + + Scalar& coeffRef(Index row, Index col) + { + Index p = find(row,col); + eigen_assert(p!=Dynamic && "written coefficient does not exist"); + return m_matrix->const_cast_derived().valuePtr()[p]; + } + +protected: + + Index find(Index row, Index col) const + { + eigen_internal_assert(row>=0 && rowrows() && col>=0 && colcols()); + + const Index outer = Derived::IsRowMajor ? 
row : col; + const Index inner = Derived::IsRowMajor ? col : row; + + Index start = m_matrix->outerIndexPtr()[outer]; + Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer]; + eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist"); + const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr(); + + return ((pinnerIndexPtr()[p]==inner)) ? p : Dynamic; + } + + const Derived *m_matrix; + const Scalar m_zero; +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_COMPRESSED_BASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h new file mode 100644 index 000000000..e315e3550 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -0,0 +1,726 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H +#define EIGEN_SPARSE_CWISE_BINARY_OP_H + +namespace Eigen { + +// Here we have to handle 3 cases: +// 1 - sparse op dense +// 2 - dense op sparse +// 3 - sparse op sparse +// We also need to implement a 4th iterator for: +// 4 - dense op dense +// Finally, we also need to distinguish between the product and other operations : +// configuration returned mode +// 1 - sparse op dense product sparse +// generic dense +// 2 - dense op sparse product sparse +// generic dense +// 3 - sparse op sparse product sparse +// generic sparse +// 4 - dense op dense product dense +// generic dense +// +// TODO to ease compiler job, we could specialize product/quotient with a scalar +// and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op. 
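// [Editor's illustration, not part of this patch.] The dispatch table above,
// seen from user code: the result kind follows the table (an entry-wise
// product stays sparse, while other mixed sparse/dense ops produce dense):
#include <Eigen/Dense>
#include <Eigen/Sparse>

static void binary_op_cases(const Eigen::SparseMatrix<double>& A,
                            const Eigen::SparseMatrix<double>& B,
                            const Eigen::MatrixXd& D)
{
    Eigen::SparseMatrix<double> S = A + B;             // sparse op sparse -> sparse
    Eigen::MatrixXd M1 = D + A;                        // dense op sparse  -> dense
    Eigen::MatrixXd M2 = A - D;                        // sparse op dense  -> dense
    Eigen::SparseMatrix<double> P = A.cwiseProduct(D); // product with dense -> still sparse
    (void)S; (void)M1; (void)M2; (void)P;
}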
+ +template +class CwiseBinaryOpImpl + : public SparseMatrixBase > +{ + public: + typedef CwiseBinaryOp Derived; + typedef SparseMatrixBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) + CwiseBinaryOpImpl() + { + EIGEN_STATIC_ASSERT(( + (!internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value) + || ((internal::evaluator::Flags&RowMajorBit) == (internal::evaluator::Flags&RowMajorBit))), + THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); + } +}; + +namespace internal { + + +// Generic "sparse OP sparse" +template struct binary_sparse_evaluator; + +template +struct binary_evaluator, IteratorBased, IteratorBased> + : evaluator_base > +{ +protected: + typedef typename evaluator::InnerIterator LhsIterator; + typedef typename evaluator::InnerIterator RhsIterator; + typedef CwiseBinaryOp XprType; + typedef typename traits::Scalar Scalar; + typedef typename XprType::StorageIndex StorageIndex; +public: + + class InnerIterator + { + public: + + EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer) + : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor) + { + this->operator++(); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) + { + m_id = m_lhsIter.index(); + m_value = m_functor(m_lhsIter.value(), m_rhsIter.value()); + ++m_lhsIter; + ++m_rhsIter; + } + else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) + { + m_id = m_lhsIter.index(); + m_value = m_functor(m_lhsIter.value(), Scalar(0)); + ++m_lhsIter; + } + else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) + { + m_id = m_rhsIter.index(); + m_value = m_functor(Scalar(0), m_rhsIter.value()); + ++m_rhsIter; + } + else + { + m_value = 0; // this is to avoid a compilation warning + m_id = -1; + } + return *this; + } + + EIGEN_STRONG_INLINE Scalar value() const { return m_value; } + + EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; } + EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); } + EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } + EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? 
index() : m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } + + protected: + LhsIterator m_lhsIter; + RhsIterator m_rhsIter; + const BinaryOp& m_functor; + Scalar m_value; + StorageIndex m_id; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + Flags = XprType::Flags + }; + + explicit binary_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate(); + } + +protected: + const BinaryOp m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; +}; + +// dense op sparse +template +struct binary_evaluator, IndexBased, IteratorBased> + : evaluator_base > +{ +protected: + typedef typename evaluator::InnerIterator RhsIterator; + typedef CwiseBinaryOp XprType; + typedef typename traits::Scalar Scalar; + typedef typename XprType::StorageIndex StorageIndex; +public: + + class InnerIterator + { + enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; + public: + + EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer) + : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize()) + { + this->operator++(); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + ++m_id; + if(m_id &m_lhsEval; + RhsIterator m_rhsIter; + const BinaryOp& m_functor; + Scalar m_value; + StorageIndex m_id; + StorageIndex m_innerSize; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + // Expose storage order of the sparse expression + Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit) + }; + + explicit binary_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()), + m_expr(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_expr.size(); + } + +protected: + const BinaryOp m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; + const XprType &m_expr; +}; + +// sparse op dense +template +struct binary_evaluator, IteratorBased, IndexBased> + : evaluator_base > +{ +protected: + typedef typename evaluator::InnerIterator LhsIterator; + typedef CwiseBinaryOp XprType; + typedef typename traits::Scalar Scalar; + typedef typename XprType::StorageIndex StorageIndex; +public: + + class InnerIterator + { + enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; + public: + + EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer) + : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize()) + { + this->operator++(); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + ++m_id; + if(m_id &m_rhsEval; + const BinaryOp& m_functor; + Scalar m_value; + StorageIndex m_id; + StorageIndex m_innerSize; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + // Expose storage order of the sparse expression + Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit) + }; + + explicit 
binary_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()), + m_expr(xpr) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_expr.size(); + } + +protected: + const BinaryOp m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; + const XprType &m_expr; +}; + +template::Kind, + typename RhsKind = typename evaluator_traits::Kind, + typename LhsScalar = typename traits::Scalar, + typename RhsScalar = typename traits::Scalar> struct sparse_conjunction_evaluator; + +// "sparse .* sparse" +template +struct binary_evaluator, Lhs, Rhs>, IteratorBased, IteratorBased> + : sparse_conjunction_evaluator, Lhs, Rhs> > +{ + typedef CwiseBinaryOp, Lhs, Rhs> XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; +// "dense .* sparse" +template +struct binary_evaluator, Lhs, Rhs>, IndexBased, IteratorBased> + : sparse_conjunction_evaluator, Lhs, Rhs> > +{ + typedef CwiseBinaryOp, Lhs, Rhs> XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; +// "sparse .* dense" +template +struct binary_evaluator, Lhs, Rhs>, IteratorBased, IndexBased> + : sparse_conjunction_evaluator, Lhs, Rhs> > +{ + typedef CwiseBinaryOp, Lhs, Rhs> XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; + +// "sparse ./ dense" +template +struct binary_evaluator, Lhs, Rhs>, IteratorBased, IndexBased> + : sparse_conjunction_evaluator, Lhs, Rhs> > +{ + typedef CwiseBinaryOp, Lhs, Rhs> XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; + +// "sparse && sparse" +template +struct binary_evaluator, IteratorBased, IteratorBased> + : sparse_conjunction_evaluator > +{ + typedef CwiseBinaryOp XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; +// "dense && sparse" +template +struct binary_evaluator, IndexBased, IteratorBased> + : sparse_conjunction_evaluator > +{ + typedef CwiseBinaryOp XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; +// "sparse && dense" +template +struct binary_evaluator, IteratorBased, IndexBased> + : sparse_conjunction_evaluator > +{ + typedef CwiseBinaryOp XprType; + typedef sparse_conjunction_evaluator Base; + explicit binary_evaluator(const XprType& xpr) : Base(xpr) {} +}; + +// "sparse ^ sparse" +template +struct sparse_conjunction_evaluator + : evaluator_base +{ +protected: + typedef typename XprType::Functor BinaryOp; + typedef typename XprType::Lhs LhsArg; + typedef typename XprType::Rhs RhsArg; + typedef typename evaluator::InnerIterator LhsIterator; + typedef typename evaluator::InnerIterator RhsIterator; + typedef typename XprType::StorageIndex StorageIndex; + typedef typename traits::Scalar Scalar; +public: + + class InnerIterator + { + public: + + EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer) + : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor) + { + while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) + { + if (m_lhsIter.index() < m_rhsIter.index()) + ++m_lhsIter; + else + ++m_rhsIter; + } + } + + EIGEN_STRONG_INLINE 
InnerIterator& operator++() + { + ++m_lhsIter; + ++m_rhsIter; + while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) + { + if (m_lhsIter.index() < m_rhsIter.index()) + ++m_lhsIter; + else + ++m_rhsIter; + } + return *this; + } + + EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } + + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } + + protected: + LhsIterator m_lhsIter; + RhsIterator m_rhsIter; + const BinaryOp& m_functor; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + Flags = XprType::Flags + }; + + explicit sparse_conjunction_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate()); + } + +protected: + const BinaryOp m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; +}; + +// "dense ^ sparse" +template +struct sparse_conjunction_evaluator + : evaluator_base +{ +protected: + typedef typename XprType::Functor BinaryOp; + typedef typename XprType::Lhs LhsArg; + typedef typename XprType::Rhs RhsArg; + typedef evaluator LhsEvaluator; + typedef typename evaluator::InnerIterator RhsIterator; + typedef typename XprType::StorageIndex StorageIndex; + typedef typename traits::Scalar Scalar; +public: + + class InnerIterator + { + enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit }; + + public: + + EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer) + : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer) + {} + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + ++m_rhsIter; + return *this; + } + + EIGEN_STRONG_INLINE Scalar value() const + { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } + + EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); } + EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } + + protected: + const LhsEvaluator &m_lhsEval; + RhsIterator m_rhsIter; + const BinaryOp& m_functor; + const Index m_outer; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + // Expose storage order of the sparse expression + Flags = (XprType::Flags & ~RowMajorBit) | (int(RhsArg::Flags)&RowMajorBit) + }; + + explicit sparse_conjunction_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_rhsImpl.nonZerosEstimate(); + } + +protected: + const BinaryOp 
m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; +}; + +// "sparse ^ dense" +template +struct sparse_conjunction_evaluator + : evaluator_base +{ +protected: + typedef typename XprType::Functor BinaryOp; + typedef typename XprType::Lhs LhsArg; + typedef typename XprType::Rhs RhsArg; + typedef typename evaluator::InnerIterator LhsIterator; + typedef evaluator RhsEvaluator; + typedef typename XprType::StorageIndex StorageIndex; + typedef typename traits::Scalar Scalar; +public: + + class InnerIterator + { + enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit }; + + public: + + EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer) + : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer) + {} + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + ++m_lhsIter; + return *this; + } + + EIGEN_STRONG_INLINE Scalar value() const + { return m_functor(m_lhsIter.value(), + m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } + + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } + + protected: + LhsIterator m_lhsIter; + const evaluator &m_rhsEval; + const BinaryOp& m_functor; + const Index m_outer; + }; + + + enum { + CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, + // Expose storage order of the sparse expression + Flags = (XprType::Flags & ~RowMajorBit) | (int(LhsArg::Flags)&RowMajorBit) + }; + + explicit sparse_conjunction_evaluator(const XprType& xpr) + : m_functor(xpr.functor()), + m_lhsImpl(xpr.lhs()), + m_rhsImpl(xpr.rhs()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_lhsImpl.nonZerosEstimate(); + } + +protected: + const BinaryOp m_functor; + evaluator m_lhsImpl; + evaluator m_rhsImpl; +}; + +} + +/*************************************************************************** +* Implementation of SparseMatrixBase and SparseCwise functions/operators +***************************************************************************/ + +template +template +Derived& SparseMatrixBase::operator+=(const EigenBase &other) +{ + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +template +template +Derived& SparseMatrixBase::operator-=(const EigenBase &other) +{ + call_assignment(derived(), other.derived(), internal::assign_op()); + return derived(); +} + +template +template +EIGEN_STRONG_INLINE Derived & +SparseMatrixBase::operator-=(const SparseMatrixBase &other) +{ + return derived() = derived() - other.derived(); +} + +template +template +EIGEN_STRONG_INLINE Derived & +SparseMatrixBase::operator+=(const SparseMatrixBase& other) +{ + return derived() = derived() + other.derived(); +} + +template +template +Derived& SparseMatrixBase::operator+=(const DiagonalBase& other) +{ + call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +template +template +Derived& SparseMatrixBase::operator-=(const DiagonalBase& other) +{ + call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op()); + 
return derived(); +} + +template +template +EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type +SparseMatrixBase::cwiseProduct(const MatrixBase &other) const +{ + return typename CwiseProductDenseReturnType::Type(derived(), other.derived()); +} + +template +EIGEN_STRONG_INLINE const CwiseBinaryOp, const DenseDerived, const SparseDerived> +operator+(const MatrixBase &a, const SparseMatrixBase &b) +{ + return CwiseBinaryOp, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); +} + +template +EIGEN_STRONG_INLINE const CwiseBinaryOp, const SparseDerived, const DenseDerived> +operator+(const SparseMatrixBase &a, const MatrixBase &b) +{ + return CwiseBinaryOp, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); +} + +template +EIGEN_STRONG_INLINE const CwiseBinaryOp, const DenseDerived, const SparseDerived> +operator-(const MatrixBase &a, const SparseMatrixBase &b) +{ + return CwiseBinaryOp, const DenseDerived, const SparseDerived>(a.derived(), b.derived()); +} + +template +EIGEN_STRONG_INLINE const CwiseBinaryOp, const SparseDerived, const DenseDerived> +operator-(const SparseMatrixBase &a, const MatrixBase &b) +{ + return CwiseBinaryOp, const SparseDerived, const DenseDerived>(a.derived(), b.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h new file mode 100644 index 000000000..ea7973790 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h @@ -0,0 +1,148 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
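// [Editor's illustration, not part of this patch.] The unary-op evaluators in
// the file that follows let entry-wise unary expressions visit only the stored
// coefficients; implicit zeros are never materialized:
#include <Eigen/Sparse>

static Eigen::SparseMatrix<double> unary_demo(const Eigen::SparseMatrix<double>& A)
{
    // Each wrapper just decorates the underlying inner-iterator chain.
    return (-A).cwiseAbs();
}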
+ +#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H +#define EIGEN_SPARSE_CWISE_UNARY_OP_H + +namespace Eigen { + +namespace internal { + +template +struct unary_evaluator, IteratorBased> + : public evaluator_base > +{ + public: + typedef CwiseUnaryOp XprType; + + class InnerIterator; + + enum { + CoeffReadCost = evaluator::CoeffReadCost + functor_traits::Cost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_argImpl.nonZerosEstimate(); + } + + protected: + typedef typename evaluator::InnerIterator EvalIterator; + + const UnaryOp m_functor; + evaluator m_argImpl; +}; + +template +class unary_evaluator, IteratorBased>::InnerIterator + : public unary_evaluator, IteratorBased>::EvalIterator +{ + typedef typename XprType::Scalar Scalar; + typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; + public: + + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) + : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) + {} + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { Base::operator++(); return *this; } + + EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); } + + protected: + const UnaryOp m_functor; + private: + Scalar& valueRef(); +}; + +template +struct unary_evaluator, IteratorBased> + : public evaluator_base > +{ + public: + typedef CwiseUnaryView XprType; + + class InnerIterator; + + enum { + CoeffReadCost = evaluator::CoeffReadCost + functor_traits::Cost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + protected: + typedef typename evaluator::InnerIterator EvalIterator; + + const ViewOp m_functor; + evaluator m_argImpl; +}; + +template +class unary_evaluator, IteratorBased>::InnerIterator + : public unary_evaluator, IteratorBased>::EvalIterator +{ + typedef typename XprType::Scalar Scalar; + typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; + public: + + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) + : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) + {} + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { Base::operator++(); return *this; } + + EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); } + EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); } + + protected: + const ViewOp m_functor; +}; + +} // end namespace internal + +template +EIGEN_STRONG_INLINE Derived& +SparseMatrixBase::operator*=(const Scalar& other) +{ + typedef typename internal::evaluator::InnerIterator EvalIterator; + internal::evaluator thisEval(derived()); + for (Index j=0; j +EIGEN_STRONG_INLINE Derived& +SparseMatrixBase::operator/=(const Scalar& other) +{ + typedef typename internal::evaluator::InnerIterator EvalIterator; + internal::evaluator thisEval(derived()); + for (Index j=0; j +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
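// [Editor's illustration, not part of this patch.] operator*= and operator/=
// above loop over the stored values through valueRef(), so in-place scaling
// never touches implicit zeros:
#include <Eigen/Sparse>

static void scale_in_place(Eigen::SparseMatrix<double>& A)
{
    A *= 2.0; // scales every stored (non-zero) entry
    A /= 4.0;
}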
+ +#ifndef EIGEN_SPARSEDENSEPRODUCT_H +#define EIGEN_SPARSEDENSEPRODUCT_H + +namespace Eigen { + +namespace internal { + +template <> struct product_promote_storage_type { typedef Sparse ret; }; +template <> struct product_promote_storage_type { typedef Sparse ret; }; + +template +struct sparse_time_dense_product_impl; + +template +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename evaluator::InnerIterator LhsInnerIterator; + typedef evaluator LhsEval; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) + { + LhsEval lhsEval(lhs); + + Index n = lhs.outerSize(); +#ifdef EIGEN_HAS_OPENMP + Eigen::initParallel(); + Index threads = Eigen::nbThreads(); +#endif + + for(Index c=0; c1 && lhsEval.nonZerosEstimate() > 20000) + { + #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads) + for(Index i=0; i let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators +// template +// struct ScalarBinaryOpTraits > +// { +// enum { +// Defined = 1 +// }; +// typedef typename CwiseUnaryOp, T2>::PlainObject ReturnType; +// }; + +template +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename evaluator::InnerIterator LhsInnerIterator; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) + { + evaluator lhsEval(lhs); + for(Index c=0; c::ReturnType rhs_j(alpha * rhs.coeff(j,c)); + for(LhsInnerIterator it(lhsEval,j); it ;++it) + res.coeffRef(it.index(),c) += it.value() * rhs_j; + } + } + } +}; + +template +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename evaluator::InnerIterator LhsInnerIterator; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) + { + evaluator lhsEval(lhs); + for(Index j=0; j +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename evaluator::InnerIterator LhsInnerIterator; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) + { + evaluator lhsEval(lhs); + for(Index j=0; j +inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) +{ + sparse_time_dense_product_impl::run(lhs, rhs, res, alpha); +} + +} // end namespace internal + +namespace internal { + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhs); + RhsNested rhsNested(rhs); + internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha); + } +}; + +template +struct generic_product_impl + : 
generic_product_impl +{}; + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) + { + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhs); + RhsNested rhsNested(rhs); + + // transpose everything + Transpose dstT(dst); + internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha); + } +}; + +template +struct generic_product_impl + : generic_product_impl +{}; + +template +struct sparse_dense_outer_product_evaluator +{ +protected: + typedef typename conditional::type Lhs1; + typedef typename conditional::type ActualRhs; + typedef Product ProdXprType; + + // if the actual left-hand side is a dense vector, + // then build a sparse-view so that we can seamlessly iterate over it. + typedef typename conditional::StorageKind,Sparse>::value, + Lhs1, SparseView >::type ActualLhs; + typedef typename conditional::StorageKind,Sparse>::value, + Lhs1 const&, SparseView >::type LhsArg; + + typedef evaluator LhsEval; + typedef evaluator RhsEval; + typedef typename evaluator::InnerIterator LhsIterator; + typedef typename ProdXprType::Scalar Scalar; + +public: + enum { + Flags = NeedToTranspose ? RowMajorBit : 0, + CoeffReadCost = HugeCost + }; + + class InnerIterator : public LhsIterator + { + public: + InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer) + : LhsIterator(xprEval.m_lhsXprImpl, 0), + m_outer(outer), + m_empty(false), + m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits::StorageKind() )) + {} + + EIGEN_STRONG_INLINE Index outer() const { return m_outer; } + EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } + EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? 
LhsIterator::index() : m_outer; } + + EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; } + EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); } + + protected: + Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const + { + return rhs.coeff(outer); + } + + Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse()) + { + typename RhsEval::InnerIterator it(rhs, outer); + if (it && it.index()==0 && it.value()!=Scalar(0)) + return it.value(); + m_empty = true; + return Scalar(0); + } + + Index m_outer; + bool m_empty; + Scalar m_factor; + }; + + sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs) + : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + // transpose case + sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs) + : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + +protected: + const LhsArg m_lhs; + evaluator m_lhsXprImpl; + evaluator m_rhsXprImpl; +}; + +// sparse * dense outer product +template +struct product_evaluator, OuterProduct, SparseShape, DenseShape> + : sparse_dense_outer_product_evaluator +{ + typedef sparse_dense_outer_product_evaluator Base; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + + explicit product_evaluator(const XprType& xpr) + : Base(xpr.lhs(), xpr.rhs()) + {} + +}; + +template +struct product_evaluator, OuterProduct, DenseShape, SparseShape> + : sparse_dense_outer_product_evaluator +{ + typedef sparse_dense_outer_product_evaluator Base; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + + explicit product_evaluator(const XprType& xpr) + : Base(xpr.lhs(), xpr.rhs()) + {} + +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSEDENSEPRODUCT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h new file mode 100644 index 000000000..941c03be3 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -0,0 +1,138 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H +#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H + +namespace Eigen { + +// The product of a diagonal matrix with a sparse matrix can be easily +// implemented using expression template. +// We have two consider very different cases: +// 1 - diag * row-major sparse +// => each inner vector <=> scalar * sparse vector product +// => so we can reuse CwiseUnaryOp::InnerIterator +// 2 - diag * col-major sparse +// => each inner vector <=> densevector * sparse vector cwise product +// => again, we can reuse specialization of CwiseBinaryOp::InnerIterator +// for that particular case +// The two other cases are symmetric. 
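// [Editor's illustration, not part of this patch.] A usage-level view of the
// product kinds handled by this file and the preceding one: sparse times dense
// (potentially OpenMP-parallelized for a row-major lhs, as the code above
// shows), sparse/dense outer products, and diagonal products in either order:
#include <Eigen/Dense>
#include <Eigen/Sparse>

static void product_demo(const Eigen::SparseMatrix<double>& A, // m x n
                         const Eigen::VectorXd& x,             // size n
                         const Eigen::VectorXd& d)             // size m
{
    Eigen::VectorXd y = A * x;                          // sparse * dense vector
    Eigen::SparseVector<double> sv = d.sparseView();
    Eigen::SparseMatrix<double> O = sv * x.transpose(); // sparse-dense outer product -> sparse
    Eigen::SparseMatrix<double> L = d.asDiagonal() * A; // diagonal * sparse
    Eigen::SparseMatrix<double> R = A * x.asDiagonal(); // sparse * diagonal
    (void)y; (void)O; (void)L; (void)R;
}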
+ +namespace internal { + +enum { + SDP_AsScalarProduct, + SDP_AsCwiseProduct +}; + +template +struct sparse_diagonal_product_evaluator; + +template +struct product_evaluator, ProductTag, DiagonalShape, SparseShape> + : public sparse_diagonal_product_evaluator +{ + typedef Product XprType; + enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags + + typedef sparse_diagonal_product_evaluator Base; + explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {} +}; + +template +struct product_evaluator, ProductTag, SparseShape, DiagonalShape> + : public sparse_diagonal_product_evaluator, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> +{ + typedef Product XprType; + enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags + + typedef sparse_diagonal_product_evaluator, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base; + explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {} +}; + +template +struct sparse_diagonal_product_evaluator +{ +protected: + typedef typename evaluator::InnerIterator SparseXprInnerIterator; + typedef typename SparseXprType::Scalar Scalar; + +public: + class InnerIterator : public SparseXprInnerIterator + { + public: + InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer) + : SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer), + m_coeff(xprEval.m_diagCoeffImpl.coeff(outer)) + {} + + EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); } + protected: + typename DiagonalCoeffType::Scalar m_coeff; + }; + + sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff) + : m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff) + {} + + Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); } + +protected: + evaluator m_sparseXprImpl; + evaluator m_diagCoeffImpl; +}; + + +template +struct sparse_diagonal_product_evaluator +{ + typedef typename SparseXprType::Scalar Scalar; + typedef typename SparseXprType::StorageIndex StorageIndex; + + typedef typename nested_eval::type DiagCoeffNested; + + class InnerIterator + { + typedef typename evaluator::InnerIterator SparseXprIter; + public: + InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer) + : m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested) + {} + + inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); } + inline StorageIndex index() const { return m_sparseIter.index(); } + inline Index outer() const { return m_sparseIter.outer(); } + inline Index col() const { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); } + inline Index row() const { return SparseXprType::IsRowMajor ? 
m_sparseIter.outer() : m_sparseIter.index(); } + + EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; } + inline operator bool() const { return m_sparseIter; } + + protected: + SparseXprIter m_sparseIter; + DiagCoeffNested m_diagCoeffNested; + }; + + sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff) + : m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff) + {} + + Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); } + +protected: + evaluator m_sparseXprEval; + DiagCoeffNested m_diagCoeffNested; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDot.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDot.h new file mode 100644 index 000000000..38bc4aa9e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseDot.h @@ -0,0 +1,98 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_DOT_H +#define EIGEN_SPARSE_DOT_H + +namespace Eigen { + +template +template +typename internal::traits::Scalar +SparseMatrixBase::dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + eigen_assert(size() == other.size()); + eigen_assert(other.size()>0 && "you are using a non initialized vector"); + + internal::evaluator thisEval(derived()); + typename internal::evaluator::InnerIterator i(thisEval, 0); + Scalar res(0); + while (i) + { + res += numext::conj(i.value()) * other.coeff(i.index()); + ++i; + } + return res; +} + +template +template +typename internal::traits::Scalar +SparseMatrixBase::dot(const SparseMatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + eigen_assert(size() == other.size()); + + internal::evaluator thisEval(derived()); + typename internal::evaluator::InnerIterator i(thisEval, 0); + + internal::evaluator otherEval(other.derived()); + typename internal::evaluator::InnerIterator j(otherEval, 0); + + Scalar res(0); + while (i && j) + { + if (i.index()==j.index()) + { + res += numext::conj(i.value()) * j.value(); + ++i; ++j; + } + else if (i.index() +inline typename NumTraits::Scalar>::Real +SparseMatrixBase::squaredNorm() const +{ + return numext::real((*this).cwiseAbs2().sum()); +} + +template +inline typename NumTraits::Scalar>::Real +SparseMatrixBase::norm() const +{ + using std::sqrt; + return sqrt(squaredNorm()); +} + +template +inline 
typename NumTraits::Scalar>::Real +SparseMatrixBase::blueNorm() const +{ + return internal::blueNorm_impl(*this); +} +} // end namespace Eigen + +#endif // EIGEN_SPARSE_DOT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseFuzzy.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseFuzzy.h new file mode 100644 index 000000000..7d47eb94d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseFuzzy.h @@ -0,0 +1,29 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_FUZZY_H +#define EIGEN_SPARSE_FUZZY_H + +namespace Eigen { + +template +template +bool SparseMatrixBase::isApprox(const SparseMatrixBase& other, const RealScalar &prec) const +{ + const typename internal::nested_eval::type actualA(derived()); + typename internal::conditional::type, + const PlainObject>::type actualB(other.derived()); + + return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm()); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_FUZZY_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMap.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMap.h new file mode 100644 index 000000000..f99be3379 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMap.h @@ -0,0 +1,305 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_MAP_H +#define EIGEN_SPARSE_MAP_H + +namespace Eigen { + +namespace internal { + +template +struct traits, Options, StrideType> > + : public traits > +{ + typedef SparseMatrix PlainObjectType; + typedef traits TraitsBase; + enum { + Flags = TraitsBase::Flags & (~NestByRefBit) + }; +}; + +template +struct traits, Options, StrideType> > + : public traits > +{ + typedef SparseMatrix PlainObjectType; + typedef traits TraitsBase; + enum { + Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit)) + }; +}; + +} // end namespace internal + +template::has_write_access ? WriteAccessors : ReadOnlyAccessors +> class SparseMapBase; + +/** \ingroup SparseCore_Module + * class SparseMapBase + * \brief Common base class for Map and Ref instance of sparse matrix and vector. 
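// [Editor's illustration, not part of this patch.] The reductions defined in
// SparseDot.h and the fuzzy comparison from SparseFuzzy.h above, as seen from
// user code:
#include <Eigen/Sparse>

static bool reductions_demo(const Eigen::SparseVector<double>& u,
                            const Eigen::SparseVector<double>& v,
                            const Eigen::SparseMatrix<double>& A,
                            const Eigen::SparseMatrix<double>& B)
{
    double s = u.dot(v);   // merges the two sorted index streams
    double n = A.norm();   // sqrt(squaredNorm()), i.e. the Frobenius norm
    (void)s; (void)n;
    return A.isApprox(B);  // squared distance vs. prec^2 * min(|A|^2, |B|^2)
}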
+ */ +template +class SparseMapBase + : public SparseCompressedBase +{ + public: + typedef SparseCompressedBase Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::StorageIndex StorageIndex; + enum { IsRowMajor = Base::IsRowMajor }; + using Base::operator=; + protected: + + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, const Scalar *>::type ScalarPointer; + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + StorageIndex *, const StorageIndex *>::type IndexPointer; + + Index m_outerSize; + Index m_innerSize; + Array m_zero_nnz; + IndexPointer m_outerIndex; + IndexPointer m_innerIndices; + ScalarPointer m_values; + IndexPointer m_innerNonZeros; + + public: + + /** \copydoc SparseMatrixBase::rows() */ + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + /** \copydoc SparseMatrixBase::cols() */ + inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + /** \copydoc SparseMatrixBase::innerSize() */ + inline Index innerSize() const { return m_innerSize; } + /** \copydoc SparseMatrixBase::outerSize() */ + inline Index outerSize() const { return m_outerSize; } + /** \copydoc SparseCompressedBase::nonZeros */ + inline Index nonZeros() const { return m_zero_nnz[1]; } + + /** \copydoc SparseCompressedBase::isCompressed */ + bool isCompressed() const { return m_innerNonZeros==0; } + + //---------------------------------------- + // direct access interface + /** \copydoc SparseMatrix::valuePtr */ + inline const Scalar* valuePtr() const { return m_values; } + /** \copydoc SparseMatrix::innerIndexPtr */ + inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; } + /** \copydoc SparseMatrix::outerIndexPtr */ + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } + /** \copydoc SparseMatrix::innerNonZeroPtr */ + inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } + //---------------------------------------- + + /** \copydoc SparseMatrix::coeff */ + inline Scalar coeff(Index row, Index col) const + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index start = m_outerIndex[outer]; + Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer]; + if (start==end) + return Scalar(0); + else if (end>0 && inner==m_innerIndices[end-1]) + return m_values[end-1]; + // ^^ optimization: let's first check if it is the last coefficient + // (very common in high level algorithms) + + const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); + const Index id = r-&m_innerIndices[0]; + return ((*r==inner) && (id(nnz)), m_outerIndex(outerIndexPtr), + m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr) + {} + + // for vectors + inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr) + : m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index(nnz)), m_outerIndex(m_zero_nnz.data()), + m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0) + {} + + /** Empty destructor */ + inline ~SparseMapBase() {} + + protected: + inline SparseMapBase() {} +}; + +/** \ingroup SparseCore_Module + * class SparseMapBase + * \brief Common base class for writable Map and Ref instance of sparse matrix and vector. 
+ */ +template +class SparseMapBase + : public SparseMapBase +{ + typedef MapBase ReadOnlyMapBase; + + public: + typedef SparseMapBase Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::StorageIndex StorageIndex; + enum { IsRowMajor = Base::IsRowMajor }; + + using Base::operator=; + + public: + + //---------------------------------------- + // direct access interface + using Base::valuePtr; + using Base::innerIndexPtr; + using Base::outerIndexPtr; + using Base::innerNonZeroPtr; + /** \copydoc SparseMatrix::valuePtr */ + inline Scalar* valuePtr() { return Base::m_values; } + /** \copydoc SparseMatrix::innerIndexPtr */ + inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; } + /** \copydoc SparseMatrix::outerIndexPtr */ + inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; } + /** \copydoc SparseMatrix::innerNonZeroPtr */ + inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; } + //---------------------------------------- + + /** \copydoc SparseMatrix::coeffRef */ + inline Scalar& coeffRef(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index start = Base::m_outerIndex[outer]; + Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer]; + eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); + eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); + StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner); + const Index id = r - &Base::m_innerIndices[0]; + eigen_assert((*r==inner) && (id(Base::m_values)[id]; + } + + inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, + Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0) + : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr) + {} + + // for vectors + inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr) + : Base(size, nnz, innerIndexPtr, valuePtr) + {} + + /** Empty destructor */ + inline ~SparseMapBase() {} + + protected: + inline SparseMapBase() {} +}; + +/** \ingroup SparseCore_Module + * + * \brief Specialization of class Map for SparseMatrix-like storage. + * + * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix. + * + * \sa class Map, class SparseMatrix, class Ref + */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +class Map, Options, StrideType> + : public SparseMapBase, Options, StrideType> > +#else +template +class Map + : public SparseMapBase +#endif +{ + public: + typedef SparseMapBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Map) + enum { IsRowMajor = Base::IsRowMajor }; + + public: + + /** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients, + * stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr. + * If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed. + * + * This constructor is available only if \c SparseMatrixType is non-const. + * + * More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages". 
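// [Editor's illustration, not part of this patch.] Wrapping existing
// compressed-column arrays with the constructor documented above, without
// copying (a 2x2 identity here):
#include <Eigen/Sparse>

static double map_demo()
{
    double vals[]  = {1.0, 1.0};
    int    inner[] = {0, 1};    // row index of each stored value
    int    outer[] = {0, 1, 2}; // outerSize()+1 column start offsets

    Eigen::Map<Eigen::SparseMatrix<double> > view(2, 2, 2, outer, inner, vals);
    return view.coeff(1, 1); // random access binary-searches one inner vector
}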
+ */ + inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, + StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0) + : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr) + {} +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** Empty destructor */ + inline ~Map() {} +}; + +template +class Map, Options, StrideType> + : public SparseMapBase, Options, StrideType> > +{ + public: + typedef SparseMapBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Map) + enum { IsRowMajor = Base::IsRowMajor }; + + public: +#endif + /** This is the const version of the above constructor. + * + * This constructor is available only if \c SparseMatrixType is const, e.g.: + * \code Map > \endcode + */ + inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr, + const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0) + : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr) + {} + + /** Empty destructor */ + inline ~Map() {} +}; + +namespace internal { + +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Map, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Map, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_MAP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrix.h new file mode 100644 index 000000000..0a2490bcc --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrix.h @@ -0,0 +1,1403 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEMATRIX_H +#define EIGEN_SPARSEMATRIX_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * + * \class SparseMatrix + * + * \brief A versatible sparse matrix representation + * + * This class implements a more versatile variants of the common \em compressed row/column storage format. + * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index. + * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra + * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero + * can be done with limited memory reallocation and copies. + * + * A call to the function makeCompressed() turns the matrix into the standard \em compressed format + * compatible with many library. + * + * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages". + * + * \tparam _Scalar the scalar type, i.e. 
the type of the coefficients + * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility + * is ColMajor or RowMajor. The default is 0 which means column-major. + * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int. + * + * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int), + * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index. + * Codes making use of \c SparseMatrix::Index, might thus likely have to be changed to use \c SparseMatrix::StorageIndex instead. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. + */ + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef Sparse StorageKind; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; + +template +struct traits, DiagIndex> > +{ + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType; + typedef typename ref_selector::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef _StorageIndex StorageIndex; + typedef MatrixXpr XprKind; + + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = 1, + Flags = LvalueBit + }; +}; + +template +struct traits, DiagIndex> > + : public traits, DiagIndex> > +{ + enum { + Flags = 0 + }; +}; + +} // end namespace internal + +template +class SparseMatrix + : public SparseCompressedBase > +{ + typedef SparseCompressedBase Base; + using Base::convert_index; + friend class SparseVector<_Scalar,0,_StorageIndex>; + public: + using Base::isCompressed; + using Base::nonZeros; + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) + using Base::operator+=; + using Base::operator-=; + + typedef MappedSparseMatrix Map; + typedef Diagonal DiagonalReturnType; + typedef Diagonal ConstDiagonalReturnType; + typedef typename Base::InnerIterator InnerIterator; + typedef typename Base::ReverseInnerIterator ReverseInnerIterator; + + + using Base::IsRowMajor; + typedef internal::CompressedStorage Storage; + enum { + Options = _Options + }; + + typedef typename Base::IndexVector IndexVector; + typedef typename Base::ScalarVector ScalarVector; + protected: + typedef SparseMatrix TransposedSparseMatrix; + + Index m_outerSize; + Index m_innerSize; + StorageIndex* m_outerIndex; + StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed + Storage m_data; + + public: + + /** \returns the number of rows of the matrix */ + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + /** \returns the number of columns of the matrix */ + inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + + /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ + inline Index innerSize() const { return m_innerSize; } + /** \returns the number of columns (resp. 
rows) of the matrix if the storage order column major (resp. row major) */ + inline Index outerSize() const { return m_outerSize; } + + /** \returns a const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline const Scalar* valuePtr() const { return m_data.valuePtr(); } + /** \returns a non-const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline Scalar* valuePtr() { return m_data.valuePtr(); } + + /** \returns a const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); } + /** \returns a non-const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); } + + /** \returns a const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), innerIndexPtr() */ + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } + /** \returns a non-const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), innerIndexPtr() */ + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } + + /** \returns a const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } + /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; } + + /** \internal */ + inline Storage& data() { return m_data; } + /** \internal */ + inline const Storage& data() const { return m_data; } + + /** \returns the value of the matrix at position \a i, \a j + * This function returns Scalar(0) if the element is an explicit \em zero */ + inline Scalar coeff(Index row, Index col) const + { + eigen_assert(row>=0 && row=0 && col=0 && row=0 && col=start && "you probably called coeffRef on a non finalized matrix"); + if(end<=start) + return insert(row,col); + const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner)); + if((pinnerSize() non zeros if reserve(Index) has not been called earlier. + * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be + * inserted by increasing outer-indices. + * + * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first + * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector. + * + * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1) + * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion. 
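+ *
+ * A short usage sketch (sizes are illustrative):
+ * \code
+ * Eigen::SparseMatrix<double> mat(1000, 1000);
+ * mat.reserve(Eigen::VectorXi::Constant(1000, 6));  // ~6 non-zeros per column
+ * for (int j = 0; j < 1000; ++j)
+ *   mat.insert(j, j) = 1.0;                         // (j,j) must not exist yet
+ * mat.makeCompressed();                             // optional final compression
+ * \endcode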
+ * + */ + Scalar& insert(Index row, Index col); + + public: + + /** Removes all non zeros but keep allocated memory + * + * This function does not free the currently allocated memory. To release as much as memory as possible, + * call \code mat.data().squeeze(); \endcode after resizing it. + * + * \sa resize(Index,Index), data() + */ + inline void setZero() + { + m_data.clear(); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); + if(m_innerNonZeros) + memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); + } + + /** Preallocates \a reserveSize non zeros. + * + * Precondition: the matrix must be in compressed mode. */ + inline void reserve(Index reserveSize) + { + eigen_assert(isCompressed() && "This function does not make sense in non compressed mode."); + m_data.reserve(reserveSize); + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j. + * + * This function turns the matrix in non-compressed mode. + * + * The type \c SizesType must expose the following interface: + \code + typedef value_type; + const value_type& operator[](i) const; + \endcode + * for \c i in the [0,this->outerSize()[ range. + * Typical choices include std::vector, Eigen::VectorXi, Eigen::VectorXi::Constant, etc. + */ + template + inline void reserve(const SizesType& reserveSizes); + #else + template + inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = + #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename + typename + #endif + SizesType::value_type()) + { + EIGEN_UNUSED_VARIABLE(enableif); + reserveInnerVectors(reserveSizes); + } + #endif // EIGEN_PARSED_BY_DOXYGEN + protected: + template + inline void reserveInnerVectors(const SizesType& reserveSizes) + { + if(isCompressed()) + { + Index totalReserveSize = 0; + // turn the matrix into non-compressed mode + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); + if (!m_innerNonZeros) internal::throw_std_bad_alloc(); + + // temporarily use m_innerSizes to hold the new starting points. 
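+      // (the freshly allocated m_innerNonZeros buffer is not meaningful yet,
+      // so it doubles as scratch space for the new starting points)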
+ StorageIndex* newOuterIndex = m_innerNonZeros; + + StorageIndex count = 0; + for(Index j=0; j=0; --j) + { + StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j]; + for(Index i=innerNNZ-1; i>=0; --i) + { + m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); + m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); + } + previousOuterIndex = m_outerIndex[j]; + m_outerIndex[j] = newOuterIndex[j]; + m_innerNonZeros[j] = innerNNZ; + } + m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1]; + + m_data.resize(m_outerIndex[m_outerSize]); + } + else + { + StorageIndex* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(StorageIndex))); + if (!newOuterIndex) internal::throw_std_bad_alloc(); + + StorageIndex count = 0; + for(Index j=0; j(reserveSizes[j], alreadyReserved); + count += toReserve + m_innerNonZeros[j]; + } + newOuterIndex[m_outerSize] = count; + + m_data.resize(count); + for(Index j=m_outerSize-1; j>=0; --j) + { + Index offset = newOuterIndex[j] - m_outerIndex[j]; + if(offset>0) + { + StorageIndex innerNNZ = m_innerNonZeros[j]; + for(Index i=innerNNZ-1; i>=0; --i) + { + m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); + m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); + } + } + } + + std::swap(m_outerIndex, newOuterIndex); + std::free(newOuterIndex); + } + + } + public: + + //--- low level purely coherent filling --- + + /** \internal + * \returns a reference to the non zero coefficient at position \a row, \a col assuming that: + * - the nonzero does not already exist + * - the new coefficient is the last one according to the storage order + * + * Before filling a given inner vector you must call the statVec(Index) function. + * + * After an insertion session, you should call the finalize() function. + * + * \sa insert, insertBackByOuterInner, startVec */ + inline Scalar& insertBack(Index row, Index col) + { + return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); + } + + /** \internal + * \sa insertBack, startVec */ + inline Scalar& insertBackByOuterInner(Index outer, Index inner) + { + eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); + eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)(m_data.size()); + Index i = m_outerSize; + // find the last filled column + while (i>=0 && m_outerIndex[i]==0) + --i; + ++i; + while (i<=m_outerSize) + { + m_outerIndex[i] = size; + ++i; + } + } + } + + //--- + + template + void setFromTriplets(const InputIterators& begin, const InputIterators& end); + + template + void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func); + + void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op()); } + + template + void collapseDuplicates(DupFunctor dup_func = DupFunctor()); + + //--- + + /** \internal + * same as insert(Index,Index) except that the indices are given relative to the storage order */ + Scalar& insertByOuterInner(Index j, Index i) + { + return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); + } + + /** Turns the matrix into the \em compressed format. 
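+ *
+ * For instance (illustrative):
+ * \code
+ * Eigen::SparseMatrix<double> mat(10, 10);
+ * mat.insert(3, 4) = 1.5;        // insert() leaves the matrix uncompressed
+ * mat.makeCompressed();          // afterwards mat.isCompressed() == true
+ * \endcode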
+ */ + void makeCompressed() + { + if(isCompressed()) + return; + + eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0); + + Index oldStart = m_outerIndex[1]; + m_outerIndex[1] = m_innerNonZeros[0]; + for(Index j=1; j0) + { + for(Index k=0; k(std::malloc(m_outerSize * sizeof(StorageIndex))); + for (Index i = 0; i < m_outerSize; i++) + { + m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; + } + } + + /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */ + void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) + { + prune(default_prunning_func(reference,epsilon)); + } + + /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep. + * The functor type \a KeepFunc must implement the following function: + * \code + * bool operator() (const Index& row, const Index& col, const Scalar& value) const; + * \endcode + * \sa prune(Scalar,RealScalar) + */ + template + void prune(const KeepFunc& keep = KeepFunc()) + { + // TODO optimize the uncompressed mode to avoid moving and allocating the data twice + makeCompressed(); + + StorageIndex k = 0; + for(Index j=0; jrows() == rows && this->cols() == cols) return; + + // If one dimension is null, then there is nothing to be preserved + if(rows==0 || cols==0) return resize(rows,cols); + + Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); + Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); + StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows); + + // Deals with inner non zeros + if (m_innerNonZeros) + { + // Resize m_innerNonZeros + StorageIndex *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex))); + if (!newInnerNonZeros) internal::throw_std_bad_alloc(); + m_innerNonZeros = newInnerNonZeros; + + for(Index i=m_outerSize; i(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex))); + if (!m_innerNonZeros) internal::throw_std_bad_alloc(); + for(Index i = 0; i < m_outerSize; i++) + m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; + } + + // Change the m_innerNonZeros in case of a decrease of inner size + if (m_innerNonZeros && innerChange < 0) + { + for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) + { + StorageIndex &n = m_innerNonZeros[i]; + StorageIndex start = m_outerIndex[i]; + while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; + } + } + + m_innerSize = newInnerSize; + + // Re-allocate outer index structure if necessary + if (outerChange == 0) + return; + + StorageIndex *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex))); + if (!newOuterIndex) internal::throw_std_bad_alloc(); + m_outerIndex = newOuterIndex; + if (outerChange > 0) + { + StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; + for(Index i=m_outerSize; i(std::malloc((outerSize + 1) * sizeof(StorageIndex))); + if (!m_outerIndex) internal::throw_std_bad_alloc(); + + m_outerSize = outerSize; + } + if(m_innerNonZeros) + { + std::free(m_innerNonZeros); + m_innerNonZeros = 0; + } + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); + } + + /** \internal + * Resize the nonzero vector to \a size */ + void resizeNonZeros(Index size) + { + m_data.resize(size); + } + + /** \returns a const expression of the diagonal coefficients. 
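+ *
+ * For instance (illustrative):
+ * \code
+ * Eigen::SparseMatrix<double> A(3, 3);
+ * A.insert(0, 0) = 4.0;
+ * A.insert(2, 2) = 9.0;
+ * Eigen::VectorXd d = A.diagonal();  // d == (4, 0, 9); zeros where nothing is stored
+ * \endcode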
*/ + const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); } + + /** \returns a read-write expression of the diagonal coefficients. + * \warning If the diagonal entries are written, then all diagonal + * entries \b must already exist, otherwise an assertion will be raised. + */ + DiagonalReturnType diagonal() { return DiagonalReturnType(*this); } + + /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */ + inline SparseMatrix() + : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + resize(0, 0); + } + + /** Constructs a \a rows \c x \a cols empty matrix */ + inline SparseMatrix(Index rows, Index cols) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + resize(rows, cols); + } + + /** Constructs a sparse matrix from the sparse expression \a other */ + template + inline SparseMatrix(const SparseMatrixBase& other) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + check_template_parameters(); + const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator::Flags & RowMajorBit); + if (needToTranspose) + *this = other.derived(); + else + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + internal::call_assignment_no_alias(*this, other.derived()); + } + } + + /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ + template + inline SparseMatrix(const SparseSelfAdjointView& other) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + Base::operator=(other); + } + + /** Copy constructor (it performs a deep copy) */ + inline SparseMatrix(const SparseMatrix& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + *this = other.derived(); + } + + /** \brief Copy constructor with in-place evaluation */ + template + SparseMatrix(const ReturnByValue& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + initAssignment(other); + other.evalTo(*this); + } + + /** \brief Copy constructor with in-place evaluation */ + template + explicit SparseMatrix(const DiagonalBase& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + *this = other.derived(); + } + + /** Swaps the content of two sparse matrices of the same type. + * This is a fast operation that simply swaps the underlying pointers and parameters. */ + inline void swap(SparseMatrix& other) + { + //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); + std::swap(m_outerIndex, other.m_outerIndex); + std::swap(m_innerSize, other.m_innerSize); + std::swap(m_outerSize, other.m_outerSize); + std::swap(m_innerNonZeros, other.m_innerNonZeros); + m_data.swap(other.m_data); + } + + /** Sets *this to the identity matrix. + * This function also turns the matrix into compressed mode, and drop any reserved memory. 
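+ *
+ * For instance (illustrative):
+ * \code
+ * Eigen::SparseMatrix<double> I(100, 100);  // must be square
+ * I.setIdentity();                          // 100 stored ones, compressed storage
+ * \endcode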
*/ + inline void setIdentity() + { + eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); + this->m_data.resize(rows()); + Eigen::Map(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1)); + Eigen::Map(this->m_data.valuePtr(), rows()).setOnes(); + Eigen::Map(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows())); + std::free(m_innerNonZeros); + m_innerNonZeros = 0; + } + inline SparseMatrix& operator=(const SparseMatrix& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else if(this!=&other) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + initAssignment(other); + if(other.isCompressed()) + { + internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex); + m_data = other.m_data; + } + else + { + Base::operator=(other); + } + } + return *this; + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline SparseMatrix& operator=(const EigenBase& other) + { return Base::operator=(other.derived()); } +#endif // EIGEN_PARSED_BY_DOXYGEN + + template + EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase& other); + + friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) + { + EIGEN_DBG_SPARSE( + s << "Nonzero entries:\n"; + if(m.isCompressed()) + { + for (Index i=0; i&>(m); + return s; + } + + /** Destructor */ + inline ~SparseMatrix() + { + std::free(m_outerIndex); + std::free(m_innerNonZeros); + } + + /** Overloaded for performance */ + Scalar sum() const; + +# ifdef EIGEN_SPARSEMATRIX_PLUGIN +# include EIGEN_SPARSEMATRIX_PLUGIN +# endif + +protected: + + template + void initAssignment(const Other& other) + { + resize(other.rows(), other.cols()); + if(m_innerNonZeros) + { + std::free(m_innerNonZeros); + m_innerNonZeros = 0; + } + } + + /** \internal + * \sa insert(Index,Index) */ + EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col); + + /** \internal + * A vector object that is equal to 0 everywhere but v at the position i */ + class SingletonVector + { + StorageIndex m_index; + StorageIndex m_value; + public: + typedef StorageIndex value_type; + SingletonVector(Index i, Index v) + : m_index(convert_index(i)), m_value(convert_index(v)) + {} + + StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; } + }; + + /** \internal + * \sa insert(Index,Index) */ + EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col); + +public: + /** \internal + * \sa insert(Index,Index) */ + EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(!isCompressed()); + eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); + + Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; + m_data.index(p) = convert_index(inner); + return (m_data.value(p) = Scalar(0)); + } + +private: + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); + } + + struct default_prunning_func { + default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {} + inline bool operator() (const Index&, const Index&, const Scalar& value) const + { + return !internal::isMuchSmallerThan(value, reference, epsilon); + } + Scalar reference; + RealScalar epsilon; + }; +}; + +namespace internal { + +template +void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func) +{ + enum { IsRowMajor = SparseMatrixType::IsRowMajor }; + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::StorageIndex StorageIndex; + SparseMatrix trMat(mat.rows(),mat.cols()); + + if(begin!=end) + { + // pass 1: count the nnz per inner-vector + typename SparseMatrixType::IndexVector wi(trMat.outerSize()); + wi.setZero(); + for(InputIterator it(begin); it!=end; ++it) + { + eigen_assert(it->row()>=0 && it->row()col()>=0 && it->col()col() : it->row())++; + } + + // pass 2: insert all the elements into trMat + trMat.reserve(wi); + for(InputIterator it(begin); it!=end; ++it) + trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); + + // pass 3: + trMat.collapseDuplicates(dup_func); + } + + // pass 4: transposed copy -> implicit sorting + mat = trMat; +} + +} + + +/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end. + * + * A \em triplet is a tuple (i,j,value) defining a non-zero element. + * The input list of triplets does not have to be sorted, and can contains duplicated elements. + * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up. + * This is a \em O(n) operation, with \em n the number of triplet elements. + * The initial contents of \c *this is destroyed. + * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, + * or the resize(Index,Index) method. The sizes are not extracted from the triplet list. + * + * The \a InputIterators value_type must provide the following interface: + * \code + * Scalar value() const; // the value + * Scalar row() const; // the row index i + * Scalar col() const; // the column index j + * \endcode + * See for instance the Eigen::Triplet template class. + * + * Here is a typical usage example: + * \code + typedef Triplet T; + std::vector tripletList; + triplets.reserve(estimation_of_entries); + for(...) + { + // ... + tripletList.push_back(T(i,j,v_ij)); + } + SparseMatrixType m(rows,cols); + m.setFromTriplets(tripletList.begin(), tripletList.end()); + // m is ready to go! + * \endcode + * + * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define + * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather + * be explicitely stored into a std::vector for instance. 
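+ *
+ * As a small illustration of the duplicate handling (values made up):
+ * \code
+ * std::vector<Eigen::Triplet<double> > ts;
+ * ts.push_back(Eigen::Triplet<double>(0, 0, 1.0));
+ * ts.push_back(Eigen::Triplet<double>(0, 0, 2.0));  // duplicate coordinate
+ * Eigen::SparseMatrix<double> m(2, 2);
+ * m.setFromTriplets(ts.begin(), ts.end());
+ * // m.coeff(0, 0) == 3.0: duplicates are summed by default
+ * \endcode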
+ */ +template +template +void SparseMatrix::setFromTriplets(const InputIterators& begin, const InputIterators& end) +{ + internal::set_from_triplets >(begin, end, *this, internal::scalar_sum_op()); +} + +/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied: + * \code + * value = dup_func(OldValue, NewValue) + * \endcode + * Here is a C++11 example keeping the latest entry only: + * \code + * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; }); + * \endcode + */ +template +template +void SparseMatrix::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func) +{ + internal::set_from_triplets, DupFunctor>(begin, end, *this, dup_func); +} + +/** \internal */ +template +template +void SparseMatrix::collapseDuplicates(DupFunctor dup_func) +{ + eigen_assert(!isCompressed()); + // TODO, in practice we should be able to use m_innerNonZeros for that task + IndexVector wi(innerSize()); + wi.fill(-1); + StorageIndex count = 0; + // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers + for(Index j=0; j=start) + { + // we already meet this entry => accumulate it + m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k)); + } + else + { + m_data.value(count) = m_data.value(k); + m_data.index(count) = m_data.index(k); + wi(i) = count; + ++count; + } + } + m_outerIndex[j] = start; + } + m_outerIndex[m_outerSize] = count; + + // turn the matrix into compressed form + std::free(m_innerNonZeros); + m_innerNonZeros = 0; + m_data.resize(m_outerIndex[m_outerSize]); +} + +template +template +EIGEN_DONT_INLINE SparseMatrix& SparseMatrix::operator=(const SparseMatrixBase& other) +{ + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + + const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator::Flags & RowMajorBit); + if (needToTranspose) + { + #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN + EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN + #endif + // two passes algorithm: + // 1 - compute the number of coeffs per dest inner vector + // 2 - do the actual copy/eval + // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed + typedef typename internal::nested_eval::type >::type OtherCopy; + typedef typename internal::remove_all::type _OtherCopy; + typedef internal::evaluator<_OtherCopy> OtherCopyEval; + OtherCopy otherCopy(other.derived()); + OtherCopyEval otherCopyEval(otherCopy); + + SparseMatrix dest(other.rows(),other.cols()); + Eigen::Map (dest.m_outerIndex,dest.outerSize()).setZero(); + + // pass 1 + // FIXME the above copy could be merged with that pass + for (Index j=0; jswap(dest); + return *this; + } + else + { + if(other.isRValue()) + { + initAssignment(other.derived()); + } + // there is no special optimization + return Base::operator=(other.derived()); + } +} + +template +typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col) +{ + eigen_assert(row>=0 && row=0 && col(std::malloc(m_outerSize * sizeof(StorageIndex))); + if(!m_innerNonZeros) internal::throw_std_bad_alloc(); + + memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); + + // pack all inner-vectors to the end of the 
pre-allocated space + // and allocate the entire free-space to the first inner-vector + StorageIndex end = convert_index(m_data.allocatedSize()); + for(Index j=1; j<=m_outerSize; ++j) + m_outerIndex[j] = end; + } + else + { + // turn the matrix into non-compressed mode + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); + if(!m_innerNonZeros) internal::throw_std_bad_alloc(); + for(Index j=0; j=0 && m_innerNonZeros[j]==0) + m_outerIndex[j--] = p; + + // push back the new element + ++m_innerNonZeros[outer]; + m_data.append(Scalar(0), inner); + + // check for reallocation + if(data_end != m_data.allocatedSize()) + { + // m_data has been reallocated + // -> move remaining inner-vectors back to the end of the free-space + // so that the entire free-space is allocated to the current inner-vector. + eigen_internal_assert(data_end < m_data.allocatedSize()); + StorageIndex new_end = convert_index(m_data.allocatedSize()); + for(Index k=outer+1; k<=m_outerSize; ++k) + if(m_outerIndex[k]==data_end) + m_outerIndex[k] = new_end; + } + return m_data.value(p); + } + + // Second case: the next inner-vector is packed to the end + // and the current inner-vector end match the used-space. + if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size()) + { + eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0); + + // add space for the new element + ++m_innerNonZeros[outer]; + m_data.resize(m_data.size()+1); + + // check for reallocation + if(data_end != m_data.allocatedSize()) + { + // m_data has been reallocated + // -> move remaining inner-vectors back to the end of the free-space + // so that the entire free-space is allocated to the current inner-vector. + eigen_internal_assert(data_end < m_data.allocatedSize()); + StorageIndex new_end = convert_index(m_data.allocatedSize()); + for(Index k=outer+1; k<=m_outerSize; ++k) + if(m_outerIndex[k]==data_end) + m_outerIndex[k] = new_end; + } + + // and insert it at the right position (sorted insertion) + Index startId = m_outerIndex[outer]; + Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1; + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + + m_data.index(p) = convert_index(inner); + return (m_data.value(p) = 0); + } + + if(m_data.size() != m_data.allocatedSize()) + { + // make sure the matrix is compatible to random un-compressed insertion: + m_data.resize(m_data.allocatedSize()); + this->reserveInnerVectors(Array::Constant(m_outerSize, 2)); + } + + return insertUncompressed(row,col); +} + +template +EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col) +{ + eigen_assert(!isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const StorageIndex inner = convert_index(IsRowMajor ? 
col : row); + + Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; + StorageIndex innerNNZ = m_innerNonZeros[outer]; + if(innerNNZ>=room) + { + // this inner vector is full, we need to reallocate the whole buffer :( + reserve(SingletonVector(outer,std::max(2,innerNNZ))); + } + + Index startId = m_outerIndex[outer]; + Index p = startId + m_innerNonZeros[outer]; + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end"); + + m_innerNonZeros[outer]++; + + m_data.index(p) = inner; + return (m_data.value(p) = Scalar(0)); +} + +template +EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col) +{ + eigen_assert(isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index previousOuter = outer; + if (m_outerIndex[outer+1]==0) + { + // we start a new inner vector + while (previousOuter>=0 && m_outerIndex[previousOuter]==0) + { + m_outerIndex[previousOuter] = convert_index(m_data.size()); + --previousOuter; + } + m_outerIndex[outer+1] = m_outerIndex[outer]; + } + + // here we have to handle the tricky case where the outerIndex array + // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., + // the 2nd inner vector... + bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) + && (std::size_t(m_outerIndex[outer+1]) == m_data.size()); + + std::size_t startId = m_outerIndex[outer]; + // FIXME let's make sure sizeof(long int) == sizeof(std::size_t) + std::size_t p = m_outerIndex[outer+1]; + ++m_outerIndex[outer+1]; + + double reallocRatio = 1; + if (m_data.allocatedSize()<=m_data.size()) + { + // if there is no preallocated memory, let's reserve a minimum of 32 elements + if (m_data.size()==0) + { + m_data.reserve(32); + } + else + { + // we need to reallocate the data, to reduce multiple reallocations + // we use a smart resize algorithm based on the current filling ratio + // in addition, we use double to avoid integers overflows + double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); + reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); + // furthermore we bound the realloc ratio to: + // 1) reduce multiple minor realloc when the matrix is almost filled + // 2) avoid to allocate too much memory when the matrix is almost empty + reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); + } + } + m_data.resize(m_data.size()+1,reallocRatio); + + if (!isLastVec) + { + if (previousOuter==-1) + { + // oops wrong guess. 
+ // let's correct the outer offsets + for (Index k=0; k<=(outer+1); ++k) + m_outerIndex[k] = 0; + Index k=outer+1; + while(m_outerIndex[k]==0) + m_outerIndex[k++] = 1; + while (k<=m_outerSize && m_outerIndex[k]!=0) + m_outerIndex[k++]++; + p = 0; + --k; + k = m_outerIndex[k]-1; + while (k>0) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + else + { + // we are not inserting into the last inner vec + // update outer indices: + Index j = outer+2; + while (j<=m_outerSize && m_outerIndex[j]!=0) + m_outerIndex[j++]++; + --j; + // shift data of last vecs: + Index k = m_outerIndex[j]-1; + while (k>=Index(p)) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + } + + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + + m_data.index(p) = inner; + return (m_data.value(p) = Scalar(0)); +} + +namespace internal { + +template +struct evaluator > + : evaluator > > +{ + typedef evaluator > > Base; + typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType; + evaluator() : Base() {} + explicit evaluator(const SparseMatrixType &mat) : Base(mat) {} +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSEMATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h new file mode 100644 index 000000000..c6b548f11 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h @@ -0,0 +1,405 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEMATRIXBASE_H +#define EIGEN_SPARSEMATRIXBASE_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * + * \class SparseMatrixBase + * + * \brief Base class of any sparse matrices or sparse expressions + * + * \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN. + */ +template class SparseMatrixBase + : public EigenBase +{ + public: + + typedef typename internal::traits::Scalar Scalar; + + /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. + * + * It is an alias for the Scalar type */ + typedef Scalar value_type; + + typedef typename internal::packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + + /** The integer type used to \b store indices within a SparseMatrix. + * For a \c SparseMatrix it an alias of the third template parameter \c IndexType. 
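+ *
+ * For example (illustrative), indices can be stored on 64 bits:
+ * \code
+ * typedef Eigen::SparseMatrix<double, Eigen::ColMajor, std::ptrdiff_t> BigSparse;
+ * // BigSparse::StorageIndex is std::ptrdiff_t; it must be a signed type
+ * \endcode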
*/ + typedef typename internal::traits::StorageIndex StorageIndex; + + typedef typename internal::add_const_on_value_type_if_arithmetic< + typename internal::packet_traits::type + >::type PacketReturnType; + + typedef SparseMatrixBase StorageBaseType; + + typedef Matrix IndexVector; + typedef Matrix ScalarVector; + + template + Derived& operator=(const EigenBase &other); + + enum { + + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + + MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), + + IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + Flags = internal::traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + IsRowMajor = Flags&RowMajorBit ? 1 : 0, + + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + + #ifndef EIGEN_PARSED_BY_DOXYGEN + _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC + #endif + }; + + /** \internal the return type of MatrixBase::adjoint() */ + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, Eigen::Transpose >, + Transpose + >::type AdjointReturnType; + typedef Transpose TransposeReturnType; + typedef typename internal::add_const >::type ConstTransposeReturnType; + + // FIXME storage order do not match evaluator storage order + typedef SparseMatrix PlainObject; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is the "real scalar" type; if the \a Scalar type is already real numbers + * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If + * \a Scalar is \a std::complex then RealScalar is \a T. 
+ * + * \sa class NumTraits + */ + typedef typename NumTraits::Real RealScalar; + + /** \internal the return type of coeff() + */ + typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType; + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,Matrix > ConstantReturnType; + + /** type of the equivalent dense matrix */ + typedef Matrix DenseMatrixType; + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; + + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + inline Derived& const_cast_derived() const + { return *static_cast(const_cast(this)); } + + typedef EigenBase Base; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase +#ifdef EIGEN_PARSED_BY_DOXYGEN +#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /**

This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() */ +#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" */ +#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations"
*/ +#else +#define EIGEN_DOC_UNARY_ADDONS(X,Y) +#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) +#endif +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# include "../plugins/BlockMethods.h" +# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN +# include EIGEN_SPARSEMATRIXBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_UNARY_ADDONS +#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF + + /** \returns the number of rows. \sa cols() */ + inline Index rows() const { return derived().rows(); } + /** \returns the number of columns. \sa rows() */ + inline Index cols() const { return derived().cols(); } + /** \returns the number of coefficients, which is \a rows()*cols(). + * \sa rows(), cols(). */ + inline Index size() const { return rows() * cols(); } + /** \returns true if either the number of rows or the number of columns is equal to 1. + * In other words, this function returns + * \code rows()==1 || cols()==1 \endcode + * \sa rows(), cols(), IsVectorAtCompileTime. */ + inline bool isVector() const { return rows()==1 || cols()==1; } + /** \returns the size of the storage major dimension, + * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ + Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + /** \returns the size of the inner dimension according to the storage order, + * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ + Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + + bool isRValue() const { return m_isRValue; } + Derived& markAsRValue() { m_isRValue = true; return derived(); } + + SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } + + + template + Derived& operator=(const ReturnByValue& other); + + template + inline Derived& operator=(const SparseMatrixBase& other); + + inline Derived& operator=(const Derived& other); + + protected: + + template + inline Derived& assign(const OtherDerived& other); + + template + inline void assignGeneric(const OtherDerived& other); + + public: + + friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) + { + typedef typename Derived::Nested Nested; + typedef typename internal::remove_all::type NestedCleaned; + + if (Flags&RowMajorBit) + { + Nested nm(m.derived()); + internal::evaluator thisEval(nm); + for (Index row=0; row::InnerIterator it(thisEval, row); it; ++it) + { + for ( ; col thisEval(nm); + if (m.cols() == 1) { + Index row = 0; + for (typename internal::evaluator::InnerIterator it(thisEval, 0); it; ++it) + { + for ( ; row trans = m; + s << static_cast >&>(trans); + } + } + return s; + } + + template + Derived& operator+=(const SparseMatrixBase& other); + template + Derived& operator-=(const SparseMatrixBase& other); + + template + Derived& operator+=(const DiagonalBase& other); + template + Derived& operator-=(const DiagonalBase& other); + + template + Derived& operator+=(const EigenBase &other); + template + Derived& operator-=(const EigenBase &other); + + Derived& operator*=(const Scalar& other); + Derived& operator/=(const Scalar& other); + + template struct CwiseProductDenseReturnType { + typedef CwiseBinaryOp::Scalar, + typename internal::traits::Scalar + >::ReturnType>, + const Derived, + 
const OtherDerived + > Type; + }; + + template + EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType::Type + cwiseProduct(const MatrixBase &other) const; + + // sparse * diagonal + template + const Product + operator*(const DiagonalBase &other) const + { return Product(derived(), other.derived()); } + + // diagonal * sparse + template friend + const Product + operator*(const DiagonalBase &lhs, const SparseMatrixBase& rhs) + { return Product(lhs.derived(), rhs.derived()); } + + // sparse * sparse + template + const Product + operator*(const SparseMatrixBase &other) const; + + // sparse * dense + template + const Product + operator*(const MatrixBase &other) const + { return Product(derived(), other.derived()); } + + // dense * sparse + template friend + const Product + operator*(const MatrixBase &lhs, const SparseMatrixBase& rhs) + { return Product(lhs.derived(), rhs.derived()); } + + /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ + SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const + { + return SparseSymmetricPermutationProduct(derived(), perm); + } + + template + Derived& operator*=(const SparseMatrixBase& other); + + template + inline const TriangularView triangularView() const; + + template struct SelfAdjointViewReturnType { typedef SparseSelfAdjointView Type; }; + template struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView Type; }; + + template inline + typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; + template inline + typename SelfAdjointViewReturnType::Type selfadjointView(); + + template Scalar dot(const MatrixBase& other) const; + template Scalar dot(const SparseMatrixBase& other) const; + RealScalar squaredNorm() const; + RealScalar norm() const; + RealScalar blueNorm() const; + + TransposeReturnType transpose() { return TransposeReturnType(derived()); } + const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); } + const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); } + + // inner-vector + typedef Block InnerVectorReturnType; + typedef Block ConstInnerVectorReturnType; + InnerVectorReturnType innerVector(Index outer); + const ConstInnerVectorReturnType innerVector(Index outer) const; + + // set of inner-vectors + typedef Block InnerVectorsReturnType; + typedef Block ConstInnerVectorsReturnType; + InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize); + const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const; + + DenseMatrixType toDense() const + { + return DenseMatrixType(derived()); + } + + template + bool isApprox(const SparseMatrixBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + + template + bool isApprox(const MatrixBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const + { return toDense().isApprox(other,prec); } + + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. 
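+ *
+ * For instance (illustrative), materializing a product expression once:
+ * \code
+ * Eigen::SparseMatrix<double> A(4, 4), B(4, 4);
+ * Eigen::SparseMatrix<double> C = (A * B).eval();  // forces evaluation
+ * \endcode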
+ */ + inline const typename internal::eval::type eval() const + { return typename internal::eval::type(derived()); } + + Scalar sum() const; + + inline const SparseView + pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits::dummy_precision()) const; + + protected: + + bool m_isRValue; + + static inline StorageIndex convert_index(const Index idx) { + return internal::convert_index(idx); + } + private: + template void evalTo(Dest &) const; +}; + +} // end namespace Eigen + +#endif // EIGEN_SPARSEMATRIXBASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparsePermutation.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparsePermutation.h new file mode 100644 index 000000000..ef38357ae --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparsePermutation.h @@ -0,0 +1,178 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_PERMUTATION_H +#define EIGEN_SPARSE_PERMUTATION_H + +// This file implements sparse * permutation products + +namespace Eigen { + +namespace internal { + +template +struct permutation_matrix_product +{ + typedef typename nested_eval::type MatrixType; + typedef typename remove_all::type MatrixTypeCleaned; + + typedef typename MatrixTypeCleaned::Scalar Scalar; + typedef typename MatrixTypeCleaned::StorageIndex StorageIndex; + + enum { + SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, + MoveOuter = SrcStorageOrder==RowMajor ? 
Side==OnTheLeft : Side==OnTheRight + }; + + typedef typename internal::conditional, + SparseMatrix >::type ReturnType; + + template + static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) + { + MatrixType mat(xpr); + if(MoveOuter) + { + SparseMatrix tmp(mat.rows(), mat.cols()); + Matrix sizes(mat.outerSize()); + for(Index j=0; j tmp(mat.rows(), mat.cols()); + Matrix sizes(tmp.outerSize()); + sizes.setZero(); + PermutationMatrix perm_cpy; + if((Side==OnTheLeft) ^ Transposed) + perm_cpy = perm; + else + perm_cpy = perm.transpose(); + + for(Index j=0; j struct product_promote_storage_type { typedef Sparse ret; }; +template struct product_promote_storage_type { typedef Sparse ret; }; + +// TODO, the following two overloads are only needed to define the right temporary type through +// typename traits >::ReturnType +// whereas it should be correctly handled by traits >::PlainObject + +template +struct product_evaluator, ProductTag, PermutationShape, SparseShape> + : public evaluator::ReturnType> +{ + typedef Product XprType; + typedef typename permutation_matrix_product::ReturnType PlainObject; + typedef evaluator Base; + + enum { + Flags = Base::Flags | EvalBeforeNestingBit + }; + + explicit product_evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); + } + +protected: + PlainObject m_result; +}; + +template +struct product_evaluator, ProductTag, SparseShape, PermutationShape > + : public evaluator::ReturnType> +{ + typedef Product XprType; + typedef typename permutation_matrix_product::ReturnType PlainObject; + typedef evaluator Base; + + enum { + Flags = Base::Flags | EvalBeforeNestingBit + }; + + explicit product_evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); + } + +protected: + PlainObject m_result; +}; + +} // end namespace internal + +/** \returns the matrix with the permutation applied to the columns + */ +template +inline const Product +operator*(const SparseMatrixBase& matrix, const PermutationBase& perm) +{ return Product(matrix.derived(), perm.derived()); } + +/** \returns the matrix with the permutation applied to the rows + */ +template +inline const Product +operator*( const PermutationBase& perm, const SparseMatrixBase& matrix) +{ return Product(perm.derived(), matrix.derived()); } + + +/** \returns the matrix with the inverse permutation applied to the columns. + */ +template +inline const Product, AliasFreeProduct> +operator*(const SparseMatrixBase& matrix, const InverseImpl& tperm) +{ + return Product, AliasFreeProduct>(matrix.derived(), tperm.derived()); +} + +/** \returns the matrix with the inverse permutation applied to the rows. 
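+ *
+ * For instance (illustrative):
+ * \code
+ * Eigen::SparseMatrix<double> A(3, 3);
+ * A.setIdentity();
+ * Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(3);
+ * P.setIdentity();
+ * Eigen::SparseMatrix<double> B = P.inverse() * A;  // permutes the rows of A
+ * \endcode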
+ */ +template +inline const Product, SparseDerived, AliasFreeProduct> +operator*(const InverseImpl& tperm, const SparseMatrixBase& matrix) +{ + return Product, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_PERMUTATION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseProduct.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseProduct.h new file mode 100644 index 000000000..4cbf68781 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseProduct.h @@ -0,0 +1,169 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEPRODUCT_H +#define EIGEN_SPARSEPRODUCT_H + +namespace Eigen { + +/** \returns an expression of the product of two sparse matrices. + * By default a conservative product preserving the symbolic non-zeros is performed. + * The automatic pruning of the small values can be achieved by calling the pruned() function + * in which case a totally different product algorithm is employed: + * \code + * C = (A*B).pruned(); // suppress numerical zeros (exact) + * C = (A*B).pruned(ref); + * C = (A*B).pruned(ref,epsilon); + * \endcode + * where \c ref is a meaningful non-zero reference value. + * */ +template +template +inline const Product +SparseMatrixBase::operator*(const SparseMatrixBase &other) const +{ + return Product(derived(), other.derived()); +} + +namespace internal { + +// sparse * sparse +template +struct generic_product_impl +{ + template + static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + { + evalTo(dst, lhs, rhs, typename evaluator_traits::Shape()); + } + + // dense += sparse * sparse + template + static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if::Shape,DenseShape>::value,int*>::type* = 0) + { + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhs); + RhsNested rhsNested(rhs); + internal::sparse_sparse_to_dense_product_selector::type, + typename remove_all::type, Dest>::run(lhsNested,rhsNested,dst); + } + + // dense -= sparse * sparse + template + static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if::Shape,DenseShape>::value,int*>::type* = 0) + { + addTo(dst, -lhs, rhs); + } + +protected: + + // sparse = sparse * sparse + template + static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape) + { + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhs); + RhsNested rhsNested(rhs); + internal::conservative_sparse_sparse_product_selector::type, + typename remove_all::type, Dest>::run(lhsNested,rhsNested,dst); + } + + // dense = sparse * sparse + template + static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape) + { + dst.setZero(); + addTo(dst, lhs, rhs); + } +}; + +// sparse * sparse-triangular +template +struct generic_product_impl + : public generic_product_impl +{}; + +// sparse-triangular * sparse
+template +struct generic_product_impl + : public generic_product_impl +{}; + +// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.) +template< typename DstXprType, typename Lhs, typename Rhs> +struct Assignment, internal::assign_op::Scalar>, Sparse2Dense> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + generic_product_impl::evalTo(dst,src.lhs(),src.rhs()); + } +}; + +// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.) +template< typename DstXprType, typename Lhs, typename Rhs> +struct Assignment, internal::add_assign_op::Scalar>, Sparse2Dense> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &) + { + generic_product_impl::addTo(dst,src.lhs(),src.rhs()); + } +}; + +// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.) +template< typename DstXprType, typename Lhs, typename Rhs> +struct Assignment, internal::sub_assign_op::Scalar>, Sparse2Dense> +{ + typedef Product SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &) + { + generic_product_impl::subTo(dst,src.lhs(),src.rhs()); + } +}; + +template +struct unary_evaluator >, IteratorBased> + : public evaluator::PlainObject> +{ + typedef SparseView > XprType; + typedef typename XprType::PlainObject PlainObject; + typedef evaluator Base; + + explicit unary_evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) + { + using std::abs; + ::new (static_cast(this)) Base(m_result); + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(xpr.nestedExpression().lhs()); + RhsNested rhsNested(xpr.nestedExpression().rhs()); + + internal::sparse_sparse_product_with_pruning_selector::type, + typename remove_all::type, PlainObject>::run(lhsNested,rhsNested,m_result, + abs(xpr.reference())*xpr.epsilon()); + } + +protected: + PlainObject m_result; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSEPRODUCT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRedux.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRedux.h new file mode 100644 index 000000000..458774962 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRedux.h @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
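The SparseProduct.h plumbing above is reached from user code through plain operator*; a minimal usage sketch follows (matrix sizes and values are illustrative only, not taken from this patch):

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  using SpMat = Eigen::SparseMatrix<double>;
  SpMat A(3, 3), B(3, 3);
  A.insert(0, 0) = 2.0;  A.insert(1, 2) = -1.0;  A.insert(2, 1) = 4.0;
  B.insert(0, 1) = 1.0;  B.insert(2, 2) = 5.0;
  A.makeCompressed(); B.makeCompressed();

  SpMat C  = A * B;             // conservative product: symbolic non-zeros are kept
  SpMat Cp = (A * B).pruned();  // pruned product: exact numerical zeros are dropped
  Eigen::MatrixXd D = Eigen::MatrixXd::Zero(3, 3);
  D += A * B;                   // dense += sparse*sparse, via the Sparse2Dense assignments above
  std::cout << C.nonZeros() << " / " << Cp.nonZeros() << std::endl;
  return 0;
}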
+ +#ifndef EIGEN_SPARSEREDUX_H +#define EIGEN_SPARSEREDUX_H + +namespace Eigen { + +template +typename internal::traits::Scalar +SparseMatrixBase::sum() const +{ + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + Scalar res(0); + internal::evaluator thisEval(derived()); + for (Index j=0; j::InnerIterator iter(thisEval,j); iter; ++iter) + res += iter.value(); + return res; +} + +template +typename internal::traits >::Scalar +SparseMatrix<_Scalar,_Options,_Index>::sum() const +{ + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + if(this->isCompressed()) + return Matrix::Map(m_data.valuePtr(), m_data.size()).sum(); + else + return Base::sum(); +} + +template +typename internal::traits >::Scalar +SparseVector<_Scalar,_Options,_Index>::sum() const +{ + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + return Matrix::Map(m_data.valuePtr(), m_data.size()).sum(); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSEREDUX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRef.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRef.h new file mode 100644 index 000000000..d91f38f97 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseRef.h @@ -0,0 +1,397 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
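The three sum() overloads just added differ only in how they reach the stored values: the generic SparseMatrixBase version iterates over an evaluator, while a compressed SparseMatrix and a SparseVector simply Map their value arrays. A small sketch of the observable behavior (values are illustrative):

#include <Eigen/SparseCore>
#include <cassert>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 1.5;
  A.insert(1, 1) = 2.5;
  A.makeCompressed();           // compressed storage takes the Map-based fast path
  assert(A.sum() == 4.0);

  Eigen::SparseVector<double> v(4);
  v.insert(2) = 3.0;
  assert(v.sum() == 3.0);       // SparseVector always maps its value array directly
  return 0;
}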
+ +#ifndef EIGEN_SPARSE_REF_H +#define EIGEN_SPARSE_REF_H + +namespace Eigen { + +enum { + StandardCompressedFormat = 2 /**< used by Ref to specify whether the input storage must be in standard compressed form */ +}; + +namespace internal { + +template class SparseRefBase; + +template +struct traits, _Options, _StrideType> > + : public traits > +{ + typedef SparseMatrix PlainObjectType; + enum { + Options = _Options, + Flags = traits::Flags | CompressedAccessBit | NestByRefBit + }; + + template struct match { + enum { + StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), + MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch + }; + typedef typename internal::conditional::type type; + }; + +}; + +template +struct traits, _Options, _StrideType> > + : public traits, _Options, _StrideType> > +{ + enum { + Flags = (traits >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit + }; +}; + +template +struct traits, _Options, _StrideType> > + : public traits > +{ + typedef SparseVector PlainObjectType; + enum { + Options = _Options, + Flags = traits::Flags | CompressedAccessBit | NestByRefBit + }; + + template struct match { + enum { + MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime + }; + typedef typename internal::conditional::type type; + }; + +}; + +template +struct traits, _Options, _StrideType> > + : public traits, _Options, _StrideType> > +{ + enum { + Flags = (traits >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit + }; +}; + +template +struct traits > : public traits {}; + +template class SparseRefBase + : public SparseMapBase +{ +public: + + typedef SparseMapBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase) + + SparseRefBase() + : Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0) + {} + +protected: + + template + void construct(Expression& expr) + { + if(expr.outerIndexPtr()==0) + ::new (static_cast(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr()); + else + ::new (static_cast(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr()); + } +}; + +} // namespace internal + + +/** + * \ingroup SparseCore_Module + * + * \brief A sparse matrix expression referencing an existing sparse expression + * + * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix. + * \tparam Options specifies whether the a standard compressed format is required \c Options is \c #StandardCompressedFormat, or \c 0. + * The default is \c 0. + * + * \sa class Ref + */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +class Ref, Options, StrideType > + : public internal::SparseRefBase, Options, StrideType > > +#else +template +class Ref + : public SparseMapBase // yes, that's weird to use Derived here, but that works! 
+#endif +{ + typedef SparseMatrix PlainObjectType; + typedef internal::traits Traits; + template + inline Ref(const SparseMatrix& expr); + template + inline Ref(const MappedSparseMatrix& expr); + public: + + typedef internal::SparseRefBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Ref) + + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline Ref(SparseMatrix& expr) + { + EIGEN_STATIC_ASSERT(bool(Traits::template match >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) ); + Base::construct(expr.derived()); + } + + template + inline Ref(MappedSparseMatrix& expr) + { + EIGEN_STATIC_ASSERT(bool(Traits::template match >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) ); + Base::construct(expr.derived()); + } + + template + inline Ref(const SparseCompressedBase& expr) + #else + /** Implicit constructor from any sparse expression (2D matrix or 1D vector) */ + template + inline Ref(SparseCompressedBase& expr) + #endif + { + EIGEN_STATIC_ASSERT(bool(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) ); + Base::construct(expr.const_cast_derived()); + } +}; + +// this is the const ref version +template +class Ref, Options, StrideType> + : public internal::SparseRefBase, Options, StrideType> > +{ + typedef SparseMatrix TPlainObjectType; + typedef internal::traits Traits; + public: + + typedef internal::SparseRefBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Ref) + + template + inline Ref(const SparseMatrixBase& expr) : m_hasCopy(false) + { + construct(expr.derived(), typename Traits::template match::type()); + } + + inline Ref(const Ref& other) : Base(other), m_hasCopy(false) { + // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy + } + + template + inline Ref(const RefBase& other) : m_hasCopy(false) { + construct(other.derived(), typename Traits::template match::type()); + } + + ~Ref() { + if(m_hasCopy) { + TPlainObjectType* obj = reinterpret_cast(m_object_bytes); + obj->~TPlainObjectType(); + } + } + + protected: + + template + void construct(const Expression& expr,internal::true_type) + { + if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed())) + { + TPlainObjectType* obj = reinterpret_cast(m_object_bytes); + ::new (obj) TPlainObjectType(expr); + m_hasCopy = true; + Base::construct(*obj); + } + else + { + Base::construct(expr); + } + } + + template + void construct(const Expression& expr, internal::false_type) + { + TPlainObjectType* obj = reinterpret_cast(m_object_bytes); + ::new (obj) TPlainObjectType(expr); + m_hasCopy = true; + Base::construct(*obj); + } + + protected: + char m_object_bytes[sizeof(TPlainObjectType)]; + bool m_hasCopy; +}; + + + +/** + * \ingroup SparseCore_Module + * + * \brief A sparse vector expression referencing an existing sparse vector expression + * + * \tparam SparseVectorType the equivalent sparse vector type of the referenced data, it must be a template instance of class SparseVector. 
+ * + * \sa class Ref + */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +class Ref, Options, StrideType > + : public internal::SparseRefBase, Options, StrideType > > +#else +template +class Ref + : public SparseMapBase +#endif +{ + typedef SparseVector PlainObjectType; + typedef internal::traits Traits; + template + inline Ref(const SparseVector& expr); + public: + + typedef internal::SparseRefBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Ref) + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline Ref(SparseVector& expr) + { + EIGEN_STATIC_ASSERT(bool(Traits::template match >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + Base::construct(expr.derived()); + } + + template + inline Ref(const SparseCompressedBase& expr) + #else + /** Implicit constructor from any 1D sparse vector expression */ + template + inline Ref(SparseCompressedBase& expr) + #endif + { + EIGEN_STATIC_ASSERT(bool(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + Base::construct(expr.const_cast_derived()); + } +}; + +// this is the const ref version +template +class Ref, Options, StrideType> + : public internal::SparseRefBase, Options, StrideType> > +{ + typedef SparseVector TPlainObjectType; + typedef internal::traits Traits; + public: + + typedef internal::SparseRefBase Base; + EIGEN_SPARSE_PUBLIC_INTERFACE(Ref) + + template + inline Ref(const SparseMatrixBase& expr) : m_hasCopy(false) + { + construct(expr.derived(), typename Traits::template match::type()); + } + + inline Ref(const Ref& other) : Base(other), m_hasCopy(false) { + // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy + } + + template + inline Ref(const RefBase& other) : m_hasCopy(false) { + construct(other.derived(), typename Traits::template match::type()); + } + + ~Ref() { + if(m_hasCopy) { + TPlainObjectType* obj = reinterpret_cast(m_object_bytes); + obj->~TPlainObjectType(); + } + } + + protected: + + template + void construct(const Expression& expr,internal::true_type) + { + Base::construct(expr); + } + + template + void construct(const Expression& expr, internal::false_type) + { + TPlainObjectType* obj = reinterpret_cast(m_object_bytes); + ::new (obj) TPlainObjectType(expr); + m_hasCopy = true; + Base::construct(*obj); + } + + protected: + char m_object_bytes[sizeof(TPlainObjectType)]; + bool m_hasCopy; +}; + +namespace internal { + +// FIXME shall we introduce a general evaluatior_ref that we can specialize for any sparse object once, and thus remove this copy-pasta thing... 
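The const Ref specializations above decide, through the match trait, whether an incoming expression can be mapped in place or must first be evaluated into the internal m_object_bytes temporary. A sketch of both paths (the function name is illustrative, not part of Eigen):

#include <Eigen/SparseCore>

// Binds without copying when the argument is already a compressed,
// column-major SparseMatrix<double>; otherwise a hidden temporary is built.
double firstStoredValue(const Eigen::Ref<const Eigen::SparseMatrix<double> >& m) {
  return m.nonZeros() > 0 ? m.valuePtr()[0] : 0.0;
}

int main() {
  Eigen::SparseMatrix<double> A(3, 3);  // column-major by default
  A.insert(1, 1) = 7.0;
  A.makeCompressed();
  firstStoredValue(A);        // mapped in place: storage order and compression match
  firstStoredValue(2.0 * A);  // expression: evaluated into the internal copy first
  return 0;
}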
+ +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Ref, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Ref, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Ref, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +template +struct evaluator, Options, StrideType> > + : evaluator, Options, StrideType> > > +{ + typedef evaluator, Options, StrideType> > > Base; + typedef Ref, Options, StrideType> XprType; + evaluator() : Base() {} + explicit evaluator(const XprType &mat) : Base(mat) {} +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_REF_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h new file mode 100644 index 000000000..a0153c4aa --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -0,0 +1,656 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H +#define EIGEN_SPARSE_SELFADJOINTVIEW_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * \class SparseSelfAdjointView + * + * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix. + * + * \param MatrixType the type of the underlying sparse matrix storing the coefficients + * \param Mode can be either \c #Lower or \c #Upper + * + * This class is an expression of a selfadjoint matrix from a triangular part of a matrix + * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView() + * and most of the time this is the only way that it is used. + * + * \sa SparseMatrixBase::selfadjointView() + */ +namespace internal { + +template +struct traits > : traits { +}; + +template +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); + +template +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); + +} + +template class SparseSelfAdjointView + : public EigenBase > +{ + public: + + enum { + Mode = _Mode, + TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ?
Upper : 0), + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime + }; + + typedef EigenBase Base; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix VectorI; + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; + + explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix) + { + eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); + } + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + /** \internal \returns a reference to the nested matrix */ + const _MatrixTypeNested& matrix() const { return m_matrix; } + typename internal::remove_reference::type& matrix() { return m_matrix; } + + /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. + * + * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. + * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. + */ + template + Product + operator*(const SparseMatrixBase& rhs) const + { + return Product(*this, rhs.derived()); + } + + /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. + * + * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. + * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. + */ + template friend + Product + operator*(const SparseMatrixBase& lhs, const SparseSelfAdjointView& rhs) + { + return Product(lhs.derived(), rhs); + } + + /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ + template + Product + operator*(const MatrixBase& rhs) const + { + return Product(*this, rhs.derived()); + } + + /** Efficient dense vector/matrix times sparse self-adjoint matrix product */ + template friend + Product + operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) + { + return Product(lhs.derived(), rhs); + } + + /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: + * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. + * + * \returns a reference to \c *this + * + * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply + * call this function with u.adjoint(). 
+ */ + template + SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, const Scalar& alpha = Scalar(1)); + + /** \returns an expression of P H P^-1 */ + // TODO implement twists in a more evaluator friendly fashion + SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const + { + return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm); + } + + template + SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct& permutedMatrix) + { + internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix); + return *this; + } + + SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) + { + PermutationMatrix pnull; + return *this = src.twistedBy(pnull); + } + + template + SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) + { + PermutationMatrix pnull; + return *this = src.twistedBy(pnull); + } + + void resize(Index rows, Index cols) + { + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + eigen_assert(rows == this->rows() && cols == this->cols() + && "SparseSelfadjointView::resize() does not actually allow one to resize."); + } + + protected: + + MatrixTypeNested m_matrix; + //mutable VectorI m_countPerRow; + //mutable VectorI m_countPerCol; + private: + template void evalTo(Dest &) const; +}; + +/*************************************************************************** +* Implementation of SparseMatrixBase methods +***************************************************************************/ + +template +template +typename SparseMatrixBase::template ConstSelfAdjointViewReturnType::Type SparseMatrixBase::selfadjointView() const +{ + return SparseSelfAdjointView(derived()); +} + +template +template +typename SparseMatrixBase::template SelfAdjointViewReturnType::Type SparseMatrixBase::selfadjointView() +{ + return SparseSelfAdjointView(derived()); +} + +/*************************************************************************** +* Implementation of SparseSelfAdjointView methods +***************************************************************************/ + +template +template +SparseSelfAdjointView& +SparseSelfAdjointView::rankUpdate(const SparseMatrixBase& u, const Scalar& alpha) +{ + SparseMatrix tmp = u * u.adjoint(); + if(alpha==Scalar(0)) + m_matrix = tmp.template triangularView(); + else + m_matrix += alpha * tmp.template triangularView(); + + return *this; +} + +namespace internal { + +// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.> +// in the future selfadjoint-ness should be defined by the expression traits +// such that Transpose > is valid. 
(currently TriangularBase::transpose() is overloaded to make it work) +template +struct evaluator_traits > +{ + typedef typename storage_kind_to_evaluator_kind::Kind Kind; + typedef SparseSelfAdjointShape Shape; +}; + +struct SparseSelfAdjoint2Sparse {}; + +template<> struct AssignmentKind { typedef SparseSelfAdjoint2Sparse Kind; }; +template<> struct AssignmentKind { typedef Sparse2Sparse Kind; }; + +template< typename DstXprType, typename SrcXprType, typename Functor> +struct Assignment +{ + typedef typename DstXprType::StorageIndex StorageIndex; + typedef internal::assign_op AssignOpType; + + template + static void run(SparseMatrix &dst, const SrcXprType &src, const AssignOpType&/*func*/) + { + internal::permute_symm_to_fullsymm(src.matrix(), dst); + } + + // FIXME: the handling of += and -= in sparse matrices should be cleanup so that next two overloads could be reduced to: + template + static void run(SparseMatrix &dst, const SrcXprType &src, const AssignFunc& func) + { + SparseMatrix tmp(src.rows(),src.cols()); + run(tmp, src, AssignOpType()); + call_assignment_no_alias_no_transpose(dst, tmp, func); + } + + template + static void run(SparseMatrix &dst, const SrcXprType &src, + const internal::add_assign_op& /* func */) + { + SparseMatrix tmp(src.rows(),src.cols()); + run(tmp, src, AssignOpType()); + dst += tmp; + } + + template + static void run(SparseMatrix &dst, const SrcXprType &src, + const internal::sub_assign_op& /* func */) + { + SparseMatrix tmp(src.rows(),src.cols()); + run(tmp, src, AssignOpType()); + dst -= tmp; + } + + template + static void run(DynamicSparseMatrix& dst, const SrcXprType &src, const AssignOpType&/*func*/) + { + // TODO directly evaluate into dst; + SparseMatrix tmp(dst.rows(),dst.cols()); + internal::permute_symm_to_fullsymm(src.matrix(), tmp); + dst = tmp; + } +}; + +} // end namespace internal + +/*************************************************************************** +* Implementation of sparse self-adjoint time dense matrix +***************************************************************************/ + +namespace internal { + +template +inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) +{ + EIGEN_ONLY_USED_FOR_DEBUG(alpha); + + typedef typename internal::nested_eval::type SparseLhsTypeNested; + typedef typename internal::remove_all::type SparseLhsTypeNestedCleaned; + typedef evaluator LhsEval; + typedef typename LhsEval::InnerIterator LhsIterator; + typedef typename SparseLhsType::Scalar LhsScalar; + + enum { + LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit, + ProcessFirstHalf = + ((Mode&(Upper|Lower))==(Upper|Lower)) + || ( (Mode&Upper) && !LhsIsRowMajor) + || ( (Mode&Lower) && LhsIsRowMajor), + ProcessSecondHalf = !ProcessFirstHalf + }; + + SparseLhsTypeNested lhs_nested(lhs); + LhsEval lhsEval(lhs_nested); + + // work on one column at once + for (Index k=0; k::ReturnType rhs_j(alpha*rhs(j,k)); + // accumulator for partial scalar product + typename DenseResType::Scalar res_j(0); + for(; (ProcessFirstHalf ? 
i && i.index() < j : i) ; ++i) + { + LhsScalar lhs_ij = i.value(); + if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij); + res_j += lhs_ij * rhs.coeff(i.index(),k); + res(i.index(),k) += numext::conj(lhs_ij) * rhs_j; + } + res.coeffRef(j,k) += alpha * res_j; + + // handle diagonal coeff + if (ProcessFirstHalf && i && (i.index()==j)) + res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k); + } + } +} + + +template +struct generic_product_impl +: generic_product_impl_base > +{ + template + static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha) + { + typedef typename LhsView::_MatrixTypeNested Lhs; + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhsView.matrix()); + RhsNested rhsNested(rhs); + + internal::sparse_selfadjoint_time_dense_product(lhsNested, rhsNested, dst, alpha); + } +}; + +template +struct generic_product_impl +: generic_product_impl_base > +{ + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha) + { + typedef typename RhsView::_MatrixTypeNested Rhs; + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + LhsNested lhsNested(lhs); + RhsNested rhsNested(rhsView.matrix()); + + // transpose everything + Transpose dstT(dst); + internal::sparse_selfadjoint_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha); + } +}; + +// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix +// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore + +template +struct product_evaluator, ProductTag, SparseSelfAdjointShape, SparseShape> + : public evaluator::PlainObject> +{ + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef evaluator Base; + + product_evaluator(const XprType& xpr) + : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + generic_product_impl::evalTo(m_result, m_lhs, xpr.rhs()); + } + +protected: + typename Rhs::PlainObject m_lhs; + PlainObject m_result; +}; + +template +struct product_evaluator, ProductTag, SparseShape, SparseSelfAdjointShape> + : public evaluator::PlainObject> +{ + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef evaluator Base; + + product_evaluator(const XprType& xpr) + : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols()) + { + ::new (static_cast(this)) Base(m_result); + generic_product_impl::evalTo(m_result, xpr.lhs(), m_rhs); + } + +protected: + typename Lhs::PlainObject m_rhs; + PlainObject m_result; +}; + +} // namespace internal + +/*************************************************************************** +* Implementation of symmetric copies and permutations +***************************************************************************/ +namespace internal { + +template +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) +{ + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename MatrixType::Scalar Scalar; + typedef SparseMatrix Dest; + typedef Matrix VectorI; + typedef evaluator MatEval; + typedef typename evaluator::InnerIterator MatIterator; + + MatEval matEval(mat); + Dest& dest(_dest.derived()); + enum { + StorageOrderMatch = int(Dest::IsRowMajor) == 
int(MatrixType::IsRowMajor) + }; + + Index size = mat.rows(); + VectorI count; + count.resize(size); + count.setZero(); + dest.resize(size,size); + for(Index j = 0; jc) || ( Mode==Upper && r(it.index()); + Index r = it.row(); + Index c = it.col(); + + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm ? perm[i] : i; + + if(Mode==(Upper|Lower)) + { + Index k = count[StorageOrderMatch ? jp : ip]++; + dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp; + dest.valuePtr()[k] = it.value(); + } + else if(r==c) + { + Index k = count[ip]++; + dest.innerIndexPtr()[k] = ip; + dest.valuePtr()[k] = it.value(); + } + else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) +{ + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename MatrixType::Scalar Scalar; + SparseMatrix& dest(_dest.derived()); + typedef Matrix VectorI; + typedef evaluator MatEval; + typedef typename evaluator::InnerIterator MatIterator; + + enum { + SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, + StorageOrderMatch = int(SrcOrder) == int(DstOrder), + DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode, + SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode + }; + + MatEval matEval(mat); + + Index size = mat.rows(); + VectorI count(size); + count.setZero(); + dest.resize(size,size); + for(StorageIndex j = 0; jj)) + continue; + + StorageIndex ip = perm ? perm[i] : i; + count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; + } + } + dest.outerIndexPtr()[0] = 0; + for(Index j=0; jj)) + continue; + + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm? perm[i] : i; + + Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; + dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? 
(std::max)(ip,jp) : (std::min)(ip,jp); + + if(!StorageOrderMatch) std::swap(ip,jp); + if( ((int(DstMode)==int(Lower) && ipjp))) + dest.valuePtr()[k] = numext::conj(it.value()); + else + dest.valuePtr()[k] = it.value(); + } + } +} + +} + +// TODO implement twists in a more evaluator friendly fashion + +namespace internal { + +template +struct traits > : traits { +}; + +} + +template +class SparseSymmetricPermutationProduct + : public EigenBase > +{ + public: + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime + }; + protected: + typedef PermutationMatrix Perm; + public: + typedef Matrix VectorI; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_all::type NestedExpression; + + SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm) + : m_matrix(mat), m_perm(perm) + {} + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + const NestedExpression& matrix() const { return m_matrix; } + const Perm& perm() const { return m_perm; } + + protected: + MatrixTypeNested m_matrix; + const Perm& m_perm; + +}; + +namespace internal { + +template +struct Assignment, internal::assign_op, Sparse2Sparse> +{ + typedef SparseSymmetricPermutationProduct SrcXprType; + typedef typename DstXprType::StorageIndex DstIndex; + template + static void run(SparseMatrix &dst, const SrcXprType &src, const internal::assign_op &) + { + // internal::permute_symm_to_fullsymm(m_matrix,_dest,m_perm.indices().data()); + SparseMatrix tmp; + internal::permute_symm_to_fullsymm(src.matrix(),tmp,src.perm().indices().data()); + dst = tmp; + } + + template + static void run(SparseSelfAdjointView& dst, const SrcXprType &src, const internal::assign_op &) + { + internal::permute_symm_to_symm(src.matrix(),dst.matrix(),src.perm().indices().data()); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSolverBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSolverBase.h new file mode 100644 index 000000000..b4c9a422f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSolverBase.h @@ -0,0 +1,124 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSESOLVERBASE_H +#define EIGEN_SPARSESOLVERBASE_H + +namespace Eigen { + +namespace internal { + + /** \internal + * Helper functions to solve with a sparse right-hand-side and result. + * The rhs is decomposed into small vertical panels which are solved through dense temporaries. 
+ */ +template +typename enable_if::type +solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest) +{ + EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + typedef typename Dest::Scalar DestScalar; + // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. + static const Index NbColsAtOnce = 4; + Index rhsCols = rhs.cols(); + Index size = rhs.rows(); + // the temporary matrices do not need more columns than NbColsAtOnce: + Index tmpCols = (std::min)(rhsCols, NbColsAtOnce); + Eigen::Matrix tmp(size,tmpCols); + Eigen::Matrix tmpX(size,tmpCols); + for(Index k=0; k(rhsCols-k, NbColsAtOnce); + tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols); + tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols)); + dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); + } +} + +// Overload for vector as rhs +template +typename enable_if::type +solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest) +{ + typedef typename Dest::Scalar DestScalar; + Index size = rhs.rows(); + Eigen::Matrix rhs_dense(rhs); + Eigen::Matrix dest_dense(size); + dest_dense = dec.solve(rhs_dense); + dest = dest_dense.sparseView(); +} + +} // end namespace internal + +/** \class SparseSolverBase + * \ingroup SparseCore_Module + * \brief A base class for sparse solvers + * + * \tparam Derived the actual type of the solver. + * + */ +template +class SparseSolverBase : internal::noncopyable +{ + public: + + /** Default constructor */ + SparseSolverBase() + : m_isInitialized(false) + {} + + ~SparseSolverBase() + {} + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "Solver is not initialized."); + eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); + return Solve(derived(), b.derived()); + } + + /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. 
+ * + * \sa compute() + */ + template + inline const Solve + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "Solver is not initialized."); + eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); + return Solve(derived(), b.derived()); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal default implementation of solving with a sparse rhs */ + template + void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const + { + internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived()); + } + #endif // EIGEN_PARSED_BY_DOXYGEN + + protected: + + mutable bool m_isInitialized; +}; + +} // end namespace Eigen + +#endif // EIGEN_SPARSESOLVERBASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h new file mode 100644 index 000000000..88820a48f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -0,0 +1,198 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H +#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H + +namespace Eigen { + +namespace internal { + + +// perform a pseudo in-place sparse * sparse product assuming all matrices are col major +template +static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance) +{ + // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); + + typedef typename remove_all::type::Scalar RhsScalar; + typedef typename remove_all::type::Scalar ResScalar; + typedef typename remove_all::type::StorageIndex StorageIndex; + + // make sure to call innerSize/outerSize since we fake the storage order. + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); + //Index size = lhs.outerSize(); + eigen_assert(lhs.outerSize() == rhs.innerSize()); + + // allocate a temporary buffer + AmbiVector tempVector(rows); + + // mimics a resizeByInnerOuter: + if(ResultType::IsRowMajor) + res.resize(cols, rows); + else + res.resize(rows, cols); + + evaluator lhsEval(lhs); + evaluator rhsEval(rhs); + + // estimate the number of non zero entries + // given a rhs column containing Y non zeros, we assume that the respective Y columns + // of the lhs differs in average of one non zeros, thus the number of non zeros for + // the product of a rhs column with the lhs is X+Y where X is the average number of non zero + // per column of the lhs. 
+ // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) + Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate(); + + res.reserve(estimated_nnz_prod); + double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols())); + for (Index j=0; j::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) + { + // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index()) + tempVector.restart(); + RhsScalar x = rhsIt.value(); + for (typename evaluator::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt) + { + tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x; + } + } + res.startVec(j); + for (typename AmbiVector::Iterator it(tempVector,tolerance); it; ++it) + res.insertBackByOuterInner(j,it.index()) = it.value(); + } + res.finalize(); +} + +template::Flags&RowMajorBit, + int RhsStorageOrder = traits::Flags&RowMajorBit, + int ResStorageOrder = traits::Flags&RowMajorBit> +struct sparse_sparse_product_with_pruning_selector; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typename remove_all::type _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); + res.swap(_res); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + // we need a col-major matrix to hold the result + typedef SparseMatrix SparseTemporaryType; + SparseTemporaryType _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); + res = _res; + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + // let's transpose the product to get a column x column product + typename remove_all::type _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(rhs, lhs, _res, tolerance); + res.swap(_res); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typedef SparseMatrix ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixRhs; + ColMajorMatrixLhs colLhs(lhs); + ColMajorMatrixRhs colRhs(rhs); + internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); + + // let's transpose the product to get a column x column product +// typedef SparseMatrix SparseTemporaryType; +// SparseTemporaryType _res(res.cols(), res.rows()); +// sparse_sparse_product_with_pruning_impl(rhs, lhs, _res); +// res = _res.transpose(); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typedef SparseMatrix RowMajorMatrixLhs; + RowMajorMatrixLhs rowLhs(lhs); + sparse_sparse_product_with_pruning_selector(rowLhs,rhs,res,tolerance); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void 
run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typedef SparseMatrix RowMajorMatrixRhs; + RowMajorMatrixRhs rowRhs(rhs); + sparse_sparse_product_with_pruning_selector(lhs,rowRhs,res,tolerance); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typedef SparseMatrix ColMajorMatrixRhs; + ColMajorMatrixRhs colRhs(rhs); + internal::sparse_sparse_product_with_pruning_impl(lhs, colRhs, res, tolerance); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) + { + typedef SparseMatrix ColMajorMatrixLhs; + ColMajorMatrixLhs colLhs(lhs); + internal::sparse_sparse_product_with_pruning_impl(colLhs, rhs, res, tolerance); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTranspose.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTranspose.h new file mode 100644 index 000000000..3757d4c6b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTranspose.h @@ -0,0 +1,92 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
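These selectors are what ultimately service pruned() when an explicit tolerance is given: while the product is formed, entries whose magnitude is at most |reference| * epsilon are discarded. A sketch with numbers chosen to force a near-zero cancellation (illustrative only):

#include <Eigen/SparseCore>
#include <cassert>

int main() {
  using SpMat = Eigen::SparseMatrix<double>;
  SpMat A(2, 2), B(2, 2);
  A.insert(0, 0) = 1.0;  A.insert(0, 1) = -1.0;
  B.insert(0, 0) = 1.0;  B.insert(1, 0) = 1.0 + 1e-14;
  A.makeCompressed(); B.makeCompressed();

  // C(0,0) = 1*1 - 1*(1 + 1e-14) ~= -1e-14: the conservative product would keep it,
  // but it is pruned here because |-1e-14| <= |1.0| * 1e-9.
  SpMat C = (A * B).pruned(1.0, 1e-9);
  assert(C.nonZeros() == 0);
  return 0;
}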
+ +#ifndef EIGEN_SPARSETRANSPOSE_H +#define EIGEN_SPARSETRANSPOSE_H + +namespace Eigen { + +namespace internal { + template + class SparseTransposeImpl + : public SparseMatrixBase > + {}; + + template + class SparseTransposeImpl + : public SparseCompressedBase > + { + typedef SparseCompressedBase > Base; + public: + using Base::derived; + typedef typename Base::Scalar Scalar; + typedef typename Base::StorageIndex StorageIndex; + + inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } + + inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); } + inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); } + inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); } + inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); } + + inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); } + inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); } + inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); } + inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); } + }; +} + +template class TransposeImpl + : public internal::SparseTransposeImpl +{ + protected: + typedef internal::SparseTransposeImpl Base; +}; + +namespace internal { + +template +struct unary_evaluator, IteratorBased> + : public evaluator_base > +{ + typedef typename evaluator::InnerIterator EvalIterator; + public: + typedef Transpose XprType; + + inline Index nonZerosEstimate() const { + return m_argImpl.nonZerosEstimate(); + } + + class InnerIterator : public EvalIterator + { + public: + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) + : EvalIterator(unaryOp.m_argImpl,outer) + {} + + Index row() const { return EvalIterator::col(); } + Index col() const { return EvalIterator::row(); } + }; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {} + + protected: + evaluator m_argImpl; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSETRANSPOSE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTriangularView.h new file mode 100644 index 000000000..9ac120266 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseTriangularView.h @@ -0,0 +1,189 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2015 Gael Guennebaud +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
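SparseTranspose.h above makes A.transpose() a zero-copy view: the pointer accessors forward to the nested expression, and the iterator merely swaps row() and col(). Data is copied only on assignment, as in this sketch (values illustrative):

#include <Eigen/SparseCore>
#include <cassert>

int main() {
  Eigen::SparseMatrix<double> A(2, 3);
  A.insert(0, 2) = 5.0;
  A.makeCompressed();

  // The view itself allocates nothing; materialization happens on this assignment.
  Eigen::SparseMatrix<double> At = A.transpose();
  assert(At.rows() == 3 && At.cols() == 2);
  assert(At.coeff(2, 0) == 5.0);
  return 0;
}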
+ +#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H +#define EIGEN_SPARSE_TRIANGULARVIEW_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * + * \brief Base class for a triangular part in a \b sparse matrix + * + * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated. + * It extends class TriangularView with additional methods which are available for sparse expressions only. + * + * \sa class TriangularView, SparseMatrixBase::triangularView() + */ +template class TriangularViewImpl + : public SparseMatrixBase > +{ + enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) + || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), + SkipLast = !SkipFirst, + SkipDiag = (Mode&ZeroDiag) ? 1 : 0, + HasUnitDiag = (Mode&UnitDiag) ? 1 : 0 + }; + + typedef TriangularView TriangularViewType; + + protected: + // dummy solve function to make TriangularView happy. + void solve() const; + + typedef SparseMatrixBase Base; + public: + + EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType) + + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_reference::type MatrixTypeNestedNonRef; + typedef typename internal::remove_all::type MatrixTypeNestedCleaned; + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const { + if(!(internal::is_same::value && internal::extract_data(dst) == internal::extract_data(rhs))) + dst = rhs; + this->solveInPlace(dst); + } + + /** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */ + template void solveInPlace(MatrixBase& other) const; + + /** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */ + template void solveInPlace(SparseMatrixBase& other) const; + +}; + +namespace internal { + +template +struct unary_evaluator, IteratorBased> + : evaluator_base > +{ + typedef TriangularView XprType; + +protected: + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::StorageIndex StorageIndex; + typedef typename evaluator::InnerIterator EvalIterator; + + enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit)) + || ((Mode&Upper) && (ArgType::Flags&RowMajorBit)), + SkipLast = !SkipFirst, + SkipDiag = (Mode&ZeroDiag) ? 1 : 0, + HasUnitDiag = (Mode&UnitDiag) ? 
1 : 0 + }; + +public: + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {} + + inline Index nonZerosEstimate() const { + return m_argImpl.nonZerosEstimate(); + } + + class InnerIterator : public EvalIterator + { + typedef EvalIterator Base; + public: + + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer) + : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()index()<=outer : this->index()=Base::outer())) + { + if((!SkipFirst) && Base::operator bool()) + Base::operator++(); + m_returnOne = m_containsDiag; + } + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + if(HasUnitDiag && m_returnOne) + m_returnOne = false; + else + { + Base::operator++(); + if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer())) + { + if((!SkipFirst) && Base::operator bool()) + Base::operator++(); + m_returnOne = m_containsDiag; + } + } + return *this; + } + + EIGEN_STRONG_INLINE operator bool() const + { + if(HasUnitDiag && m_returnOne) + return true; + if(SkipFirst) return Base::operator bool(); + else + { + if (SkipDiag) return (Base::operator bool() && this->index() < this->outer()); + else return (Base::operator bool() && this->index() <= this->outer()); + } + } + +// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } +// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } + inline StorageIndex index() const + { + if(HasUnitDiag && m_returnOne) return internal::convert_index(Base::outer()); + else return Base::index(); + } + inline Scalar value() const + { + if(HasUnitDiag && m_returnOne) return Scalar(1); + else return Base::value(); + } + + protected: + bool m_returnOne; + bool m_containsDiag; + private: + Scalar& valueRef(); + }; + +protected: + evaluator m_argImpl; + const ArgType& m_arg; +}; + +} // end namespace internal + +template +template +inline const TriangularView +SparseMatrixBase::triangularView() const +{ + return TriangularView(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_TRIANGULARVIEW_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseUtil.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseUtil.h new file mode 100644 index 000000000..74df0d496 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseUtil.h @@ -0,0 +1,178 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
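The triangular view concluded above is used mostly through its solve path: solveInPlace() performs sparse forward or backward substitution directly on the right-hand side. A sketch with a small lower-triangular system (values illustrative):

#include <Eigen/SparseCore>
#include <cassert>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 2.0;
  A.insert(1, 0) = 1.0;
  A.insert(1, 1) = 4.0;
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 2.0, 6.0;

  // Solves (lower-triangular A) * x = b by forward substitution, overwriting b.
  A.triangularView<Eigen::Lower>().solveInPlace(b);
  assert(b(0) == 1.0 && b(1) == 1.25);  // x = (1, 1.25)
  return 0;
}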
+ +#ifndef EIGEN_SPARSEUTIL_H +#define EIGEN_SPARSEUTIL_H + +namespace Eigen { + +#ifdef NDEBUG +#define EIGEN_DBG_SPARSE(X) +#else +#define EIGEN_DBG_SPARSE(X) X +#endif + +#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase& other) \ +{ \ + return Base::operator Op(other.derived()); \ +} \ +EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ +{ \ + return Base::operator Op(other); \ +} + +#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ +{ \ + return Base::operator Op(scalar); \ +} + +#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ +EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) + + +#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ + EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) + + +const int CoherentAccessPattern = 0x1; +const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; +const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; +const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; + +template class SparseMatrix; +template class DynamicSparseMatrix; +template class SparseVector; +template class MappedSparseMatrix; + +template class SparseSelfAdjointView; +template class SparseDiagonalProduct; +template class SparseView; + +template class SparseSparseProduct; +template class SparseTimeDenseProduct; +template class DenseTimeSparseProduct; +template class SparseDenseOuterProduct; + +template struct SparseSparseProductReturnType; +template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct DenseSparseProductReturnType; + +template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct SparseDenseProductReturnType; +template class SparseSymmetricPermutationProduct; + +namespace internal { + +template struct sparse_eval; + +template struct eval + : sparse_eval::RowsAtCompileTime,traits::ColsAtCompileTime,traits::Flags> +{}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + typedef typename traits::StorageIndex _StorageIndex; + public: + typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type; +}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + typedef typename traits::StorageIndex _StorageIndex; + public: + typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type; +}; + +// TODO this seems almost identical to plain_matrix_type +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + typedef typename traits::StorageIndex _StorageIndex; + enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; + public: + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; +}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + public: + typedef Matrix<_Scalar, 1, 1> type; +}; + +template struct plain_matrix_type +{ + typedef typename traits::Scalar _Scalar; + typedef typename traits::StorageIndex _StorageIndex; + enum { _Options = ((evaluator::Flags&RowMajorBit)==RowMajorBit) ? 
RowMajor : ColMajor }; + public: + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; +}; + +template +struct plain_object_eval + : sparse_eval::RowsAtCompileTime,traits::ColsAtCompileTime, evaluator::Flags> +{}; + +template +struct solve_traits +{ + typedef typename sparse_eval::Flags>::type PlainObject; +}; + +template +struct generic_xpr_base +{ + typedef SparseMatrixBase type; +}; + +struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } }; +struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } }; + +template<> struct glue_shapes { typedef SparseSelfAdjointShape type; }; +template<> struct glue_shapes { typedef SparseTriangularShape type; }; + +} // end namespace internal + +/** \ingroup SparseCore_Module + * + * \class Triplet + * + * \brief A small structure to hold a non zero as a triplet (i,j,value). + * + * \sa SparseMatrix::setFromTriplets() + */ +template::StorageIndex > +class Triplet +{ +public: + Triplet() : m_row(0), m_col(0), m_value(0) {} + + Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0)) + : m_row(i), m_col(j), m_value(v) + {} + + /** \returns the row index of the element */ + const StorageIndex& row() const { return m_row; } + + /** \returns the column index of the element */ + const StorageIndex& col() const { return m_col; } + + /** \returns the value of the element */ + const Scalar& value() const { return m_value; } +protected: + StorageIndex m_row, m_col; + Scalar m_value; +}; + +} // end namespace Eigen + +#endif // EIGEN_SPARSEUTIL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseVector.h new file mode 100644 index 000000000..19b0fbc9d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseVector.h @@ -0,0 +1,478 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEVECTOR_H +#define EIGEN_SPARSEVECTOR_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * \class SparseVector + * + * \brief a sparse vector class + * + * \tparam _Scalar the scalar type, i.e. the type of the coefficients + * + * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN. + */ + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef Sparse StorageKind; + typedef MatrixXpr XprKind; + enum { + IsColVector = (_Options & RowMajorBit) ? 0 : 1, + + RowsAtCompileTime = IsColVector ? Dynamic : 1, + ColsAtCompileTime = IsColVector ? 1 : Dynamic, + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 
0 : RowMajorBit) | CompressedAccessBit, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; + +// Sparse-Vector-Assignment kinds: +enum { + SVA_RuntimeSwitch, + SVA_Inner, + SVA_Outer +}; + +template< typename Dest, typename Src, + int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch + : Src::InnerSizeAtCompileTime==1 ? SVA_Outer + : SVA_Inner> +struct sparse_vector_assign_selector; + +} + +template +class SparseVector + : public SparseCompressedBase > +{ + typedef SparseCompressedBase Base; + using Base::convert_index; + public: + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) + + typedef internal::CompressedStorage Storage; + enum { IsColVector = internal::traits::IsColVector }; + + enum { + Options = _Options + }; + + EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } + EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } + EIGEN_STRONG_INLINE Index outerSize() const { return 1; } + + EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); } + EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); } + + EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); } + EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); } + + inline const StorageIndex* outerIndexPtr() const { return 0; } + inline StorageIndex* outerIndexPtr() { return 0; } + inline const StorageIndex* innerNonZeroPtr() const { return 0; } + inline StorageIndex* innerNonZeroPtr() { return 0; } + + /** \internal */ + inline Storage& data() { return m_data; } + /** \internal */ + inline const Storage& data() const { return m_data; } + + inline Scalar coeff(Index row, Index col) const + { + eigen_assert(IsColVector ? (col==0 && row>=0 && row=0 && col=0 && i=0 && row=0 && col=0 && i=0 && row=0 && col=0 && i= startId) && (m_data.index(p) > i) ) + { + m_data.index(p+1) = m_data.index(p); + m_data.value(p+1) = m_data.value(p); + --p; + } + m_data.index(p+1) = convert_index(i); + m_data.value(p+1) = 0; + return m_data.value(p+1); + } + + /** + */ + inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } + + + inline void finalize() {} + + /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */ + void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) + { + m_data.prune(reference,epsilon); + } + + /** Resizes the sparse vector to \a rows x \a cols + * + * This method is provided for compatibility with matrices. + * For a column vector, \a cols must be equal to 1. + * For a row vector, \a rows must be equal to 1. + * + * \sa resize(Index) + */ + void resize(Index rows, Index cols) + { + eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1"); + resize(IsColVector ? rows : cols); + } + + /** Resizes the sparse vector to \a newSize + * This method deletes all entries, thus leaving an empty sparse vector + * + * \sa conservativeResize(), setZero() */ + void resize(Index newSize) + { + m_size = newSize; + m_data.clear(); + } + + /** Resizes the sparse vector to \a newSize, while leaving old values untouched. + * + * If the size of the vector is decreased, then the storage of the out-of bounds coefficients is kept and reserved. + * Call .data().squeeze() to free extra memory. 
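+ *
+ * A minimal usage sketch of this insertion API (the size and indices are
+ * illustrative assumptions):
+ * \code
+ * SparseVector<double> v(1000);  // logical size 1000, no nonzeros yet
+ * v.reserve(3);                  // pre-allocate room for a few nonzeros
+ * v.insert(10) = 1.0;            // insert() assumes the entry does not exist yet
+ * v.coeffRef(10) += 0.5;         // coeffRef() finds or creates the entry
+ * v.conservativeResize(500);     // keeps entries with index < 500
+ * \endcode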
+ * + * \sa reserve(), setZero() + */ + void conservativeResize(Index newSize) + { + if (newSize < m_size) + { + Index i = 0; + while (i + inline SparseVector(const SparseMatrixBase& other) + : m_size(0) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + check_template_parameters(); + *this = other.derived(); + } + + inline SparseVector(const SparseVector& other) + : Base(other), m_size(0) + { + check_template_parameters(); + *this = other.derived(); + } + + /** Swaps the values of \c *this and \a other. + * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only. + * \sa SparseMatrixBase::swap() + */ + inline void swap(SparseVector& other) + { + std::swap(m_size, other.m_size); + m_data.swap(other.m_data); + } + + template + inline void swap(SparseMatrix& other) + { + eigen_assert(other.outerSize()==1); + std::swap(m_size, other.m_innerSize); + m_data.swap(other.m_data); + } + + inline SparseVector& operator=(const SparseVector& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + resize(other.size()); + m_data = other.m_data; + } + return *this; + } + + template + inline SparseVector& operator=(const SparseMatrixBase& other) + { + SparseVector tmp(other.size()); + internal::sparse_vector_assign_selector::run(tmp,other.derived()); + this->swap(tmp); + return *this; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline SparseVector& operator=(const SparseSparseProduct& product) + { + return Base::operator=(product); + } + #endif + + friend std::ostream & operator << (std::ostream & s, const SparseVector& m) + { + for (Index i=0; i::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); + } + + Storage m_data; + Index m_size; +}; + +namespace internal { + +template +struct evaluator > + : evaluator_base > +{ + typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType; + typedef evaluator_base Base; + typedef typename SparseVectorType::InnerIterator InnerIterator; + typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator; + + enum { + CoeffReadCost = NumTraits<_Scalar>::ReadCost, + Flags = SparseVectorType::Flags + }; + + evaluator() : Base() {} + + explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat) + { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + inline Index nonZerosEstimate() const { + return m_matrix->nonZeros(); + } + + operator SparseVectorType&() { return m_matrix->const_cast_derived(); } + operator const SparseVectorType&() const { return *m_matrix; } + + const SparseVectorType *m_matrix; +}; + +template< typename Dest, typename Src> +struct sparse_vector_assign_selector { + static void run(Dest& dst, const Src& src) { + eigen_internal_assert(src.innerSize()==src.size()); + typedef internal::evaluator SrcEvaluatorType; + SrcEvaluatorType srcEval(src); + for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it) + dst.insert(it.index()) = it.value(); + } +}; + +template< typename Dest, typename Src> +struct sparse_vector_assign_selector { + static void run(Dest& dst, const Src& src) { + eigen_internal_assert(src.outerSize()==src.size()); + typedef internal::evaluator SrcEvaluatorType; + SrcEvaluatorType srcEval(src); + for(Index i=0; i +struct sparse_vector_assign_selector { + static void run(Dest& dst, const Src& src) { + if(src.outerSize()==1) sparse_vector_assign_selector::run(dst, 
src); + else sparse_vector_assign_selector::run(dst, src); + } +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSEVECTOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseView.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseView.h new file mode 100644 index 000000000..7c4aea743 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/SparseView.h @@ -0,0 +1,253 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2010 Daniel Lowengrub +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEVIEW_H +#define EIGEN_SPARSEVIEW_H + +namespace Eigen { + +namespace internal { + +template +struct traits > : traits +{ + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Sparse StorageKind; + enum { + Flags = int(traits::Flags) & (RowMajorBit) + }; +}; + +} // end namespace internal + +/** \ingroup SparseCore_Module + * \class SparseView + * + * \brief Expression of a dense or sparse matrix with zero or too small values removed + * + * \tparam MatrixType the type of the object of which we are removing the small entries + * + * This class represents an expression of a given dense or sparse matrix with + * entries smaller than \c reference * \c epsilon are removed. + * It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::sparseView(), SparseMatrixBase::pruned() + */ +template +class SparseView : public SparseMatrixBase > +{ + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef SparseMatrixBase Base; +public: + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) + typedef typename internal::remove_all::type NestedExpression; + + explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0), + const RealScalar &epsilon = NumTraits::dummy_precision()) + : m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {} + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + inline Index innerSize() const { return m_matrix.innerSize(); } + inline Index outerSize() const { return m_matrix.outerSize(); } + + /** \returns the nested expression */ + const typename internal::remove_all::type& + nestedExpression() const { return m_matrix; } + + Scalar reference() const { return m_reference; } + RealScalar epsilon() const { return m_epsilon; } + +protected: + MatrixTypeNested m_matrix; + Scalar m_reference; + RealScalar m_epsilon; +}; + +namespace internal { + +// TODO find a way to unify the two following variants +// This is tricky because implementing an inner iterator on top of an IndexBased evaluator is +// not easy because the evaluators do not expose the sizes of the underlying expression. 
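+
+// Aside: such views are typically applied to matrices assembled from Triplet
+// entries (see SparseUtil.h earlier in this diff); a small sketch, with sizes
+// and values as illustrative assumptions:
+//
+//   std::vector<Eigen::Triplet<double> > entries;
+//   entries.push_back(Eigen::Triplet<double>(0, 0, 4.0)); // (row, col, value)
+//   entries.push_back(Eigen::Triplet<double>(2, 1, 3.5));
+//   Eigen::SparseMatrix<double> M(3, 3);
+//   M.setFromTriplets(entries.begin(), entries.end());    // duplicates are summed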
+ +template +struct unary_evaluator, IteratorBased> + : public evaluator_base > +{ + typedef typename evaluator::InnerIterator EvalIterator; + public: + typedef SparseView XprType; + + class InnerIterator : public EvalIterator + { + typedef typename XprType::Scalar Scalar; + public: + + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) + : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view) + { + incrementToNonZero(); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + EvalIterator::operator++(); + incrementToNonZero(); + return *this; + } + + using EvalIterator::value; + + protected: + const XprType &m_view; + + private: + void incrementToNonZero() + { + while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon())) + { + EvalIterator::operator++(); + } + } + }; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {} + + protected: + evaluator m_argImpl; + const XprType &m_view; +}; + +template +struct unary_evaluator, IndexBased> + : public evaluator_base > +{ + public: + typedef SparseView XprType; + protected: + enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::StorageIndex StorageIndex; + public: + + class InnerIterator + { + public: + + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) + : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize()) + { + incrementToNonZero(); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + m_inner++; + incrementToNonZero(); + return *this; + } + + EIGEN_STRONG_INLINE Scalar value() const + { + return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner) + : m_sve.m_argImpl.coeff(m_inner, m_outer); + } + + EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } + + EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } + + protected: + const unary_evaluator &m_sve; + Index m_inner; + const Index m_outer; + const Index m_end; + + private: + void incrementToNonZero() + { + while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon())) + { + m_inner++; + } + } + }; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = XprType::Flags + }; + + explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {} + + protected: + evaluator m_argImpl; + const XprType &m_view; +}; + +} // end namespace internal + +/** \ingroup SparseCore_Module + * + * \returns a sparse expression of the dense expression \c *this with values smaller than + * \a reference * \a epsilon removed. + * + * This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S: + * \code + * MatrixXd D(n,m); + * SparseMatrix S; + * S = D.sparseView(); // suppress numerical zeros (exact) + * S = D.sparseView(reference); + * S = D.sparseView(reference,epsilon); + * \endcode + * where \a reference is a meaningful non zero reference value, + * and \a epsilon is a tolerance factor defaulting to NumTraits::dummy_precision(). 
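+ *
+ * Numerically, a coefficient \c x is suppressed (for real scalars) exactly when
+ * \code abs(x) <= abs(reference) * epsilon \endcode
+ * which is the internal::isMuchSmallerThan() test used by the evaluators above.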
+ * + * \sa SparseMatrixBase::pruned(), class SparseView */ +template +const SparseView MatrixBase::sparseView(const Scalar& reference, + const typename NumTraits::Real& epsilon) const +{ + return SparseView(derived(), reference, epsilon); +} + +/** \returns an expression of \c *this with values smaller than + * \a reference * \a epsilon removed. + * + * This method is typically used in conjunction with the product of two sparse matrices + * to automatically prune the smallest values as follows: + * \code + * C = (A*B).pruned(); // suppress numerical zeros (exact) + * C = (A*B).pruned(ref); + * C = (A*B).pruned(ref,epsilon); + * \endcode + * where \c ref is a meaningful non zero reference value. + * */ +template +const SparseView +SparseMatrixBase::pruned(const Scalar& reference, + const RealScalar& epsilon) const +{ + return SparseView(derived(), reference, epsilon); +} + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/TriangularSolver.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/TriangularSolver.h new file mode 100644 index 000000000..f9c56ba79 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseCore/TriangularSolver.h @@ -0,0 +1,315 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSETRIANGULARSOLVER_H +#define EIGEN_SPARSETRIANGULARSOLVER_H + +namespace Eigen { + +namespace internal { + +template::Flags) & RowMajorBit> +struct sparse_solve_triangular_selector; + +// forward substitution, row-major +template +struct sparse_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef evaluator LhsEval; + typedef typename evaluator::InnerIterator LhsIterator; + static void run(const Lhs& lhs, Rhs& other) + { + LhsEval lhsEval(lhs); + for(Index col=0 ; col +struct sparse_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef evaluator LhsEval; + typedef typename evaluator::InnerIterator LhsIterator; + static void run(const Lhs& lhs, Rhs& other) + { + LhsEval lhsEval(lhs); + for(Index col=0 ; col=0 ; --i) + { + Scalar tmp = other.coeff(i,col); + Scalar l_ii(0); + LhsIterator it(lhsEval, i); + while(it && it.index() +struct sparse_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef evaluator LhsEval; + typedef typename evaluator::InnerIterator LhsIterator; + static void run(const Lhs& lhs, Rhs& other) + { + LhsEval lhsEval(lhs); + for(Index col=0 ; col +struct sparse_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef evaluator LhsEval; + typedef typename evaluator::InnerIterator LhsIterator; + static void run(const Lhs& lhs, Rhs& other) + { + LhsEval lhsEval(lhs); + for(Index col=0 ; col=0; --i) + { + Scalar& tmp = other.coeffRef(i,col); + if (tmp!=Scalar(0)) // optimization when other is actually sparse + { + if(!(Mode & UnitDiag)) + { + // TODO replace this by a binary search. 
make sure the binary search is safe for partially sorted elements + LhsIterator it(lhsEval, i); + while(it && it.index()!=i) + ++it; + eigen_assert(it && it.index()==i); + other.coeffRef(i,col) /= it.value(); + } + LhsIterator it(lhsEval, i); + for(; it && it.index() +template +void TriangularViewImpl::solveInPlace(MatrixBase& other) const +{ + eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows()); + eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); + + enum { copy = internal::traits::Flags & RowMajorBit }; + + typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; + OtherCopy otherCopy(other.derived()); + + internal::sparse_solve_triangular_selector::type, Mode>::run(derived().nestedExpression(), otherCopy); + + if (copy) + other = otherCopy; +} +#endif + +// pure sparse path + +namespace internal { + +template +struct sparse_solve_triangular_sparse_selector; + +// forward substitution, col-major +template +struct sparse_solve_triangular_sparse_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; + static void run(const Lhs& lhs, Rhs& other) + { + const bool IsLower = (UpLo==Lower); + AmbiVector tempVector(other.rows()*2); + tempVector.setBounds(0,other.rows()); + + Rhs res(other.rows(), other.cols()); + res.reserve(other.nonZeros()); + + for(Index col=0 ; col=0; + i+=IsLower?1:-1) + { + tempVector.restart(); + Scalar& ci = tempVector.coeffRef(i); + if (ci!=Scalar(0)) + { + // find + typename Lhs::InnerIterator it(lhs, i); + if(!(Mode & UnitDiag)) + { + if (IsLower) + { + eigen_assert(it.index()==i); + ci /= it.value(); + } + else + ci /= lhs.coeff(i,i); + } + tempVector.restart(); + if (IsLower) + { + if (it.index()==i) + ++it; + for(; it; ++it) + tempVector.coeffRef(it.index()) -= ci * it.value(); + } + else + { + for(; it && it.index()::Iterator it(tempVector/*,1e-12*/); it; ++it) + { + ++ count; +// std::cerr << "fill " << it.index() << ", " << col << "\n"; +// std::cout << it.value() << " "; + // FIXME use insertBack + res.insert(it.index(), col) = it.value(); + } +// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n"; + } + res.finalize(); + other = res.markAsRValue(); + } +}; + +} // end namespace internal + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void TriangularViewImpl::solveInPlace(SparseMatrixBase& other) const +{ + eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows()); + eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); + +// enum { copy = internal::traits::Flags & RowMajorBit }; + +// typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; +// OtherCopy otherCopy(other.derived()); + + internal::sparse_solve_triangular_sparse_selector::run(derived().nestedExpression(), other.derived()); + +// if (copy) +// other = otherCopy; +} +#endif + +} // end namespace Eigen + +#endif // EIGEN_SPARSETRIANGULARSOLVER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU.h new file mode 100644 index 000000000..7104831c0 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU.h @@ -0,0 +1,773 @@ +// This file 
is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2012-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_SPARSE_LU_H +#define EIGEN_SPARSE_LU_H + +namespace Eigen { + +template > class SparseLU; +template struct SparseLUMatrixLReturnType; +template struct SparseLUMatrixUReturnType; + +/** \ingroup SparseLU_Module + * \class SparseLU + * + * \brief Sparse supernodal LU factorization for general matrices + * + * This class implements the supernodal LU factorization for general matrices. + * It uses the main techniques from the sequential SuperLU package + * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real + * and complex arithmetics with single and double precision, depending on the + * scalar type of your input matrix. + * The code has been optimized to provide BLAS-3 operations during supernode-panel updates. + * It benefits directly from the built-in high-performant Eigen BLAS routines. + * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to + * enable a better optimization from the compiler. For best performance, + * you should compile it with NDEBUG flag to avoid the numerous bounds checking on vectors. + * + * An important parameter of this class is the ordering method. It is used to reorder the columns + * (and eventually the rows) of the matrix to reduce the number of new elements that are created during + * numerical factorization. The cheapest method available is COLAMD. + * See \link OrderingMethods_Module the OrderingMethods module \endlink for the list of + * built-in and external ordering methods. + * + * Simple example with key steps + * \code + * VectorXd x(n), b(n); + * SparseMatrix A; + * SparseLU, COLAMDOrdering > solver; + * // fill A and b; + * // Compute the ordering permutation vector from the structural pattern of A + * solver.analyzePattern(A); + * // Compute the numerical factorization + * solver.factorize(A); + * //Use the factors to solve the linear system + * x = solver.solve(b); + * \endcode + * + * \warning The input matrix A should be in a \b compressed and \b column-major form. + * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix. + * + * \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix. + * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization. + * If this is the case for your matrices, you can try the basic scaling method at + * "unsupported/Eigen/src/IterativeSolvers/Scaling.h" + * + * \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<> + * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. 
Default is COLAMD
+ *
+ * \implsparsesolverconcept
+ *
+ * \sa \ref TutorialSparseSolverConcept
+ * \sa \ref OrderingMethods_Module
+ */
+template <typename _MatrixType, typename _OrderingType>
+class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
+{
+  protected:
+    typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
+    using APIBase::m_isInitialized;
+  public:
+    using APIBase::_solve_impl;
+
+    typedef _MatrixType MatrixType;
+    typedef _OrderingType OrderingType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::StorageIndex StorageIndex;
+    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
+    typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
+    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
+    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
+    typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;
+
+    enum {
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+  public:
+    SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+    {
+      initperfvalues();
+    }
+    explicit SparseLU(const MatrixType& matrix)
+      : m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+    {
+      initperfvalues();
+      compute(matrix);
+    }
+
+    ~SparseLU()
+    {
+      // Free all explicit dynamic pointers
+    }
+
+    void analyzePattern (const MatrixType& matrix);
+    void factorize (const MatrixType& matrix);
+    void simplicialfactorize(const MatrixType& matrix);
+
+    /**
+     * Compute the symbolic and numeric factorization of the input sparse matrix.
+     * The input matrix should be in column-major storage.
+     */
+    void compute (const MatrixType& matrix)
+    {
+      // Analyze
+      analyzePattern(matrix);
+      //Factorize
+      factorize(matrix);
+    }
+
+    inline Index rows() const { return m_mat.rows(); }
+    inline Index cols() const { return m_mat.cols(); }
+    /** Indicate that the pattern of the input matrix is symmetric */
+    void isSymmetric(bool sym)
+    {
+      m_symmetricmode = sym;
+    }
+
+    /** \returns an expression of the matrix L, internally stored as supernodes
+     * The only operation available with this expression is the triangular solve
+     * \code
+     * y = b; matrixL().solveInPlace(y);
+     * \endcode
+     */
+    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
+    {
+      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
+    }
+    /** \returns an expression of the matrix U,
+     * The only operation available with this expression is the triangular solve
+     * \code
+     * y = b; matrixU().solveInPlace(y);
+     * \endcode
+     */
+    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const
+    {
+      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
+    }
+
+    /**
+     * \returns a reference to the row matrix permutation \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$
+     * \sa colsPermutation()
+     */
+    inline const PermutationType& rowsPermutation() const
+    {
+      return m_perm_r;
+    }
+    /**
+     * \returns a reference to the column matrix permutation \f$ P_c^T \f$ such that \f$P_r A P_c^T = L U\f$
+     * \sa rowsPermutation()
+     */
+    inline const PermutationType& colsPermutation() const
+    {
+      return m_perm_c;
+    }
+    /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
+    void setPivotThreshold(const RealScalar& thresh)
+    {
+      m_diagpivotthresh = thresh;
+    }
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+     *
+     * \warning the destination matrix X in X = this->solve(B) must be column-major.
+     *
+     * \sa compute()
+     */
+    template<typename Rhs>
+    inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;
+#endif // EIGEN_PARSED_BY_DOXYGEN
+
+    /** \brief Reports whether previous computation was successful.
+     *
+     * \returns \c Success if computation was successful,
+     *          \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance
+     *          \c InvalidInput if the input matrix is invalid
+     *
+     * \sa iparm()
+     */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /**
+     * \returns A string describing the type of error
+     */
+    std::string lastErrorMessage() const
+    {
+      return m_lastError;
+    }
+
+    template<typename Rhs, typename Dest>
+    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
+    {
+      Dest& X(X_base.derived());
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
+      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+                        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+
+      // Permute the right hand side to form X = Pr*B
+      // on return, X is overwritten by the computed solution
+      X.resize(B.rows(),B.cols());
+
+      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
+      for(Index j = 0; j < B.cols(); ++j)
+        X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
+
+      //Forward substitution with L
+      this->matrixL().solveInPlace(X);
+      this->matrixU().solveInPlace(X);
+
+      // Permute back the solution
+      for (Index j = 0; j < B.cols(); ++j)
+        X.col(j) = colsPermutation().inverse() * X.col(j);
+
+      return true;
+    }
+
+    /**
+     * \returns the absolute value of the determinant of the matrix of which
+     * *this is the LU decomposition.
+     *
+     * \warning a determinant can be very big or small, so for matrices
+     * of large enough dimension, there is a risk of overflow/underflow.
+     * One way to work around that is to use logAbsDeterminant() instead.
+     *
+     * \sa logAbsDeterminant(), signDeterminant()
+     */
+    Scalar absDeterminant()
+    {
+      using std::abs;
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+      // Initialize with the determinant of the row matrix
+      Scalar det = Scalar(1.);
+      // Note that the diagonal blocks of U are stored in supernodes,
+      // which are available in the L part :)
+      for (Index j = 0; j < this->cols(); ++j)
+      {
+        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+        {
+          if(it.index() == j)
+          {
+            det *= abs(it.value());
+            break;
+          }
+        }
+      }
+      return det;
+    }
+
+    /** \returns the natural log of the absolute value of the determinant of the matrix
+     * of which *this is the LU decomposition
+     *
+     * \note This method is useful to work around the risk of overflow/underflow that's
+     * inherent to the determinant computation.
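+     *
+     * A usage sketch, assuming a square, compressed, column-major matrix A
+     * filled elsewhere:
+     * \code
+     * SparseLU<SparseMatrix<double> > lu(A);
+     * double logdet = 0;
+     * if(lu.info() == Success)
+     *   logdet = lu.logAbsDeterminant();
+     * \endcode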
+ * + * \sa absDeterminant(), signDeterminant() + */ + Scalar logAbsDeterminant() const + { + using std::log; + using std::abs; + + eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); + Scalar det = Scalar(0.); + for (Index j = 0; j < this->cols(); ++j) + { + for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) + { + if(it.row() < j) continue; + if(it.row() == j) + { + det += log(abs(it.value())); + break; + } + } + } + return det; + } + + /** \returns A number representing the sign of the determinant + * + * \sa absDeterminant(), logAbsDeterminant() + */ + Scalar signDeterminant() + { + eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); + // Initialize with the determinant of the row matrix + Index det = 1; + // Note that the diagonal blocks of U are stored in supernodes, + // which are available in the L part :) + for (Index j = 0; j < this->cols(); ++j) + { + for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) + { + if(it.index() == j) + { + if(it.value()<0) + det = -det; + else if(it.value()==0) + return 0; + break; + } + } + } + return det * m_detPermR * m_detPermC; + } + + /** \returns The determinant of the matrix. + * + * \sa absDeterminant(), logAbsDeterminant() + */ + Scalar determinant() + { + eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); + // Initialize with the determinant of the row matrix + Scalar det = Scalar(1.); + // Note that the diagonal blocks of U are stored in supernodes, + // which are available in the L part :) + for (Index j = 0; j < this->cols(); ++j) + { + for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) + { + if(it.index() == j) + { + det *= it.value(); + break; + } + } + } + return (m_detPermR * m_detPermC) > 0 ? det : -det; + } + + protected: + // Functions + void initperfvalues() + { + m_perfv.panel_size = 16; + m_perfv.relax = 1; + m_perfv.maxsuper = 128; + m_perfv.rowblk = 16; + m_perfv.colblk = 8; + m_perfv.fillfactor = 20; + } + + // Variables + mutable ComputationInfo m_info; + bool m_factorizationIsOk; + bool m_analysisIsOk; + std::string m_lastError; + NCMatrix m_mat; // The input (permuted ) matrix + SCMatrix m_Lstore; // The lower triangular matrix (supernodal) + MappedSparseMatrix m_Ustore; // The upper triangular matrix + PermutationType m_perm_c; // Column permutation + PermutationType m_perm_r ; // Row permutation + IndexVector m_etree; // Column elimination tree + + typename Base::GlobalLU_t m_glu; + + // SparseLU options + bool m_symmetricmode; + // values for performance + internal::perfvalues m_perfv; + RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot + Index m_nnzL, m_nnzU; // Nonzeros in L and U factors + Index m_detPermR, m_detPermC; // Determinants of the permutation matrices + private: + // Disable copy constructor + SparseLU (const SparseLU& ); + +}; // End class SparseLU + + + +// Functions needed by the anaysis phase +/** + * Compute the column permutation to minimize the fill-in + * + * - Apply this permutation to the input matrix - + * + * - Compute the column elimination tree on the permuted matrix + * + * - Postorder the elimination tree and the column permutation + * + */ +template +void SparseLU::analyzePattern(const MatrixType& mat) +{ + + //TODO It is possible as in SuperLU to compute row and columns scaling vectors to equilibrate the matrix mat. + + // Firstly, copy the whole input matrix. 
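+  // A caller-level sketch of the two-phase design described above (A1 and A2
+  // are assumed to share one sparsity pattern):
+  //   SparseLU<SparseMatrix<double> > lu;
+  //   lu.analyzePattern(A1);                  // symbolic step, done once
+  //   lu.factorize(A1);  x1 = lu.solve(b1);
+  //   lu.factorize(A2);  x2 = lu.solve(b2);   // reuses the analysis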
+ m_mat = mat; + + // Compute fill-in ordering + OrderingType ord; + ord(m_mat,m_perm_c); + + // Apply the permutation to the column of the input matrix + if (m_perm_c.size()) + { + m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used. + // Then, permute only the column pointers + ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast(mat.outerIndexPtr()):0); + + // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed. + if(!mat.isCompressed()) + IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1); + + // Apply the permutation and compute the nnz per column. + for (Index i = 0; i < mat.cols(); i++) + { + m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i]; + m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i]; + } + } + + // Compute the column elimination tree of the permuted matrix + IndexVector firstRowElt; + internal::coletree(m_mat, m_etree,firstRowElt); + + // In symmetric mode, do not do postorder here + if (!m_symmetricmode) { + IndexVector post, iwork; + // Post order etree + internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post); + + + // Renumber etree in postorder + Index m = m_mat.cols(); + iwork.resize(m+1); + for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i)); + m_etree = iwork; + + // Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree + PermutationType post_perm(m); + for (Index i = 0; i < m; i++) + post_perm.indices()(i) = post(i); + + // Combine the two permutations : postorder the permutation for future use + if(m_perm_c.size()) { + m_perm_c = post_perm * m_perm_c; + } + + } // end postordering + + m_analysisIsOk = true; +} + +// Functions needed by the numerical factorization phase + + +/** + * - Numerical factorization + * - Interleaved with the symbolic factorization + * On exit, info is + * + * = 0: successful factorization + * + * > 0: if info = i, and i is + * + * <= A->ncol: U(i,i) is exactly zero. The factorization has + * been completed, but the factor U is exactly singular, + * and division by zero will occur if it is used to solve a + * system of equations. + * + * > A->ncol: number of bytes allocated when memory allocation + * failure occurred, plus A->ncol. If lwork = -1, it is + * the estimated amount of space needed, plus A->ncol. + */ +template +void SparseLU::factorize(const MatrixType& matrix) +{ + using internal::emptyIdxLU; + eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); + eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices"); + + m_isInitialized = true; + + + // Apply the column permutation computed in analyzepattern() + // m_mat = matrix * m_perm_c.inverse(); + m_mat = matrix; + if (m_perm_c.size()) + { + m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. 
+ //Then, permute only the column pointers + const StorageIndex * outerIndexPtr; + if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr(); + else + { + StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1]; + for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i]; + outerIndexPtr = outerIndexPtr_t; + } + for (Index i = 0; i < matrix.cols(); i++) + { + m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i]; + m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i]; + } + if(!matrix.isCompressed()) delete[] outerIndexPtr; + } + else + { //FIXME This should not be needed if the empty permutation is handled transparently + m_perm_c.resize(matrix.cols()); + for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i; + } + + Index m = m_mat.rows(); + Index n = m_mat.cols(); + Index nnz = m_mat.nonZeros(); + Index maxpanel = m_perfv.panel_size * m; + // Allocate working storage common to the factor routines + Index lwork = 0; + Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu); + if (info) + { + m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ; + m_factorizationIsOk = false; + return ; + } + + // Set up pointers for integer working arrays + IndexVector segrep(m); segrep.setZero(); + IndexVector parent(m); parent.setZero(); + IndexVector xplore(m); xplore.setZero(); + IndexVector repfnz(maxpanel); + IndexVector panel_lsub(maxpanel); + IndexVector xprune(n); xprune.setZero(); + IndexVector marker(m*internal::LUNoMarker); marker.setZero(); + + repfnz.setConstant(-1); + panel_lsub.setConstant(-1); + + // Set up pointers for scalar working arrays + ScalarVector dense; + dense.setZero(maxpanel); + ScalarVector tempv; + tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) ); + + // Compute the inverse of perm_c + PermutationType iperm_c(m_perm_c.inverse()); + + // Identify initial relaxed snodes + IndexVector relax_end(n); + if ( m_symmetricmode == true ) + Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end); + else + Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end); + + + m_perm_r.resize(m); + m_perm_r.indices().setConstant(-1); + marker.setConstant(-1); + m_detPermR = 1; // Record the determinant of the row permutation + + m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0); + m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0); + + // Work on one 'panel' at a time. A panel is one of the following : + // (a) a relaxed supernode at the bottom of the etree, or + // (b) panel_size contiguous columns, defined by the user + Index jcol; + IndexVector panel_histo(n); + Index pivrow; // Pivotal row number in the original row matrix + Index nseg1; // Number of segments in U-column above panel row jcol + Index nseg; // Number of segments in each U-column + Index irep; + Index i, k, jj; + for (jcol = 0; jcol < n; ) + { + // Adjust panel size so that a panel won't overlap with the next relaxed snode. 
+ Index panel_size = m_perfv.panel_size; // upper bound on panel width + for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++) + { + if (relax_end(k) != emptyIdxLU) + { + panel_size = k - jcol; + break; + } + } + if (k == n) + panel_size = n - jcol; + + // Symbolic outer factorization on a panel of columns + Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu); + + // Numeric sup-panel updates in topological order + Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu); + + // Sparse LU within the panel, and below the panel diagonal + for ( jj = jcol; jj< jcol + panel_size; jj++) + { + k = (jj - jcol) * m; // Column index for w-wide arrays + + nseg = nseg1; // begin after all the panel segments + //Depth-first-search for the current column + VectorBlock panel_lsubk(panel_lsub, k, m); + VectorBlock repfnz_k(repfnz, k, m); + info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu); + if ( info ) + { + m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() "; + m_info = NumericalIssue; + m_factorizationIsOk = false; + return; + } + // Numeric updates to this column + VectorBlock dense_k(dense, k, m); + VectorBlock segrep_k(segrep, nseg1, m-nseg1); + info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu); + if ( info ) + { + m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() "; + m_info = NumericalIssue; + m_factorizationIsOk = false; + return; + } + + // Copy the U-segments to ucol(*) + info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu); + if ( info ) + { + m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() "; + m_info = NumericalIssue; + m_factorizationIsOk = false; + return; + } + + // Form the L-segment + info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu); + if ( info ) + { + m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT "; + std::ostringstream returnInfo; + returnInfo << info; + m_lastError += returnInfo.str(); + m_info = NumericalIssue; + m_factorizationIsOk = false; + return; + } + + // Update the determinant of the row permutation matrix + // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot. 
+ if (pivrow != jj) m_detPermR = -m_detPermR; + + // Prune columns (0:jj-1) using column jj + Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu); + + // Reset repfnz for this column + for (i = 0; i < nseg; i++) + { + irep = segrep(i); + repfnz_k(irep) = emptyIdxLU; + } + } // end SparseLU within the panel + jcol += panel_size; // Move to the next panel + } // end for -- end elimination + + m_detPermR = m_perm_r.determinant(); + m_detPermC = m_perm_c.determinant(); + + // Count the number of nonzeros in factors + Base::countnz(n, m_nnzL, m_nnzU, m_glu); + // Apply permutation to the L subscripts + Base::fixupL(n, m_perm_r.indices(), m_glu); + + // Create supernode matrix L + m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); + // Create the column major upper sparse matrix U; + new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); + + m_info = Success; + m_factorizationIsOk = true; +} + +template +struct SparseLUMatrixLReturnType : internal::no_assignment_operator +{ + typedef typename MappedSupernodalType::Scalar Scalar; + explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL) + { } + Index rows() { return m_mapL.rows(); } + Index cols() { return m_mapL.cols(); } + template + void solveInPlace( MatrixBase &X) const + { + m_mapL.solveInPlace(X); + } + const MappedSupernodalType& m_mapL; +}; + +template +struct SparseLUMatrixUReturnType : internal::no_assignment_operator +{ + typedef typename MatrixLType::Scalar Scalar; + SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU) + : m_mapL(mapL),m_mapU(mapU) + { } + Index rows() { return m_mapL.rows(); } + Index cols() { return m_mapL.cols(); } + + template void solveInPlace(MatrixBase &X) const + { + Index nrhs = X.cols(); + Index n = X.rows(); + // Backward solve with U + for (Index k = m_mapL.nsuper(); k >= 0; k--) + { + Index fsupc = m_mapL.supToCol()[k]; + Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension + Index nsupc = m_mapL.supToCol()[k+1] - fsupc; + Index luptr = m_mapL.colIndexPtr()[fsupc]; + + if (nsupc == 1) + { + for (Index j = 0; j < nrhs; j++) + { + X(fsupc, j) /= m_mapL.valuePtr()[luptr]; + } + } + else + { + Map, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); + U = A.template triangularView().solve(U); + } + + for (Index j = 0; j < nrhs; ++j) + { + for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++) + { + typename MatrixUType::InnerIterator it(m_mapU, jcol); + for ( ; it; ++it) + { + Index irow = it.index(); + X(irow, j) -= X(jcol, j) * it.value(); + } + } + } + } // End For U-solve + } + const MatrixLType& m_mapL; + const MatrixUType& m_mapU; +}; + +} // End namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLUImpl.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLUImpl.h new file mode 100644 index 000000000..fc0cfc4de --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLUImpl.h @@ -0,0 +1,66 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +#ifndef SPARSELU_IMPL_H +#define SPARSELU_IMPL_H + +namespace Eigen { +namespace internal { + +/** \ingroup SparseLU_Module + * \class SparseLUImpl + * Base class for sparseLU + */ +template +class SparseLUImpl +{ + public: + typedef Matrix ScalarVector; + typedef Matrix IndexVector; + typedef Matrix ScalarMatrix; + typedef Map > MappedMatrixBlock; + typedef typename ScalarVector::RealScalar RealScalar; + typedef Ref > BlockScalarVector; + typedef Ref > BlockIndexVector; + typedef LU_GlobalLU_t GlobalLU_t; + typedef SparseMatrix MatrixType; + + protected: + template + Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions); + Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu); + template + Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions); + void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); + void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); + Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu); + Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu); + Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu); + template + void dfs_kernel(const StorageIndex jj, IndexVector& perm_r, + Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, + Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, + IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits); + void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu); + + void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu); + Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu); + Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu); + Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu); + void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu); + void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu); + void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu); + + 
template<typename, typename>
+    friend struct column_dfs_traits;
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Memory.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
new file mode 100644
index 000000000..4dc42e87b
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -0,0 +1,226 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU
+
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef EIGEN_SPARSELU_MEMORY
+#define EIGEN_SPARSELU_MEMORY
+
+namespace Eigen {
+namespace internal {
+
+enum { LUNoMarker = 3 };
+enum {emptyIdxLU = -1};
+inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)
+{
+  return (std::max)(m, (t+b)*w);
+}
+
+template< typename Scalar>
+inline Index LUTempSpace(Index&m, Index& w)
+{
+  return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
+}
+
+
+
+
+/**
+ * Expand the existing storage to accommodate more fill-ins
+ * \param vec Valid pointer to the vector to allocate or expand
+ * \param[in,out] length At input, contains the current length of the vector that is to be increased.
+ * At output, the length of the newly allocated vector
+ * \param[in] nbElts Current number of elements in the factors
+ * \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand
+ * \param[in,out] num_expansions Number of times the memory has been expanded
+ */
+template <typename Scalar, typename StorageIndex>
+template <typename VectorType>
+Index SparseLUImpl<Scalar,StorageIndex>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions)
+{
+
+  float alpha = 1.5; // Ratio of the memory increase
+  Index new_len; // New size of the allocated memory
+
+  if(num_expansions == 0 || keep_prev)
+    new_len = length ; // First time allocate requested
+  else
+    new_len = (std::max)(length+1,Index(alpha * length));
+
+  VectorType old_vec; // Temporary vector to hold the previous values
+  if (nbElts > 0 )
+    old_vec = vec.segment(0,nbElts);
+
+  //Allocate or expand the current vector
+#ifdef EIGEN_EXCEPTIONS
+  try
+#endif
+  {
+    vec.resize(new_len);
+  }
+#ifdef EIGEN_EXCEPTIONS
+  catch(std::bad_alloc& )
+#else
+  if(!vec.size())
+#endif
+  {
+    if (!num_expansions)
+    {
+      // First time to allocate from LUMemInit()
+      // Let LUMemInit() deal with it.
+      return -1;
+    }
+    if (keep_prev)
+    {
+      // In this case, the memory length should not be reduced
+      return new_len;
+    }
+    else
+    {
+      // Reduce the size and increase again
+      Index tries = 0; // Number of attempts
+      do
+      {
+        alpha = (alpha + 1)/2;
+        new_len = (std::max)(length+1,Index(alpha * length));
+#ifdef EIGEN_EXCEPTIONS
+        try
+#endif
+        {
+          vec.resize(new_len);
+        }
+#ifdef EIGEN_EXCEPTIONS
+        catch(std::bad_alloc& )
+#else
+        if (!vec.size())
+#endif
+        {
+          tries += 1;
+          if ( tries > 10) return new_len;
+        }
+      } while (!vec.size());
+    }
+  }
+  //Copy the previous values to the newly allocated space
+  if (nbElts > 0)
+    vec.segment(0, nbElts) = old_vec;
+
+
+  length = new_len;
+  if(num_expansions) ++num_expansions;
+  return 0;
+}
+
+/**
+ * \brief Allocate various working space for the numerical factorization phase.
+ * \param m number of rows of the input matrix
+ * \param n number of columns
+ * \param annz number of initial nonzeros in the matrix
+ * \param lwork if lwork=-1, this routine returns an estimated size of the required memory
+ * \param glu persistent data to facilitate multiple factors : will be deleted later ??
+ * \param fillratio estimated ratio of fill in the factors + * \param panel_size Size of a panel + * \return an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated memory when allocation failed, and 0 on success + * \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation + */ +template +Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu) +{ + Index& num_expansions = glu.num_expansions; //No memory expansions so far + num_expansions = 0; + glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U + glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor + // Return the estimated size to the user if necessary + Index tempSpace; + tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar); + if (lwork == emptyIdxLU) + { + Index estimated_size; + estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace + + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n; + return estimated_size; + } + + // Setup the required space + + // First allocate Integer pointers for L\U factors + glu.xsup.resize(n+1); + glu.supno.resize(n+1); + glu.xlsub.resize(n+1); + glu.xlusup.resize(n+1); + glu.xusub.resize(n+1); + + // Reserve memory for L/U factors + do + { + if( (expand(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0) + || (expand(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0) + || (expand (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0) + || (expand (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) ) + { + //Reduce the estimated size and retry + glu.nzlumax /= 2; + glu.nzumax /= 2; + glu.nzlmax /= 2; + if (glu.nzlumax < annz ) return glu.nzlumax; + } + } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size()); + + ++num_expansions; + return 0; + +} // end LuMemInit + +/** + * \brief Expand the existing storage + * \param vec vector to expand + * \param[in,out] maxlen On input, previous size of vec (Number of elements to copy ). on output, new size + * \param nbElts current number of elements in the vector. + * \param memtype Type of the element to expand + * \param num_expansions Number of expansions + * \return 0 on success, > 0 size of the memory allocated so far + */ +template +template +Index SparseLUImpl::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions) +{ + Index failed_size; + if (memtype == USUB) + failed_size = this->expand(vec, maxlen, nbElts, 1, num_expansions); + else + failed_size = this->expand(vec, maxlen, nbElts, 0, num_expansions); + + if (failed_size) + return failed_size; + + return 0 ; +} + +} // end namespace internal + +} // end namespace Eigen +#endif // EIGEN_SPARSELU_MEMORY diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Structs.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Structs.h new file mode 100644 index 000000000..cf5ec449b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Structs.h @@ -0,0 +1,110 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
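For orientation: SparseLUImpl and the memory helpers above are internal to the vendored module; application code never calls expand()/memInit() directly but reaches them through the public Eigen::SparseLU front end. A minimal sketch of that standard entry point (the matrix size and identity pattern are placeholders, not from this diff):

    #include <Eigen/Sparse>

    int solve_example()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(100, 100);
      A.setIdentity();          // placeholder pattern; a real app inserts triplets
      A.makeCompressed();       // SparseLU requires compressed (CSC) storage

      Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > solver;
      solver.analyzePattern(A); // symbolic phase: ordering, etree, relaxed snodes
      solver.factorize(A);      // numeric phase: drives the kernels in this module
      if (solver.info() != Eigen::Success) return -1;

      Eigen::VectorXd b = Eigen::VectorXd::Ones(A.rows());
      Eigen::VectorXd x = solver.solve(b);
      return x.allFinite() ? 0 : -1;
    }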
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+ * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h
+ * -- SuperLU routine (version 4.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November, 2010
+ *
+ * Global data structures used in LU factorization -
+ *
+ *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
+ *   (xsup,supno): supno[i] is the supernode number to which i belongs;
+ *  xsup(s) points to the beginning of the s-th supernode.
+ *  e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
+ *          xsup 0 1 2 4 7 12
+ *  Note: dfs will be performed on supernode rep. relative to the new
+ *        row pivoting ordering
+ *
+ *   (xlsub,lsub): lsub[*] contains the compressed subscript of
+ *  rectangular supernodes; xlsub[j] points to the starting
+ *  location of the j-th column in lsub[*]. Note that xlsub
+ *  is indexed by column.
+ *  Storage: original row subscripts
+ *
+ *  During the course of sparse LU factorization, we also use
+ *  (xlsub,lsub) for the purpose of symmetric pruning. For each
+ *  supernode {s,s+1,...,t=s+r} with first column s and last
+ *  column t, the subscript set
+ *    lsub[j], j=xlsub[s], .., xlsub[s+1]-1
+ *  is the structure of column s (i.e. structure of this supernode).
+ *  It is used for the storage of numerical values.
+ *  Furthermore,
+ *    lsub[j], j=xlsub[t], .., xlsub[t+1]-1
+ *  is the structure of the last column t of this supernode.
+ *  It is for the purpose of symmetric pruning. Therefore, the
+ *  structural subscripts can be rearranged without making physical
+ *  interchanges among the numerical values.
+ *
+ *  However, if the supernode has only one column, then we
+ *  only keep one set of subscripts. For any subscript interchange
+ *  performed, a similar interchange must be done on the numerical
+ *  values.
+ *
+ *  The last column structures (for pruning) will be removed
+ *  after the numerical LU factorization phase.
+ *
+ *   (xlusup,lusup): lusup[*] contains the numerical values of the
+ *  rectangular supernodes; xlusup[j] points to the starting
+ *  location of the j-th column in storage vector lusup[*]
+ *  Note: xlusup is indexed by column.
+ *  Each rectangular supernode is stored by column-major
+ *  scheme, consistent with Fortran 2-dim array storage.
+ *
+ *   (xusub,ucol,usub): ucol[*] stores the numerical values of
+ *  U-columns outside the rectangular supernodes. The row
+ *  subscript of nonzero ucol[k] is stored in usub[k].
+ *  xusub[i] points to the starting location of column i in ucol.
+ *  Storage: new row subscripts; that is, subscripts of PA.
+ */
+
+#ifndef EIGEN_LU_STRUCTS
+#define EIGEN_LU_STRUCTS
+namespace Eigen {
+namespace internal {
+
+typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
+
+template <typename IndexVector, typename ScalarVector>
+struct LU_GlobalLU_t {
+  typedef typename IndexVector::Scalar StorageIndex;
+  IndexVector xsup; // First supernode column ... xsup(s) points to the beginning of the s-th supernode
+  IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
+  ScalarVector lusup; // nonzero values of L ordered by columns
+  IndexVector lsub; // Compressed row indices of L rectangular supernodes.
+  IndexVector xlusup; // pointers to the beginning of each column in lusup
+  IndexVector xlsub; // pointers to the beginning of each column in lsub
+  Index nzlmax; // Current max size of lsub
+  Index nzlumax; // Current max size of lusup
+  ScalarVector ucol; // nonzero values of U ordered by columns
+  IndexVector usub; // row indices of U columns in ucol
+  IndexVector xusub; // Pointers to the beginning of each column of U in ucol
+  Index nzumax; // Current max size of ucol
+  Index n; // Number of columns in the matrix
+  Index num_expansions;
+};
+
+// Values to set for performance
+struct perfvalues {
+  Index panel_size; // a panel consists of at most <panel_size> consecutive columns
+  Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
+               // in a subtree of the elimination tree is less than relax, this subtree is considered
+               // as one supernode regardless of the row structures of those columns
+  Index maxsuper; // The maximum size for a supernode in complete LU
+  Index rowblk; // The minimum row dimension for 2-D blocking to be used;
+  Index colblk; // The minimum column dimension for 2-D blocking to be used;
+  Index fillfactor; // The estimated fill factors for L and U, compared with A
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_LU_STRUCTS
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
new file mode 100644
index 000000000..721e1883b
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -0,0 +1,301 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam
+// Copyright (C) 2012 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+
+namespace Eigen {
+namespace internal {
+
+/** \ingroup SparseLU_Module
+ * \brief A class to manipulate the L supernodal factor from the SparseLU factorization
+ *
+ * This class contains the data to easily store
+ * and manipulate the supernodes during the factorization and solution phase of Sparse LU.
+ * Only the lower triangular matrix has supernodes.
+ * + * NOTE : This class corresponds to the SCformat structure in SuperLU + * + */ +/* TODO + * InnerIterator as for sparsematrix + * SuperInnerIterator to iterate through all supernodes + * Function for triangular solve + */ +template +class MappedSuperNodalMatrix +{ + public: + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef Matrix IndexVector; + typedef Matrix ScalarVector; + public: + MappedSuperNodalMatrix() + { + + } + MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) + { + setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col); + } + + ~MappedSuperNodalMatrix() + { + + } + /** + * Set appropriate pointers for the lower triangular supernodal matrix + * These infos are available at the end of the numerical factorization + * FIXME This class will be modified such that it can be use in the course + * of the factorization. + */ + void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) + { + m_row = m; + m_col = n; + m_nzval = nzval.data(); + m_nzval_colptr = nzval_colptr.data(); + m_rowind = rowind.data(); + m_rowind_colptr = rowind_colptr.data(); + m_nsuper = col_to_sup(n); + m_col_to_sup = col_to_sup.data(); + m_sup_to_col = sup_to_col.data(); + } + + /** + * Number of rows + */ + Index rows() { return m_row; } + + /** + * Number of columns + */ + Index cols() { return m_col; } + + /** + * Return the array of nonzero values packed by column + * + * The size is nnz + */ + Scalar* valuePtr() { return m_nzval; } + + const Scalar* valuePtr() const + { + return m_nzval; + } + /** + * Return the pointers to the beginning of each column in \ref valuePtr() + */ + StorageIndex* colIndexPtr() + { + return m_nzval_colptr; + } + + const StorageIndex* colIndexPtr() const + { + return m_nzval_colptr; + } + + /** + * Return the array of compressed row indices of all supernodes + */ + StorageIndex* rowIndex() { return m_rowind; } + + const StorageIndex* rowIndex() const + { + return m_rowind; + } + + /** + * Return the location in \em rowvaluePtr() which starts each column + */ + StorageIndex* rowIndexPtr() { return m_rowind_colptr; } + + const StorageIndex* rowIndexPtr() const + { + return m_rowind_colptr; + } + + /** + * Return the array of column-to-supernode mapping + */ + StorageIndex* colToSup() { return m_col_to_sup; } + + const StorageIndex* colToSup() const + { + return m_col_to_sup; + } + /** + * Return the array of supernode-to-column mapping + */ + StorageIndex* supToCol() { return m_sup_to_col; } + + const StorageIndex* supToCol() const + { + return m_sup_to_col; + } + + /** + * Return the number of supernodes + */ + Index nsuper() const + { + return m_nsuper; + } + + class InnerIterator; + template + void solveInPlace( MatrixBase&X) const; + + + + + protected: + Index m_row; // Number of rows + Index m_col; // Number of columns + Index m_nsuper; // Number of supernodes + Scalar* m_nzval; //array of nonzero values packed by column + StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j + StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes + StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j + StorageIndex* m_col_to_sup; // col_to_sup[j] is the 
supernode number to which column j belongs + StorageIndex* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode + + private : +}; + +/** + * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L + * + */ +template +class MappedSuperNodalMatrix::InnerIterator +{ + public: + InnerIterator(const MappedSuperNodalMatrix& mat, Index outer) + : m_matrix(mat), + m_outer(outer), + m_supno(mat.colToSup()[outer]), + m_idval(mat.colIndexPtr()[outer]), + m_startidval(m_idval), + m_endidval(mat.colIndexPtr()[outer+1]), + m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]), + m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1]) + {} + inline InnerIterator& operator++() + { + m_idval++; + m_idrow++; + return *this; + } + inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; } + + inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_idval]); } + + inline Index index() const { return m_matrix.rowIndex()[m_idrow]; } + inline Index row() const { return index(); } + inline Index col() const { return m_outer; } + + inline Index supIndex() const { return m_supno; } + + inline operator bool() const + { + return ( (m_idval < m_endidval) && (m_idval >= m_startidval) + && (m_idrow < m_endidrow) ); + } + + protected: + const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix + const Index m_outer; // Current column + const Index m_supno; // Current SuperNode number + Index m_idval; // Index to browse the values in the current column + const Index m_startidval; // Start of the column value + const Index m_endidval; // End of the column value + Index m_idrow; // Index to browse the row indices + Index m_endidrow; // End index of row indices of the current column +}; + +/** + * \brief Solve with the supernode triangular matrix + * + */ +template +template +void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const +{ + /* Explicit type conversion as the Index type of MatrixBase may be wider than Index */ +// eigen_assert(X.rows() <= NumTraits::highest()); +// eigen_assert(X.cols() <= NumTraits::highest()); + Index n = int(X.rows()); + Index nrhs = Index(X.cols()); + const Scalar * Lval = valuePtr(); // Nonzero values + Matrix work(n, nrhs); // working vector + work.setZero(); + for (Index k = 0; k <= nsuper(); k ++) + { + Index fsupc = supToCol()[k]; // First column of the current supernode + Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column + Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode + Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode + Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode + Index irow; //Current index row + + if (nsupc == 1 ) + { + for (Index j = 0; j < nrhs; j++) + { + InnerIterator it(*this, fsupc); + ++it; // Skip the diagonal element + for (; it; ++it) + { + irow = it.row(); + X(irow, j) -= X(fsupc, j) * it.value(); + } + } + } + else + { + // The supernode has more than one column + Index luptr = colIndexPtr()[fsupc]; + Index lda = colIndexPtr()[fsupc+1] - luptr; + + // Triangular solve + Map, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); + U = A.template triangularView().solve(U); + + // Matrix-vector product + new (&A) Map, 0, OuterStride<> > ( 
&(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); + work.topRows(nrow).noalias() = A * U; + + //Begin Scatter + for (Index j = 0; j < nrhs; j++) + { + Index iptr = istart + nsupc; + for (Index i = 0; i < nrow; i++) + { + irow = rowIndex()[iptr]; + X(irow, j) -= work(i, j); // Scatter operation + work(i, j) = Scalar(0); + iptr++; + } + } + } + } +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSELU_MATRIX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Utils.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Utils.h new file mode 100644 index 000000000..9e3dab44d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_Utils.h @@ -0,0 +1,80 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_SPARSELU_UTILS_H +#define EIGEN_SPARSELU_UTILS_H + +namespace Eigen { +namespace internal { + +/** + * \brief Count Nonzero elements in the factors + */ +template +void SparseLUImpl::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu) +{ + nnzL = 0; + nnzU = (glu.xusub)(n); + Index nsuper = (glu.supno)(n); + Index jlen; + Index i, j, fsupc; + if (n <= 0 ) return; + // For each supernode + for (i = 0; i <= nsuper; i++) + { + fsupc = glu.xsup(i); + jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); + + for (j = fsupc; j < glu.xsup(i+1); j++) + { + nnzL += jlen; + nnzU += j - fsupc + 1; + jlen--; + } + } +} + +/** + * \brief Fix up the data storage lsub for L-subscripts. + * + * It removes the subscripts sets for structural pruning, + * and applies permutation to the remaining subscripts + * + */ +template +void SparseLUImpl::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu) +{ + Index fsupc, i, j, k, jstart; + + StorageIndex nextl = 0; + Index nsuper = (glu.supno)(n); + + // For each supernode + for (i = 0; i <= nsuper; i++) + { + fsupc = glu.xsup(i); + jstart = glu.xlsub(fsupc); + glu.xlsub(fsupc) = nextl; + for (j = jstart; j < glu.xlsub(fsupc + 1); j++) + { + glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A + nextl++; + } + for (k = fsupc+1; k < glu.xsup(i+1); k++) + glu.xlsub(k) = nextl; // other columns in supernode i + } + + glu.xlsub(n) = nextl; +} + +} // end namespace internal + +} // end namespace Eigen +#endif // EIGEN_SPARSELU_UTILS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h new file mode 100644 index 000000000..b57f06802 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h @@ -0,0 +1,181 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
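The countnz() routine above counts L's nonzeros per supernode as a trapezoid: each column after the first reuses the supernode's row structure minus one entry, while U gains the growing in-supernode triangle. A self-contained illustration with made-up sizes (a supernode over columns 4..6 whose first column holds 5 row indices, i.e. xlsub(5)-xlsub(4) == 5):

    #include <cstdio>

    int main()
    {
      int fsupc = 4, last = 6, jlen = 5;   // hypothetical supernode geometry
      long nnzL = 0, nnzU = 0;
      for (int j = fsupc; j <= last; ++j)
      {
        nnzL += jlen;           // column j of L: shrinking supernode structure
        nnzU += j - fsupc + 1;  // dense triangular part of U inside the supernode
        --jlen;
      }
      std::printf("nnzL=%ld nnzU=%ld\n", nnzL, nnzU); // prints nnzL=12 nnzU=6
    }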
+// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* + + * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU + + * -- SuperLU routine (version 3.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * October 15, 2003 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSELU_COLUMN_BMOD_H +#define SPARSELU_COLUMN_BMOD_H + +namespace Eigen { + +namespace internal { +/** + * \brief Performs numeric block updates (sup-col) in topological order + * + * \param jcol current column to update + * \param nseg Number of segments in the U part + * \param dense Store the full representation of the column + * \param tempv working array + * \param segrep segment representative ... + * \param repfnz ??? First nonzero column in each row ??? ... + * \param fpanelc First column in the current panel + * \param glu Global LU data. + * \return 0 - successful return + * > 0 - number of bytes allocated when run out of space + * + */ +template +Index SparseLUImpl::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, + BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu) +{ + Index jsupno, k, ksub, krep, ksupno; + Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; + Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros; + /* krep = representative of current k-th supernode + * fsupc = first supernodal column + * nsupc = number of columns in a supernode + * nsupr = number of rows in a supernode + * luptr = location of supernodal LU-block in storage + * kfnz = first nonz in the k-th supernodal segment + * no_zeros = no lf leading zeros in a supernodal U-segment + */ + + jsupno = glu.supno(jcol); + // For each nonzero supernode segment of U[*,j] in topological order + k = nseg - 1; + Index d_fsupc; // distance between the first column of the current panel and the + // first column of the current snode + Index fst_col; // First column within small LU update + Index segsize; + for (ksub = 0; ksub < nseg; ksub++) + { + krep = segrep(k); k--; + ksupno = glu.supno(krep); + if (jsupno != ksupno ) + { + // outside the rectangular supernode + fsupc = glu.xsup(ksupno); + fst_col = (std::max)(fsupc, fpanelc); + + // Distance from the current supernode to the current panel; + // d_fsupc = 0 if fsupc > fpanelc + d_fsupc = fst_col - fsupc; + + luptr = glu.xlusup(fst_col) + d_fsupc; + lptr = glu.xlsub(fsupc) + d_fsupc; + + kfnz = repfnz(krep); + kfnz = (std::max)(kfnz, fpanelc); + + segsize = krep - kfnz + 1; + nsupc = krep - fst_col + 1; + nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); + nrow = nsupr - d_fsupc - nsupc; + Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col); + + + // Perform a triangular solver and block update, + 
// then scatter the result of sup-col update to dense + no_zeros = kfnz - fst_col; + if(segsize==1) + LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + else + LU_kernel_bmod::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + } // end if jsupno + } // end for each segment + + // Process the supernodal portion of L\U[*,j] + nextlu = glu.xlusup(jcol); + fsupc = glu.xsup(jsupno); + + // copy the SPA dense into L\U[*,j] + Index mem; + new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc); + Index offset = internal::first_multiple(new_next, internal::packet_traits::size) - new_next; + if(offset) + new_next += offset; + while (new_next > glu.nzlumax ) + { + mem = memXpand(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions); + if (mem) return mem; + } + + for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++) + { + irow = glu.lsub(isub); + glu.lusup(nextlu) = dense(irow); + dense(irow) = Scalar(0.0); + ++nextlu; + } + + if(offset) + { + glu.lusup.segment(nextlu,offset).setZero(); + nextlu += offset; + } + glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol); + + /* For more updates within the panel (also within the current supernode), + * should start from the first column of the panel, or the first column + * of the supernode, whichever is bigger. There are two cases: + * 1) fsupc < fpanelc, then fst_col <-- fpanelc + * 2) fsupc >= fpanelc, then fst_col <-- fsupc + */ + fst_col = (std::max)(fsupc, fpanelc); + + if (fst_col < jcol) + { + // Distance between the current supernode and the current panel + // d_fsupc = 0 if fsupc >= fpanelc + d_fsupc = fst_col - fsupc; + + lptr = glu.xlsub(fsupc) + d_fsupc; + luptr = glu.xlusup(fst_col) + d_fsupc; + nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension + nsupc = jcol - fst_col; // excluding jcol + nrow = nsupr - d_fsupc - nsupc; + + // points to the beginning of jcol in snode L\U(jsupno) + ufirst = glu.xlusup(jcol) + d_fsupc; + Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol); + MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + VectorBlock u(glu.lusup, ufirst, nsupc); + u = A.template triangularView().solve(u); + + new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); + VectorBlock l(glu.lusup, ufirst+nsupc, nrow); + l.noalias() -= A * u; + + } // End if fst_col + return 0; +} + +} // end namespace internal +} // end namespace Eigen + +#endif // SPARSELU_COLUMN_BMOD_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h new file mode 100644 index 000000000..c98b30e32 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h @@ -0,0 +1,179 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
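The column_bmod() routine that just closed applies each updating supernode in two dense steps: a unit-lower triangular solve with the supernode's diagonal block, then a matrix-vector product with its off-diagonal rows, whose result is scattered back into the sparse accumulator (SPA) column. A dense-matrix analogue of that pair of steps, as a sketch only; the real code works on packed supernode storage and does an index-based scatter rather than a contiguous tail update:

    #include <Eigen/Dense>

    void supcol_update(const Eigen::MatrixXd& L11,  // nsupc x nsupc, unit-lower block
                       const Eigen::MatrixXd& L21,  // nrow x nsupc, rows below it
                       Eigen::VectorXd& u,          // U-segment, solved in place
                       Eigen::VectorXd& dense)      // SPA column receiving the update
    {
      // u <- L11^{-1} u  (the triangular solve in LU_kernel_bmod)
      u = L11.triangularView<Eigen::UnitLower>().solve(u);
      // dense <- dense - L21 * u  (the matrix-vector update; scatter omitted)
      dense.tail(L21.rows()).noalias() -= L21 * u;
    }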
+ +/* + + * NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU + + * -- SuperLU routine (version 2.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * November 15, 1997 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSELU_COLUMN_DFS_H +#define SPARSELU_COLUMN_DFS_H + +template class SparseLUImpl; +namespace Eigen { + +namespace internal { + +template +struct column_dfs_traits : no_assignment_operator +{ + typedef typename ScalarVector::Scalar Scalar; + typedef typename IndexVector::Scalar StorageIndex; + column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl::GlobalLU_t& glu, SparseLUImpl& luImpl) + : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl) + {} + bool update_segrep(Index /*krep*/, Index /*jj*/) + { + return true; + } + void mem_expand(IndexVector& lsub, Index& nextl, Index chmark) + { + if (nextl >= m_glu.nzlmax) + m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions); + if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU; + } + enum { ExpandMem = true }; + + Index m_jcol; + Index& m_jsuper_ref; + typename SparseLUImpl::GlobalLU_t& m_glu; + SparseLUImpl& m_luImpl; +}; + + +/** + * \brief Performs a symbolic factorization on column jcol and decide the supernode boundary + * + * A supernode representative is the last column of a supernode. + * The nonzeros in U[*,j] are segments that end at supernodes representatives. + * The routine returns a list of the supernodal representatives + * in topological order of the dfs that generates them. + * The location of the first nonzero in each supernodal segment + * (supernodal entry location) is also returned. + * + * \param m number of rows in the matrix + * \param jcol Current column + * \param perm_r Row permutation + * \param maxsuper Maximum number of column allowed in a supernode + * \param [in,out] nseg Number of segments in current U[*,j] - new segments appended + * \param lsub_col defines the rhs vector to start the dfs + * \param [in,out] segrep Segment representatives - new segments appended + * \param repfnz First nonzero location in each row + * \param xprune + * \param marker marker[i] == jj, if i was visited during dfs of current column jj; + * \param parent + * \param xplore working array + * \param glu global LU data + * \return 0 success + * > 0 number of bytes allocated when run out of space + * + */ +template +Index SparseLUImpl::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, + BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, + IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) +{ + + Index jsuper = glu.supno(jcol); + Index nextl = glu.xlsub(jcol); + VectorBlock marker2(marker, 2*m, m); + + + column_dfs_traits traits(jcol, jsuper, glu, *this); + + // For each nonzero in A(*,jcol) do dfs + for (Index k = 0; ((k < m) ? 
lsub_col[k] != emptyIdxLU : false) ; k++) + { + Index krow = lsub_col(k); + lsub_col(k) = emptyIdxLU; + Index kmark = marker2(krow); + + // krow was visited before, go to the next nonz; + if (kmark == jcol) continue; + + dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent, + xplore, glu, nextl, krow, traits); + } // for each nonzero ... + + Index fsupc; + StorageIndex nsuper = glu.supno(jcol); + StorageIndex jcolp1 = StorageIndex(jcol) + 1; + Index jcolm1 = jcol - 1; + + // check to see if j belongs in the same supernode as j-1 + if ( jcol == 0 ) + { // Do nothing for column 0 + nsuper = glu.supno(0) = 0 ; + } + else + { + fsupc = glu.xsup(nsuper); + StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed + StorageIndex jm1ptr = glu.xlsub(jcolm1); + + // Use supernodes of type T2 : see SuperLU paper + if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU; + + // Make sure the number of columns in a supernode doesn't + // exceed threshold + if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU; + + /* If jcol starts a new supernode, reclaim storage space in + * glu.lsub from previous supernode. Note we only store + * the subscript set of the first and last columns of + * a supernode. (first for num values, last for pruning) + */ + if (jsuper == emptyIdxLU) + { // starts a new supernode + if ( (fsupc < jcolm1-1) ) + { // >= 3 columns in nsuper + StorageIndex ito = glu.xlsub(fsupc+1); + glu.xlsub(jcolm1) = ito; + StorageIndex istop = ito + jptr - jm1ptr; + xprune(jcolm1) = istop; // intialize xprune(jcol-1) + glu.xlsub(jcol) = istop; + + for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) + glu.lsub(ito) = glu.lsub(ifrom); + nextl = ito; // = istop + length(jcol) + } + nsuper++; + glu.supno(jcol) = nsuper; + } // if a new supernode + } // end else: jcol > 0 + + // Tidy up the pointers before exit + glu.xsup(nsuper+1) = jcolp1; + glu.supno(jcolp1) = nsuper; + xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning + glu.xlsub(jcolp1) = StorageIndex(nextl); + + return 0; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h new file mode 100644 index 000000000..c32d8d8b1 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h @@ -0,0 +1,107 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +/* + + * NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU + + * -- SuperLU routine (version 2.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * November 15, 1997 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
+ * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSELU_COPY_TO_UCOL_H +#define SPARSELU_COPY_TO_UCOL_H + +namespace Eigen { +namespace internal { + +/** + * \brief Performs numeric block updates (sup-col) in topological order + * + * \param jcol current column to update + * \param nseg Number of segments in the U part + * \param segrep segment representative ... + * \param repfnz First nonzero column in each row ... + * \param perm_r Row permutation + * \param dense Store the full representation of the column + * \param glu Global LU data. + * \return 0 - successful return + * > 0 - number of bytes allocated when run out of space + * + */ +template +Index SparseLUImpl::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, + BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu) +{ + Index ksub, krep, ksupno; + + Index jsupno = glu.supno(jcol); + + // For each nonzero supernode segment of U[*,j] in topological order + Index k = nseg - 1, i; + StorageIndex nextu = glu.xusub(jcol); + Index kfnz, isub, segsize; + Index new_next,irow; + Index fsupc, mem; + for (ksub = 0; ksub < nseg; ksub++) + { + krep = segrep(k); k--; + ksupno = glu.supno(krep); + if (jsupno != ksupno ) // should go into ucol(); + { + kfnz = repfnz(krep); + if (kfnz != emptyIdxLU) + { // Nonzero U-segment + fsupc = glu.xsup(ksupno); + isub = glu.xlsub(fsupc) + kfnz - fsupc; + segsize = krep - kfnz + 1; + new_next = nextu + segsize; + while (new_next > glu.nzumax) + { + mem = memXpand(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions); + if (mem) return mem; + mem = memXpand(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions); + if (mem) return mem; + + } + + for (i = 0; i < segsize; i++) + { + irow = glu.lsub(isub); + glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order + glu.ucol(nextu) = dense(irow); + dense(irow) = Scalar(0.0); + nextu++; + isub++; + } + + } // end nonzero U-segment + + } // end if jsupno + + } // end for each segment + glu.xusub(jcol + 1) = nextu; // close U(*,jcol) + return 0; +} + +} // namespace internal +} // end namespace Eigen + +#endif // SPARSELU_COPY_TO_UCOL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h new file mode 100644 index 000000000..95ba7413f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h @@ -0,0 +1,280 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
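The kernel defined in the file that follows, sparselu_gemm(), is a register-blocked, packet-vectorized C += A*B for the column-major blocks produced during panel updates. Its semantics are those of the plain triple loop below (a reference sketch only; the parameter names mirror the kernel's, and the vendored version adds peeling, RN/RK register blocking and aligned packet loads on top of this):

    // Reference (scalar) version of the operation the optimized kernel performs.
    void gemm_ref(int m, int n, int d, const double* A, int lda,
                  const double* B, int ldb, double* C, int ldc)
    {
      for (int j = 0; j < n; ++j)      // each output column of C
        for (int k = 0; k < d; ++k)    // each column of A / row of B
          for (int i = 0; i < m; ++i)  // each row, the vectorized dimension
            C[i + j*ldc] += A[i + k*lda] * B[k + j*ldb];
    }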
+ +#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H +#define EIGEN_SPARSELU_GEMM_KERNEL_H + +namespace Eigen { + +namespace internal { + + +/** \internal + * A general matrix-matrix product kernel optimized for the SparseLU factorization. + * - A, B, and C must be column major + * - lda and ldc must be multiples of the respective packet size + * - C must have the same alignment as A + */ +template +EIGEN_DONT_INLINE +void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc) +{ + using namespace Eigen::internal; + + typedef typename packet_traits::type Packet; + enum { + NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, + PacketSize = packet_traits::size, + PM = 8, // peeling in M + RN = 2, // register blocking + RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking + BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk + SM = PM*PacketSize // step along M + }; + Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking + Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once + Index i0 = internal::first_default_aligned(A,m); + + eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m))); + + // handle the non aligned rows of A and C without any optimization: + for(Index i=0; i(BM, m-ib); // actual number of rows + Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling + Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization + + // Let's process two columns of B-C at once + for(Index j=0; j(Bc0[0]); } + { b10 = pset1(Bc0[1]); } + if(RK==4) { b20 = pset1(Bc0[2]); } + if(RK==4) { b30 = pset1(Bc0[3]); } + { b01 = pset1(Bc1[0]); } + { b11 = pset1(Bc1[1]); } + if(RK==4) { b21 = pset1(Bc1[2]); } + if(RK==4) { b31 = pset1(Bc1[3]); } + + Packet a0, a1, a2, a3, c0, c1, t0, t1; + + const Scalar* A0 = A+ib+(k+0)*lda; + const Scalar* A1 = A+ib+(k+1)*lda; + const Scalar* A2 = A+ib+(k+2)*lda; + const Scalar* A3 = A+ib+(k+3)*lda; + + Scalar* C0 = C+ib+(j+0)*ldc; + Scalar* C1 = C+ib+(j+1)*ldc; + + a0 = pload(A0); + a1 = pload(A1); + if(RK==4) + { + a2 = pload(A2); + a3 = pload(A3); + } + else + { + // workaround "may be used uninitialized in this function" warning + a2 = a3 = a0; + } + +#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);} +#define WORK(I) \ + c0 = pload(C0+i+(I)*PacketSize); \ + c1 = pload(C1+i+(I)*PacketSize); \ + KMADD(c0, a0, b00, t0) \ + KMADD(c1, a0, b01, t1) \ + a0 = pload(A0+i+(I+1)*PacketSize); \ + KMADD(c0, a1, b10, t0) \ + KMADD(c1, a1, b11, t1) \ + a1 = pload(A1+i+(I+1)*PacketSize); \ + if(RK==4){ KMADD(c0, a2, b20, t0) }\ + if(RK==4){ KMADD(c1, a2, b21, t1) }\ + if(RK==4){ a2 = pload(A2+i+(I+1)*PacketSize); }\ + if(RK==4){ KMADD(c0, a3, b30, t0) }\ + if(RK==4){ KMADD(c1, a3, b31, t1) }\ + if(RK==4){ a3 = pload(A3+i+(I+1)*PacketSize); }\ + pstore(C0+i+(I)*PacketSize, c0); \ + pstore(C1+i+(I)*PacketSize, c1) + + // process rows of A' - C' with aggressive vectorization and peeling + for(Index i=0; i0) + { + const Scalar* Bc0 = B+(n-1)*ldb; + + for(Index k=0; k(Bc0[0]); + b10 = pset1(Bc0[1]); + if(RK==4) b20 = pset1(Bc0[2]); + if(RK==4) b30 = pset1(Bc0[3]); + + Packet a0, a1, a2, a3, c0, t0/*, t1*/; + + const Scalar* A0 = A+ib+(k+0)*lda; + const Scalar* A1 = A+ib+(k+1)*lda; + const Scalar* A2 = A+ib+(k+2)*lda; + const Scalar* A3 = A+ib+(k+3)*lda; + + Scalar* C0 = 
C+ib+(n_end)*ldc; + + a0 = pload(A0); + a1 = pload(A1); + if(RK==4) + { + a2 = pload(A2); + a3 = pload(A3); + } + else + { + // workaround "may be used uninitialized in this function" warning + a2 = a3 = a0; + } + +#define WORK(I) \ + c0 = pload(C0+i+(I)*PacketSize); \ + KMADD(c0, a0, b00, t0) \ + a0 = pload(A0+i+(I+1)*PacketSize); \ + KMADD(c0, a1, b10, t0) \ + a1 = pload(A1+i+(I+1)*PacketSize); \ + if(RK==4){ KMADD(c0, a2, b20, t0) }\ + if(RK==4){ a2 = pload(A2+i+(I+1)*PacketSize); }\ + if(RK==4){ KMADD(c0, a3, b30, t0) }\ + if(RK==4){ a3 = pload(A3+i+(I+1)*PacketSize); }\ + pstore(C0+i+(I)*PacketSize, c0); + + // agressive vectorization and peeling + for(Index i=0; i0) + { + for(Index j=0; j1 ? Aligned : 0 + }; + typedef Map, Alignment > MapVector; + typedef Map, Alignment > ConstMapVector; + if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b); + + else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b); + + else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b) + + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b); + } + } + + } // blocking on the rows of A and C +} +#undef KMADD + +} // namespace internal + +} // namespace Eigen + +#endif // EIGEN_SPARSELU_GEMM_KERNEL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h new file mode 100644 index 000000000..6f75d500e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h @@ -0,0 +1,126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* This file is a modified version of heap_relax_snode.c file in SuperLU + * -- SuperLU routine (version 3.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * October 15, 2003 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ + +#ifndef SPARSELU_HEAP_RELAX_SNODE_H +#define SPARSELU_HEAP_RELAX_SNODE_H + +namespace Eigen { +namespace internal { + +/** + * \brief Identify the initial relaxed supernodes + * + * This routine applied to a symmetric elimination tree. 
+ * It assumes that the matrix has been reordered according to the postorder of the etree + * \param n The number of columns + * \param et elimination tree + * \param relax_columns Maximum number of columns allowed in a relaxed snode + * \param descendants Number of descendants of each node in the etree + * \param relax_end last column in a supernode + */ +template +void SparseLUImpl::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) +{ + + // The etree may not be postordered, but its heap ordered + IndexVector post; + internal::treePostorder(StorageIndex(n), et, post); // Post order etree + IndexVector inv_post(n+1); + for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()??? + + // Renumber etree in postorder + IndexVector iwork(n); + IndexVector et_save(n+1); + for (Index i = 0; i < n; ++i) + { + iwork(post(i)) = post(et(i)); + } + et_save = et; // Save the original etree + et = iwork; + + // compute the number of descendants of each node in the etree + relax_end.setConstant(emptyIdxLU); + Index j, parent; + descendants.setZero(); + for (j = 0; j < n; j++) + { + parent = et(j); + if (parent != n) // not the dummy root + descendants(parent) += descendants(j) + 1; + } + // Identify the relaxed supernodes by postorder traversal of the etree + Index snode_start; // beginning of a snode + StorageIndex k; + Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree + Index nsuper_et = 0; // Number of relaxed snodes in the original etree + StorageIndex l; + for (j = 0; j < n; ) + { + parent = et(j); + snode_start = j; + while ( parent != n && descendants(parent) < relax_columns ) + { + j = parent; + parent = et(j); + } + // Found a supernode in postordered etree, j is the last column + ++nsuper_et_post; + k = StorageIndex(n); + for (Index i = snode_start; i <= j; ++i) + k = (std::min)(k, inv_post(i)); + l = inv_post(j); + if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode + { + // This is also a supernode in the original etree + relax_end(k) = l; // Record last column + ++nsuper_et; + } + else + { + for (Index i = snode_start; i <= j; ++i) + { + l = inv_post(i); + if (descendants(i) == 0) + { + relax_end(l) = l; + ++nsuper_et; + } + } + } + j++; + // Search for a new leaf + while (descendants(j) != 0 && j < n) j++; + } // End postorder traversal of the etree + + // Recover the original etree + et = et_save; +} + +} // end namespace internal + +} // end namespace Eigen +#endif // SPARSELU_HEAP_RELAX_SNODE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h new file mode 100644 index 000000000..8c1b3e8bc --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h @@ -0,0 +1,130 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
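The heap_relax_snode() routine above grows a relaxed supernode by climbing the postordered etree from a leaf for as long as the absorbed subtree stays below relax_columns descendants. The core criterion, extracted as a sketch (et, descendants and relax_columns as in the routine; n plays the role of the dummy root):

    #include <vector>

    int snode_end(int j, int n, const std::vector<int>& et,
                  const std::vector<int>& descendants, int relax_columns)
    {
      int parent = et[j];
      while (parent != n && descendants[parent] < relax_columns)
      {
        j = parent;       // absorb the parent column into the snode
        parent = et[j];
      }
      return j;           // last column of the relaxed supernode rooted at the leaf
    }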
+ +#ifndef SPARSELU_KERNEL_BMOD_H +#define SPARSELU_KERNEL_BMOD_H + +namespace Eigen { +namespace internal { + +template struct LU_kernel_bmod +{ + /** \internal + * \brief Performs numeric block updates from a given supernode to a single column + * + * \param segsize Size of the segment (and blocks ) to use for updates + * \param[in,out] dense Packed values of the original matrix + * \param tempv temporary vector to use for updates + * \param lusup array containing the supernodes + * \param lda Leading dimension in the supernode + * \param nrow Number of rows in the rectangular part of the supernode + * \param lsub compressed row subscripts of supernodes + * \param lptr pointer to the first column of the current supernode in lsub + * \param no_zeros Number of nonzeros elements before the diagonal part of the supernode + */ + template + static EIGEN_DONT_INLINE void run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, + const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); +}; + +template +template +EIGEN_DONT_INLINE void LU_kernel_bmod::run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, + const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros) +{ + typedef typename ScalarVector::Scalar Scalar; + // First, copy U[*,j] segment from dense(*) to tempv(*) + // The result of triangular solve is in tempv[*]; + // The result of matric-vector update is in dense[*] + Index isub = lptr + no_zeros; + Index i; + Index irow; + for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++) + { + irow = lsub(isub); + tempv(i) = dense(irow); + ++isub; + } + // Dense triangular solve -- start effective triangle + luptr += lda * no_zeros + no_zeros; + // Form Eigen matrix and vector + Map, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) ); + Map > u(tempv.data(), segsize); + + u = A.template triangularView().solve(u); + + // Dense matrix-vector product y <-- B*x + luptr += segsize; + const Index PacketSize = internal::packet_traits::size; + Index ldl = internal::first_multiple(nrow, PacketSize); + Map, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) ); + Index aligned_offset = internal::first_default_aligned(tempv.data()+segsize, PacketSize); + Index aligned_with_B_offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize))%PacketSize; + Map, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) ); + + l.setZero(); + internal::sparselu_gemm(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride()); + + // Scatter tempv[] into SPA dense[] as a temporary storage + isub = lptr + no_zeros; + for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++) + { + irow = lsub(isub++); + dense(irow) = tempv(i); + } + + // Scatter l into SPA dense[] + for (i = 0; i < nrow; i++) + { + irow = lsub(isub++); + dense(irow) -= l(i); + } +} + +template <> struct LU_kernel_bmod<1> +{ + template + static EIGEN_DONT_INLINE void run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, + const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); +}; + + +template +EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const Index 
/*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, + const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros) +{ + typedef typename ScalarVector::Scalar Scalar; + typedef typename IndexVector::Scalar StorageIndex; + Scalar f = dense(lsub(lptr + no_zeros)); + luptr += lda * no_zeros + no_zeros + 1; + const Scalar* a(lusup.data() + luptr); + const StorageIndex* irow(lsub.data()+lptr + no_zeros + 1); + Index i = 0; + for (; i+1 < nrow; i+=2) + { + Index i0 = *(irow++); + Index i1 = *(irow++); + Scalar a0 = *(a++); + Scalar a1 = *(a++); + Scalar d0 = dense.coeff(i0); + Scalar d1 = dense.coeff(i1); + d0 -= f*a0; + d1 -= f*a1; + dense.coeffRef(i0) = d0; + dense.coeffRef(i1) = d1; + } + if(i +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* + + * NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU + + * -- SuperLU routine (version 3.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * October 15, 2003 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSELU_PANEL_BMOD_H +#define SPARSELU_PANEL_BMOD_H + +namespace Eigen { +namespace internal { + +/** + * \brief Performs numeric block updates (sup-panel) in topological order. + * + * Before entering this routine, the original nonzeros in the panel + * were already copied i nto the spa[m,w] + * + * \param m number of rows in the matrix + * \param w Panel size + * \param jcol Starting column of the panel + * \param nseg Number of segments in the U part + * \param dense Store the full representation of the panel + * \param tempv working array + * \param segrep segment representative... first row in the segment + * \param repfnz First nonzero rows + * \param glu Global LU data. + * + * + */ +template +void SparseLUImpl::panel_bmod(const Index m, const Index w, const Index jcol, + const Index nseg, ScalarVector& dense, ScalarVector& tempv, + IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu) +{ + + Index ksub,jj,nextl_col; + Index fsupc, nsupc, nsupr, nrow; + Index krep, kfnz; + Index lptr; // points to the row subscripts of a supernode + Index luptr; // ... 
+ Index segsize,no_zeros ; + // For each nonz supernode segment of U[*,j] in topological order + Index k = nseg - 1; + const Index PacketSize = internal::packet_traits::size; + + for (ksub = 0; ksub < nseg; ksub++) + { // For each updating supernode + /* krep = representative of current k-th supernode + * fsupc = first supernodal column + * nsupc = number of columns in a supernode + * nsupr = number of rows in a supernode + */ + krep = segrep(k); k--; + fsupc = glu.xsup(glu.supno(krep)); + nsupc = krep - fsupc + 1; + nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); + nrow = nsupr - nsupc; + lptr = glu.xlsub(fsupc); + + // loop over the panel columns to detect the actual number of columns and rows + Index u_rows = 0; + Index u_cols = 0; + for (jj = jcol; jj < jcol + w; jj++) + { + nextl_col = (jj-jcol) * m; + VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row + + kfnz = repfnz_col(krep); + if ( kfnz == emptyIdxLU ) + continue; // skip any zero segment + + segsize = krep - kfnz + 1; + u_cols++; + u_rows = (std::max)(segsize,u_rows); + } + + if(nsupc >= 2) + { + Index ldu = internal::first_multiple(u_rows, PacketSize); + Map > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu)); + + // gather U + Index u_col = 0; + for (jj = jcol; jj < jcol + w; jj++) + { + nextl_col = (jj-jcol) * m; + VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row + VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here + + kfnz = repfnz_col(krep); + if ( kfnz == emptyIdxLU ) + continue; // skip any zero segment + + segsize = krep - kfnz + 1; + luptr = glu.xlusup(fsupc); + no_zeros = kfnz - fsupc; + + Index isub = lptr + no_zeros; + Index off = u_rows-segsize; + for (Index i = 0; i < off; i++) U(i,u_col) = 0; + for (Index i = 0; i < segsize; i++) + { + Index irow = glu.lsub(isub); + U(i+off,u_col) = dense_col(irow); + ++isub; + } + u_col++; + } + // solve U = A^-1 U + luptr = glu.xlusup(fsupc); + Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); + no_zeros = (krep - u_rows + 1) - fsupc; + luptr += lda * no_zeros + no_zeros; + MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) ); + U = A.template triangularView().solve(U); + + // update + luptr += u_rows; + MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) ); + eigen_assert(tempv.size()>w*ldu + nrow*w + 1); + + Index ldl = internal::first_multiple(nrow, PacketSize); + Index offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize)) % PacketSize; + MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl)); + + L.setZero(); + internal::sparselu_gemm(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride()); + + // scatter U and L + u_col = 0; + for (jj = jcol; jj < jcol + w; jj++) + { + nextl_col = (jj-jcol) * m; + VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row + VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here + + kfnz = repfnz_col(krep); + if ( kfnz == emptyIdxLU ) + continue; // skip any zero segment + + segsize = krep - kfnz + 1; + no_zeros = kfnz - fsupc; + Index isub = lptr + no_zeros; + + Index off = u_rows-segsize; + for (Index i = 0; i < segsize; i++) + { + Index irow = glu.lsub(isub++); + dense_col(irow) = U.coeff(i+off,u_col); + U.coeffRef(i+off,u_col) = 0; + } + + // Scatter l into SPA dense[] + for (Index i = 0; 
i < nrow; i++) + { + Index irow = glu.lsub(isub++); + dense_col(irow) -= L.coeff(i,u_col); + L.coeffRef(i,u_col) = 0; + } + u_col++; + } + } + else // level 2 only + { + // Sequence through each column in the panel + for (jj = jcol; jj < jcol + w; jj++) + { + nextl_col = (jj-jcol) * m; + VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row + VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here + + kfnz = repfnz_col(krep); + if ( kfnz == emptyIdxLU ) + continue; // skip any zero segment + + segsize = krep - kfnz + 1; + luptr = glu.xlusup(fsupc); + + Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr + + // Perform a trianglar solve and block update, + // then scatter the result of sup-col update to dense[] + no_zeros = kfnz - fsupc; + if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + else if(segsize==2) LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + else if(segsize==3) LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + else LU_kernel_bmod::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); + } // End for each column in the panel + } + + } // End for each updating supernode +} // end panel bmod + +} // end namespace internal + +} // end namespace Eigen + +#endif // SPARSELU_PANEL_BMOD_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h new file mode 100644 index 000000000..155df7336 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* + + * NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU + + * -- SuperLU routine (version 2.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * November 15, 1997 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. 
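// [Editor's illustration - not part of the patch] A minimal, self-contained sketch of
// the supernodal update that panel_bmod performs above, using plain dense Eigen
// objects instead of the packed lusup/tempv storage. All names (Lkk, Lbk, Upanel)
// are hypothetical; the real routine maps the same two operations - a unit lower
// triangular solve, then a GEMM - onto gathered and scattered column segments.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  using Eigen::MatrixXd;
  const int nsupc = 3;  // columns in the updating supernode
  const int nrow  = 2;  // rows below the supernode's triangular block
  const int w     = 2;  // panel width: number of gathered U columns

  // L-part of the supernode: unit lower triangular block on top, rectangular
  // block underneath (stored contiguously in glu.lusup in the real code).
  MatrixXd Lkk = MatrixXd::Identity(nsupc, nsupc);
  Lkk(1,0) = 0.5; Lkk(2,0) = -0.25; Lkk(2,1) = 0.75;
  MatrixXd Lbk(nrow, nsupc);
  Lbk << 1, 2, 3,
         4, 5, 6;

  MatrixXd Upanel = MatrixXd::Random(nsupc, w);  // gathered U segments

  // Step 1: the "solve U = A^-1 U" step - dense unit lower triangular solve.
  Lkk.triangularView<Eigen::UnitLower>().solveInPlace(Upanel);

  // Step 2: the update block that internal::sparselu_gemm computes.
  MatrixXd Lupdate = Lbk * Upanel;

  // The real routine then scatters Upanel back to dense[] and subtracts Lupdate.
  std::cout << "update block:\n" << Lupdate << std::endl;
  return 0;
}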
+ */ +#ifndef SPARSELU_PANEL_DFS_H +#define SPARSELU_PANEL_DFS_H + +namespace Eigen { + +namespace internal { + +template +struct panel_dfs_traits +{ + typedef typename IndexVector::Scalar StorageIndex; + panel_dfs_traits(Index jcol, StorageIndex* marker) + : m_jcol(jcol), m_marker(marker) + {} + bool update_segrep(Index krep, StorageIndex jj) + { + if(m_marker[krep] +template +void SparseLUImpl::dfs_kernel(const StorageIndex jj, IndexVector& perm_r, + Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, + Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, + IndexVector& xplore, GlobalLU_t& glu, + Index& nextl_col, Index krow, Traits& traits + ) +{ + + StorageIndex kmark = marker(krow); + + // For each unmarked krow of jj + marker(krow) = jj; + StorageIndex kperm = perm_r(krow); + if (kperm == emptyIdxLU ) { + // krow is in L : place it in structure of L(*, jj) + panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A + + traits.mem_expand(panel_lsub, nextl_col, kmark); + } + else + { + // krow is in U : if its supernode-representative krep + // has been explored, update repfnz(*) + // krep = supernode representative of the current row + StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1; + // First nonzero element in the current column: + StorageIndex myfnz = repfnz_col(krep); + + if (myfnz != emptyIdxLU ) + { + // Representative visited before + if (myfnz > kperm ) repfnz_col(krep) = kperm; + + } + else + { + // Otherwise, perform dfs starting at krep + StorageIndex oldrep = emptyIdxLU; + parent(krep) = oldrep; + repfnz_col(krep) = kperm; + StorageIndex xdfs = glu.xlsub(krep); + Index maxdfs = xprune(krep); + + StorageIndex kpar; + do + { + // For each unmarked kchild of krep + while (xdfs < maxdfs) + { + StorageIndex kchild = glu.lsub(xdfs); + xdfs++; + StorageIndex chmark = marker(kchild); + + if (chmark != jj ) + { + marker(kchild) = jj; + StorageIndex chperm = perm_r(kchild); + + if (chperm == emptyIdxLU) + { + // case kchild is in L: place it in L(*, j) + panel_lsub(nextl_col++) = kchild; + traits.mem_expand(panel_lsub, nextl_col, chmark); + } + else + { + // case kchild is in U : + // chrep = its supernode-rep. If its rep has been explored, + // update its repfnz(*) + StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1; + myfnz = repfnz_col(chrep); + + if (myfnz != emptyIdxLU) + { // Visited before + if (myfnz > chperm) + repfnz_col(chrep) = chperm; + } + else + { // Cont. dfs at snode-rep of kchild + xplore(krep) = xdfs; + oldrep = krep; + krep = chrep; // Go deeper down G(L) + parent(krep) = oldrep; + repfnz_col(krep) = chperm; + xdfs = glu.xlsub(krep); + maxdfs = xprune(krep); + + } // end if myfnz != -1 + } // end if chperm == -1 + + } // end if chmark !=jj + } // end while xdfs < maxdfs + + // krow has no more unexplored nbrs : + // Place snode-rep krep in postorder DFS, if this + // segment is seen for the first time. (Note that + // "repfnz(krep)" may change later.) 
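// [Editor's note] The loop above is a recursion-free depth-first search over the
// graph of L: 'parent' chains the supernode representatives on the current DFS
// path (mimicking the call stack), while 'xplore' records where the scan of each
// representative's adjacency list was suspended before descending into a child.
// The backtracking code below pops that stack: it emits krep as a segment
// representative the first time this panel reaches it, then resumes the parent's
// scan from xplore(parent).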
+ // Baktrack dfs to its parent + if(traits.update_segrep(krep,jj)) + //if (marker1(krep) < jcol ) + { + segrep(nseg) = krep; + ++nseg; + //marker1(krep) = jj; + } + + kpar = parent(krep); // Pop recursion, mimic recursion + if (kpar == emptyIdxLU) + break; // dfs done + krep = kpar; + xdfs = xplore(krep); + maxdfs = xprune(krep); + + } while (kpar != emptyIdxLU); // Do until empty stack + + } // end if (myfnz = -1) + + } // end if (kperm == -1) +} + +/** + * \brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w) + * + * A supernode representative is the last column of a supernode. + * The nonzeros in U[*,j] are segments that end at supernodes representatives + * + * The routine returns a list of the supernodal representatives + * in topological order of the dfs that generates them. This list is + * a superset of the topological order of each individual column within + * the panel. + * The location of the first nonzero in each supernodal segment + * (supernodal entry location) is also returned. Each column has + * a separate list for this purpose. + * + * Two markers arrays are used for dfs : + * marker[i] == jj, if i was visited during dfs of current column jj; + * marker1[i] >= jcol, if i was visited by earlier columns in this panel; + * + * \param[in] m number of rows in the matrix + * \param[in] w Panel size + * \param[in] jcol Starting column of the panel + * \param[in] A Input matrix in column-major storage + * \param[in] perm_r Row permutation + * \param[out] nseg Number of U segments + * \param[out] dense Accumulate the column vectors of the panel + * \param[out] panel_lsub Subscripts of the row in the panel + * \param[out] segrep Segment representative i.e first nonzero row of each segment + * \param[out] repfnz First nonzero location in each row + * \param[out] xprune The pruned elimination tree + * \param[out] marker work vector + * \param parent The elimination tree + * \param xplore work vector + * \param glu The global data structure + * + */ + +template +void SparseLUImpl::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) +{ + Index nextl_col; // Next available position in panel_lsub[*,jj] + + // Initialize pointers + VectorBlock marker1(marker, m, m); + nseg = 0; + + panel_dfs_traits traits(jcol, marker1.data()); + + // For each column in the panel + for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++) + { + nextl_col = (jj - jcol) * m; + + VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row + VectorBlock dense_col(dense,nextl_col, m); // Accumulate a column vector here + + + // For each nnz in A[*, jj] do depth first search + for (typename MatrixType::InnerIterator it(A, jj); it; ++it) + { + Index krow = it.row(); + dense_col(krow) = it.value(); + + StorageIndex kmark = marker(krow); + if (kmark == jj) + continue; // krow visited before, go to the next nonzero + + dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent, + xplore, glu, nextl_col, krow, traits); + }// end for nonzeros in column jj + + } // end for column jj +} + +} // end namespace internal +} // end namespace Eigen + +#endif // SPARSELU_PANEL_DFS_H diff --git 
a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
new file mode 100644
index 000000000..a86dac93f
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of xpivotL.c file in SuperLU
+
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PIVOTL_H
+#define SPARSELU_PIVOTL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation.
+ *
+ * Pivot policy :
+ * (1) Compute thresh = u * max_(i>=j) abs(A_ij);
+ * (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
+ *         pivot row = k;
+ *     ELSE IF abs(A_jj) >= thresh THEN
+ *         pivot row = j;
+ *     ELSE
+ *         pivot row = m;
+ *
+ * Note: If you absolutely want to use a given pivot order, then set u=0.0.
+ *
+ * \param jcol The current column of L
+ * \param diagpivotthresh diagonal pivoting threshold
+ * \param[in,out] perm_r Row permutation (threshold pivoting)
+ * \param[in] iperm_c column permutation - used to find diagonal of Pc*A*Pc'
+ * \param[out] pivrow The pivot row
+ * \param glu Global LU data
+ * \return 0 if success, i > 0 if U(i,i) is exactly zero
+ *
+ */
+template <typename Scalar, typename StorageIndex>
+Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
+{
+
+  Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
+  Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0
+  Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion
+  Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode
+  Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension
+  Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode
+  Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode
+  StorageIndex* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode
+
+  // Determine the largest abs numerical value for partial pivoting
+  Index diagind = iperm_c(jcol); // diagonal index
+  RealScalar pivmax(-1.0);
+  Index pivptr = nsupc;
+  Index diag = emptyIdxLU;
+  RealScalar rtemp;
+  Index isub, icol, itemp, k;
+  for (isub = nsupc; isub < nsupr; ++isub) {
+    using std::abs;
+    rtemp = abs(lu_col_ptr[isub]);
+    if (rtemp > pivmax) {
+      pivmax = rtemp;
+      pivptr = isub;
+    }
+    if (lsub_ptr[isub] == diagind) diag = isub;
+  }
+
+  // Test for singularity
+  if ( pivmax <= RealScalar(0.0) ) {
+    // if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero
+    pivrow = pivmax < RealScalar(0.0) ?
diagind : lsub_ptr[pivptr]; + perm_r(pivrow) = StorageIndex(jcol); + return (jcol+1); + } + + RealScalar thresh = diagpivotthresh * pivmax; + + // Choose appropriate pivotal element + + { + // Test if the diagonal element can be used as a pivot (given the threshold value) + if (diag >= 0 ) + { + // Diagonal element exists + using std::abs; + rtemp = abs(lu_col_ptr[diag]); + if (rtemp != RealScalar(0.0) && rtemp >= thresh) pivptr = diag; + } + pivrow = lsub_ptr[pivptr]; + } + + // Record pivot row + perm_r(pivrow) = StorageIndex(jcol); + // Interchange row subscripts + if (pivptr != nsupc ) + { + std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] ); + // Interchange numerical values as well, for the two rows in the whole snode + // such that L is indexed the same way as A + for (icol = 0; icol <= nsupc; icol++) + { + itemp = pivptr + icol * lda; + std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]); + } + } + // cdiv operations + Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc]; + for (k = nsupc+1; k < nsupr; k++) + lu_col_ptr[k] *= temp; + return 0; +} + +} // end namespace internal +} // end namespace Eigen + +#endif // SPARSELU_PIVOTL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h new file mode 100644 index 000000000..ad32fed5e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h @@ -0,0 +1,136 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* + + * NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU + + * -- SuperLU routine (version 2.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * November 15, 1997 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ +#ifndef SPARSELU_PRUNEL_H +#define SPARSELU_PRUNEL_H + +namespace Eigen { +namespace internal { + +/** + * \brief Prunes the L-structure. 
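// [Editor's illustration - not part of the patch] The threshold policy of the
// pivotL() routine that concludes above, demonstrated on a single dense column.
// 'u' plays the role of diagpivotthresh: u == 1.0 gives classical partial
// pivoting, u == 0.0 keeps the diagonal entry whenever it exists and is nonzero.
// choose_pivot is a hypothetical helper, not an Eigen function.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Returns the row chosen as pivot; 'diag' is the diagonal position (-1 if absent).
static int choose_pivot(const Eigen::VectorXd& col, int diag, double u)
{
  int pivptr = 0;
  double pivmax = -1.0;
  for (int i = 0; i < col.size(); ++i)  // largest magnitude entry
    if (std::abs(col[i]) > pivmax) { pivmax = std::abs(col[i]); pivptr = i; }
  if (pivmax <= 0.0) return -1;         // structurally or numerically singular
  const double thresh = u * pivmax;
  if (diag >= 0 && std::abs(col[diag]) != 0.0 && std::abs(col[diag]) >= thresh)
    return diag;                        // the diagonal is "good enough"
  return pivptr;                        // otherwise take the largest entry
}

int main()
{
  Eigen::VectorXd col(4);
  col << 0.1, -2.0, 0.5, 1.9;           // diagonal entry sits at row 0
  std::cout << choose_pivot(col, 0, 1.0) << "\n";  // prints 1: partial pivoting picks -2.0
  std::cout << choose_pivot(col, 0, 0.0) << "\n";  // prints 0: threshold 0 keeps the diagonal
  return 0;
}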
+ * + * It prunes the L-structure of supernodes whose L-structure contains the current pivot row "pivrow" + * + * + * \param jcol The current column of L + * \param[in] perm_r Row permutation + * \param[out] pivrow The pivot row + * \param nseg Number of segments + * \param segrep + * \param repfnz + * \param[out] xprune + * \param glu Global LU data + * + */ +template +void SparseLUImpl::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, + const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu) +{ + // For each supernode-rep irep in U(*,j] + Index jsupno = glu.supno(jcol); + Index i,irep,irep1; + bool movnum, do_prune = false; + Index kmin = 0, kmax = 0, minloc, maxloc,krow; + for (i = 0; i < nseg; i++) + { + irep = segrep(i); + irep1 = irep + 1; + do_prune = false; + + // Don't prune with a zero U-segment + if (repfnz(irep) == emptyIdxLU) continue; + + // If a snode overlaps with the next panel, then the U-segment + // is fragmented into two parts -- irep and irep1. We should let + // pruning occur at the rep-column in irep1s snode. + if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune + + // If it has not been pruned & it has a nonz in row L(pivrow,i) + if (glu.supno(irep) != jsupno ) + { + if ( xprune (irep) >= glu.xlsub(irep1) ) + { + kmin = glu.xlsub(irep); + kmax = glu.xlsub(irep1) - 1; + for (krow = kmin; krow <= kmax; krow++) + { + if (glu.lsub(krow) == pivrow) + { + do_prune = true; + break; + } + } + } + + if (do_prune) + { + // do a quicksort-type partition + // movnum=true means that the num values have to be exchanged + movnum = false; + if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1 + movnum = true; + + while (kmin <= kmax) + { + if (perm_r(glu.lsub(kmax)) == emptyIdxLU) + kmax--; + else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU) + kmin++; + else + { + // kmin below pivrow (not yet pivoted), and kmax + // above pivrow: interchange the two suscripts + std::swap(glu.lsub(kmin), glu.lsub(kmax)); + + // If the supernode has only one column, then we + // only keep one set of subscripts. For any subscript + // intercnahge performed, similar interchange must be + // done on the numerical values. + if (movnum) + { + minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) ); + maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) ); + std::swap(glu.lusup(minloc), glu.lusup(maxloc)); + } + kmin++; + kmax--; + } + } // end while + + xprune(irep) = StorageIndex(kmin); //Pruning + } // end if do_prune + } // end pruning + } // End for each U-segment +} + +} // end namespace internal +} // end namespace Eigen + +#endif // SPARSELU_PRUNEL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h new file mode 100644 index 000000000..c408d01b4 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h @@ -0,0 +1,83 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
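// [Editor's illustration - not part of the patch] The core of pruneL() above is a
// quicksort-style partition: row subscripts that have already been pivoted
// (perm_r(row) != empty) move to the front of the segment, unpivoted ones to the
// back, and xprune is set to the split point so later DFS traversals stop there.
// A self-contained sketch with std::vector (all names local to this sketch):
#include <vector>
#include <utility>
#include <iostream>

int main()
{
  const int empty = -1;
  std::vector<int> lsub = {4, 7, 2, 9, 5};  // row subscripts of one segment
  // Pivot status per row index: rows 4, 7, 2 and 9 are pivoted, row 5 is not.
  std::vector<int> perm_r = {0, empty, 1, empty, 2, empty, 3, 4, empty, 5};

  int kmin = 0, kmax = int(lsub.size()) - 1;
  while (kmin <= kmax)
  {
    if (perm_r[lsub[kmax]] == empty)      --kmax;  // already in the unpivoted part
    else if (perm_r[lsub[kmin]] != empty) ++kmin;  // already in the pivoted part
    else std::swap(lsub[kmin++], lsub[kmax--]);    // exchange the pair
  }
  std::cout << "xprune = " << kmin << "\n";  // prints 4: lsub[0..3] are pivoted rows
  return 0;
}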
+ +/* This file is a modified version of heap_relax_snode.c file in SuperLU + * -- SuperLU routine (version 3.0) -- + * Univ. of California Berkeley, Xerox Palo Alto Research Center, + * and Lawrence Berkeley National Lab. + * October 15, 2003 + * + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY + * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program for any + * purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is + * granted, provided the above notices are retained, and a notice that + * the code was modified is included with the above copyright notice. + */ + +#ifndef SPARSELU_RELAX_SNODE_H +#define SPARSELU_RELAX_SNODE_H + +namespace Eigen { + +namespace internal { + +/** + * \brief Identify the initial relaxed supernodes + * + * This routine is applied to a column elimination tree. + * It assumes that the matrix has been reordered according to the postorder of the etree + * \param n the number of columns + * \param et elimination tree + * \param relax_columns Maximum number of columns allowed in a relaxed snode + * \param descendants Number of descendants of each node in the etree + * \param relax_end last column in a supernode + */ +template +void SparseLUImpl::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) +{ + + // compute the number of descendants of each node in the etree + Index parent; + relax_end.setConstant(emptyIdxLU); + descendants.setZero(); + for (Index j = 0; j < n; j++) + { + parent = et(j); + if (parent != n) // not the dummy root + descendants(parent) += descendants(j) + 1; + } + // Identify the relaxed supernodes by postorder traversal of the etree + Index snode_start; // beginning of a snode + for (Index j = 0; j < n; ) + { + parent = et(j); + snode_start = j; + while ( parent != n && descendants(parent) < relax_columns ) + { + j = parent; + parent = et(j); + } + // Found a supernode in postordered etree, j is the last column + relax_end(snode_start) = StorageIndex(j); // Record last column + j++; + // Search for a new leaf + while (descendants(j) != 0 && j < n) j++; + } // End postorder traversal of the etree + +} + +} // end namespace internal + +} // end namespace Eigen +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseQR/SparseQR.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseQR/SparseQR.h new file mode 100644 index 000000000..7409fcae9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SparseQR/SparseQR.h @@ -0,0 +1,745 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012-2013 Desire Nuentsa +// Copyright (C) 2012-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
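// [Editor's illustration - not part of the patch] relax_snode() above groups
// postordered etree columns into relaxed supernodes: starting at a leaf, it climbs
// while the parent has fewer than relax_columns descendants. A standalone version
// on a small two-subtree forest (all names local to this sketch); note it tests
// j < n before reading desc[j], whereas the routine above evaluates the two
// conditions in the opposite order.
#include <vector>
#include <iostream>

int main()
{
  const int n = 6;
  // Postordered column elimination forest; parent == n marks the dummy root.
  // Subtrees: {0,1,2} rooted at 2, and {3,4,5} rooted at 5.
  std::vector<int> et = {2, 2, 6, 5, 5, 6};
  std::vector<int> desc(n, 0), relax_end(n, -1);
  const int relax_columns = 3;   // maximum snode width

  for (int j = 0; j < n; ++j)    // descendant counts, children visited first
    if (et[j] != n) desc[et[j]] += desc[j] + 1;

  for (int j = 0; j < n; )
  {
    int snode_start = j, parent = et[j];
    while (parent != n && desc[parent] < relax_columns) { j = parent; parent = et[j]; }
    relax_end[snode_start] = j;  // columns [snode_start, j] form one relaxed snode
    ++j;
    while (j < n && desc[j] != 0) ++j;  // advance to the next leaf
  }
  for (int j = 0; j < n; ++j)    // prints: snode [0, 2] and snode [3, 5]
    if (relax_end[j] != -1)
      std::cout << "snode [" << j << ", " << relax_end[j] << "]\n";
  return 0;
}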
+ +#ifndef EIGEN_SPARSE_QR_H +#define EIGEN_SPARSE_QR_H + +namespace Eigen { + +template class SparseQR; +template struct SparseQRMatrixQReturnType; +template struct SparseQRMatrixQTransposeReturnType; +template struct SparseQR_QProduct; +namespace internal { + template struct traits > + { + typedef typename SparseQRType::MatrixType ReturnType; + typedef typename ReturnType::StorageIndex StorageIndex; + typedef typename ReturnType::StorageKind StorageKind; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic + }; + }; + template struct traits > + { + typedef typename SparseQRType::MatrixType ReturnType; + }; + template struct traits > + { + typedef typename Derived::PlainObject ReturnType; + }; +} // End namespace internal + +/** + * \ingroup SparseQR_Module + * \class SparseQR + * \brief Sparse left-looking rank-revealing QR factorization + * + * This class implements a left-looking rank-revealing QR decomposition + * of sparse matrices. When a column has a norm less than a given tolerance + * it is implicitly permuted to the end. The QR factorization thus obtained is + * given by A*P = Q*R where R is upper triangular or trapezoidal. + * + * P is the column permutation which is the product of the fill-reducing and the + * rank-revealing permutations. Use colsPermutation() to get it. + * + * Q is the orthogonal matrix represented as products of Householder reflectors. + * Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint. + * You can then apply it to a vector. + * + * R is the sparse triangular or trapezoidal matrix. The later occurs when A is rank-deficient. + * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank. + * + * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> + * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module + * OrderingMethods \endlink module for the list of built-in and external ordering methods. + * + * \implsparsesolverconcept + * + * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). + * \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix. + * + */ +template +class SparseQR : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; + public: + using Base::_solve_impl; + typedef _MatrixType MatrixType; + typedef _OrderingType OrderingType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix QRMatrixType; + typedef Matrix IndexVector; + typedef Matrix ScalarVector; + typedef PermutationMatrix PermutationType; + + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) + { } + + /** Construct a QR factorization of the matrix \a mat. + * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + * + * \sa compute() + */ + explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) + { + compute(mat); + } + + /** Computes the QR factorization of the sparse matrix \a mat. 
+ * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + * + * \sa analyzePattern(), factorize() + */ + void compute(const MatrixType& mat) + { + analyzePattern(mat); + factorize(mat); + } + void analyzePattern(const MatrixType& mat); + void factorize(const MatrixType& mat); + + /** \returns the number of rows of the represented matrix. + */ + inline Index rows() const { return m_pmat.rows(); } + + /** \returns the number of columns of the represented matrix. + */ + inline Index cols() const { return m_pmat.cols();} + + /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. + * \warning The entries of the returned matrix are not sorted. This means that using it in algorithms + * expecting sorted entries will fail. This include random coefficient accesses (SpaseMatrix::coeff()), + * and coefficient-wise operations. Matrix products and triangular solves are fine though. + * + * To sort the entries, you can assign it to a row-major matrix, and if a column-major matrix + * is required, you can copy it again: + * \code + * SparseMatrix R = qr.matrixR(); // column-major, not sorted! + * SparseMatrix Rr = qr.matrixR(); // row-major, sorted + * SparseMatrix Rc = Rr; // column-major, sorted + * \endcode + */ + const QRMatrixType& matrixR() const { return m_R; } + + /** \returns the number of non linearly dependent columns as determined by the pivoting threshold. + * + * \sa setPivotThreshold() + */ + Index rank() const + { + eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); + return m_nonzeropivots; + } + + /** \returns an expression of the matrix Q as products of sparse Householder reflectors. + * The common usage of this function is to apply it to a dense matrix or vector + * \code + * VectorXd B1, B2; + * // Initialize B1 + * B2 = matrixQ() * B1; + * \endcode + * + * To get a plain SparseMatrix representation of Q: + * \code + * SparseMatrix Q; + * Q = SparseQR >(A).matrixQ(); + * \endcode + * Internally, this call simply performs a sparse product between the matrix Q + * and a sparse identity matrix. However, due to the fact that the sparse + * reflectors are stored unsorted, two transpositions are needed to sort + * them before performing the product. + */ + SparseQRMatrixQReturnType matrixQ() const + { return SparseQRMatrixQReturnType(*this); } + + /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R + * It is the combination of the fill-in reducing permutation and numerical column pivoting. + */ + const PermutationType& colsPermutation() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_outputPerm_c; + } + + /** \returns A string describing the type of error. + * This method is provided to ease debugging, not to handle errors. 
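// [Editor's illustration - not part of the patch] Typical use of the accessors
// documented above: factorize, query the numerical rank, and take the leading
// rank x rank block of R. The 3x2 matrix is deliberately rank-deficient (its
// second column is twice the first), so rank() should report 1 under the
// default pivot threshold.
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3, 2);
  A.insert(0,0) = 1; A.insert(1,0) = 2;
  A.insert(0,1) = 2; A.insert(1,1) = 4;   // col 1 = 2 * col 0
  A.makeCompressed();                     // SparseQR requires compressed storage

  Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
  std::cout << "rank = " << qr.rank() << "\n";
  Eigen::MatrixXd R(qr.matrixR());        // dense copy; entry order is irrelevant here
  std::cout << R.topLeftCorner(qr.rank(), qr.rank()) << "\n";
  return 0;
}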
+ */ + std::string lastErrorMessage() const { return m_lastError; } + + /** \internal */ + template + bool _solve_impl(const MatrixBase &B, MatrixBase &dest) const + { + eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); + eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); + + Index rank = this->rank(); + + // Compute Q^* * b; + typename Dest::PlainObject y, b; + y = this->matrixQ().adjoint() * B; + b = y; + + // Solve with the triangular matrix R + y.resize((std::max)(cols(),y.rows()),y.cols()); + y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); + y.bottomRows(y.rows()-rank).setZero(); + + // Apply the column permutation + if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols()); + else dest = y.topRows(cols()); + + m_info = Success; + return true; + } + + /** Sets the threshold that is used to determine linearly dependent columns during the factorization. + * + * In practice, if during the factorization the norm of the column that has to be eliminated is below + * this threshold, then the entire column is treated as zero, and it is moved at the end. + */ + void setPivotThreshold(const RealScalar& threshold) + { + m_useDefaultThreshold = false; + m_threshold = threshold; + } + + /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const Solve solve(const MatrixBase& B) const + { + eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); + eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); + return Solve(*this, B.derived()); + } + template + inline const Solve solve(const SparseMatrixBase& B) const + { + eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); + eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); + return Solve(*this, B.derived()); + } + + /** \brief Reports whether previous computation was successful. 
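// [Editor's illustration - not part of the patch] The calling sequence the solver
// interface above is designed for: compute(), then solve(), checking info() after
// each stage before trusting the result.
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3, 3);
  A.insert(0,0) = 4; A.insert(0,2) = 1;
  A.insert(1,1) = 2; A.insert(2,2) = 1;
  A.makeCompressed();

  Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr;
  qr.compute(A);
  if (qr.info() != Eigen::Success) { std::cerr << qr.lastErrorMessage() << "\n"; return 1; }

  Eigen::VectorXd b(3); b << 1, 2, 3;
  Eigen::VectorXd x = qr.solve(b);
  if (qr.info() != Eigen::Success) { std::cerr << "solve failed\n"; return 1; }
  std::cout << "residual = " << (A * x - b).norm() << "\n";  // ~0 for this full-rank A
  return 0;
}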
+ * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the QR factorization reports a numerical problem + * \c InvalidInput if the input matrix is invalid + * + * \sa iparm() + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + + /** \internal */ + inline void _sort_matrix_Q() + { + if(this->m_isQSorted) return; + // The matrix Q is sorted during the transposition + SparseMatrix mQrm(this->m_Q); + this->m_Q = mQrm; + this->m_isQSorted = true; + } + + + protected: + bool m_analysisIsok; + bool m_factorizationIsok; + mutable ComputationInfo m_info; + std::string m_lastError; + QRMatrixType m_pmat; // Temporary matrix + QRMatrixType m_R; // The triangular factor matrix + QRMatrixType m_Q; // The orthogonal reflectors + ScalarVector m_hcoeffs; // The Householder coefficients + PermutationType m_perm_c; // Fill-reducing Column permutation + PermutationType m_pivotperm; // The permutation for rank revealing + PermutationType m_outputPerm_c; // The final column permutation + RealScalar m_threshold; // Threshold to determine null Householder reflections + bool m_useDefaultThreshold; // Use default threshold + Index m_nonzeropivots; // Number of non zero pivots found + IndexVector m_etree; // Column elimination tree + IndexVector m_firstRowElt; // First element in each row + bool m_isQSorted; // whether Q is sorted or not + bool m_isEtreeOk; // whether the elimination tree match the initial input matrix + + template friend struct SparseQR_QProduct; + +}; + +/** \brief Preprocessing step of a QR factorization + * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + * + * In this step, the fill-reducing permutation is computed and applied to the columns of A + * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. + * + * \note In this step it is assumed that there is no empty row in the matrix \a mat. + */ +template +void SparseQR::analyzePattern(const MatrixType& mat) +{ + eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR"); + // Copy to a column major matrix if the input is rowmajor + typename internal::conditional::type matCpy(mat); + // Compute the column fill reducing ordering + OrderingType ord; + ord(matCpy, m_perm_c); + Index n = mat.cols(); + Index m = mat.rows(); + Index diagSize = (std::min)(m,n); + + if (!m_perm_c.size()) + { + m_perm_c.resize(n); + m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1)); + } + + // Compute the column elimination tree of the permuted matrix + m_outputPerm_c = m_perm_c.inverse(); + internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); + m_isEtreeOk = true; + + m_R.resize(m, n); + m_Q.resize(m, diagSize); + + // Allocate space for nonzero elements : rough estimation + m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree + m_Q.reserve(2*mat.nonZeros()); + m_hcoeffs.resize(diagSize); + m_analysisIsok = true; +} + +/** \brief Performs the numerical QR factorization of the input matrix + * + * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with + * a matrix having the same sparsity pattern than \a mat. 
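// [Editor's illustration - not part of the patch] The analyzePattern() / factorize()
// split implemented above pays off when several matrices share one sparsity
// pattern: the ordering and the column elimination tree are computed once, and
// only the numerical step is repeated.
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(2, 2);
  A.insert(0,0) = 1; A.insert(1,1) = 3;
  A.makeCompressed();

  Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr;
  qr.analyzePattern(A);   // symbolic step: ordering + etree, pattern only
  qr.factorize(A);        // numeric step for the first matrix

  A.coeffRef(1,1) = 5;    // same pattern, new values (no new entry inserted)
  qr.factorize(A);        // reuse the symbolic analysis
  return 0;
}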
+ * + * \param mat The sparse column-major matrix + */ +template +void SparseQR::factorize(const MatrixType& mat) +{ + using std::abs; + + eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); + StorageIndex m = StorageIndex(mat.rows()); + StorageIndex n = StorageIndex(mat.cols()); + StorageIndex diagSize = (std::min)(m,n); + IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes + IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q + Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q + ScalarVector tval(m); // The dense vector used to compute the current column + RealScalar pivotThreshold = m_threshold; + + m_R.setZero(); + m_Q.setZero(); + m_pmat = mat; + if(!m_isEtreeOk) + { + m_outputPerm_c = m_perm_c.inverse(); + internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); + m_isEtreeOk = true; + } + + m_pmat.uncompress(); // To have the innerNonZeroPtr allocated + + // Apply the fill-in reducing permutation lazily: + { + // If the input is row major, copy the original column indices, + // otherwise directly use the input matrix + // + IndexVector originalOuterIndicesCpy; + const StorageIndex *originalOuterIndices = mat.outerIndexPtr(); + if(MatrixType::IsRowMajor) + { + originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); + originalOuterIndices = originalOuterIndicesCpy.data(); + } + + for (int i = 0; i < n; i++) + { + Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; + m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; + m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; + } + } + + /* Compute the default threshold as in MatLab, see: + * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing + * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 + */ + if(m_useDefaultThreshold) + { + RealScalar max2Norm = 0.0; + for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm()); + if(max2Norm==RealScalar(0)) + max2Norm = RealScalar(1); + pivotThreshold = 20 * (m + n) * max2Norm * NumTraits::epsilon(); + } + + // Initialize the numerical permutation + m_pivotperm.setIdentity(n); + + StorageIndex nonzeroCol = 0; // Record the number of valid pivots + m_Q.startVec(0); + + // Left looking rank-revealing QR factorization: compute a column of R and Q at a time + for (StorageIndex col = 0; col < n; ++col) + { + mark.setConstant(-1); + m_R.startVec(col); + mark(nonzeroCol) = col; + Qidx(0) = nonzeroCol; + nzcolR = 0; nzcolQ = 1; + bool found_diag = nonzeroCol>=m; + tval.setZero(); + + // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., + // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. + // Note: if the diagonal entry does not exist, then its contribution must be explicitly added, + // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. 
+ for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) + { + StorageIndex curIdx = nonzeroCol; + if(itp) curIdx = StorageIndex(itp.row()); + if(curIdx == nonzeroCol) found_diag = true; + + // Get the nonzeros indexes of the current column of R + StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here + if (st < 0 ) + { + m_lastError = "Empty row found during numerical factorization"; + m_info = InvalidInput; + return; + } + + // Traverse the etree + Index bi = nzcolR; + for (; mark(st) != col; st = m_etree(st)) + { + Ridx(nzcolR) = st; // Add this row to the list, + mark(st) = col; // and mark this row as visited + nzcolR++; + } + + // Reverse the list to get the topological ordering + Index nt = nzcolR-bi; + for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1)); + + // Copy the current (curIdx,pcol) value of the input matrix + if(itp) tval(curIdx) = itp.value(); + else tval(curIdx) = Scalar(0); + + // Compute the pattern of Q(:,k) + if(curIdx > nonzeroCol && mark(curIdx) != col ) + { + Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q, + mark(curIdx) = col; // and mark it as visited + nzcolQ++; + } + } + + // Browse all the indexes of R(:,col) in reverse order + for (Index i = nzcolR-1; i >= 0; i--) + { + Index curIdx = Ridx(i); + + // Apply the curIdx-th householder vector to the current column (temporarily stored into tval) + Scalar tdot(0); + + // First compute q' * tval + tdot = m_Q.col(curIdx).dot(tval); + + tdot *= m_hcoeffs(curIdx); + + // Then update tval = tval - q * tau + // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse") + for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) + tval(itq.row()) -= itq.value() * tdot; + + // Detect fill-in for the current column of Q + if(m_etree(Ridx(i)) == nonzeroCol) + { + for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) + { + StorageIndex iQ = StorageIndex(itq.row()); + if (mark(iQ) != col) + { + Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, + mark(iQ) = col; // and mark it as visited + } + } + } + } // End update current column + + Scalar tau = RealScalar(0); + RealScalar beta = 0; + + if(nonzeroCol < diagSize) + { + // Compute the Householder reflection that eliminate the current column + // FIXME this step should call the Householder module. + Scalar c0 = nzcolQ ? 
tval(Qidx(0)) : Scalar(0); + + // First, the squared norm of Q((col+1):m, col) + RealScalar sqrNorm = 0.; + for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); + if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) + { + beta = numext::real(c0); + tval(Qidx(0)) = 1; + } + else + { + using std::sqrt; + beta = sqrt(numext::abs2(c0) + sqrNorm); + if(numext::real(c0) >= RealScalar(0)) + beta = -beta; + tval(Qidx(0)) = 1; + for (Index itq = 1; itq < nzcolQ; ++itq) + tval(Qidx(itq)) /= (c0 - beta); + tau = numext::conj((beta-c0) / beta); + + } + } + + // Insert values in R + for (Index i = nzcolR-1; i >= 0; i--) + { + Index curIdx = Ridx(i); + if(curIdx < nonzeroCol) + { + m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx); + tval(curIdx) = Scalar(0.); + } + } + + if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) + { + m_R.insertBackByOuterInner(col, nonzeroCol) = beta; + // The householder coefficient + m_hcoeffs(nonzeroCol) = tau; + // Record the householder reflections + for (Index itq = 0; itq < nzcolQ; ++itq) + { + Index iQ = Qidx(itq); + m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); + tval(iQ) = Scalar(0.); + } + nonzeroCol++; + if(nonzeroCol +struct SparseQR_QProduct : ReturnByValue > +{ + typedef typename SparseQRType::QRMatrixType MatrixType; + typedef typename SparseQRType::Scalar Scalar; + // Get the references + SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : + m_qr(qr),m_other(other),m_transpose(transpose) {} + inline Index rows() const { return m_qr.matrixQ().rows(); } + inline Index cols() const { return m_other.cols(); } + + // Assign to a vector + template + void evalTo(DesType& res) const + { + Index m = m_qr.rows(); + Index n = m_qr.cols(); + Index diagSize = (std::min)(m,n); + res = m_other; + if (m_transpose) + { + eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); + //Compute res = Q' * other column by column + for(Index j = 0; j < res.cols(); j++){ + for (Index k = 0; k < diagSize; k++) + { + Scalar tau = Scalar(0); + tau = m_qr.m_Q.col(k).dot(res.col(j)); + if(tau==Scalar(0)) continue; + tau = tau * m_qr.m_hcoeffs(k); + res.col(j) -= tau * m_qr.m_Q.col(k); + } + } + } + else + { + eigen_assert(m_qr.matrixQ().cols() == m_other.rows() && "Non conforming object sizes"); + + res.conservativeResize(rows(), cols()); + + // Compute res = Q * other column by column + for(Index j = 0; j < res.cols(); j++) + { + for (Index k = diagSize-1; k >=0; k--) + { + Scalar tau = Scalar(0); + tau = m_qr.m_Q.col(k).dot(res.col(j)); + if(tau==Scalar(0)) continue; + tau = tau * numext::conj(m_qr.m_hcoeffs(k)); + res.col(j) -= tau * m_qr.m_Q.col(k); + } + } + } + } + + const SparseQRType& m_qr; + const Derived& m_other; + bool m_transpose; // TODO this actually means adjoint +}; + +template +struct SparseQRMatrixQReturnType : public EigenBase > +{ + typedef typename SparseQRType::Scalar Scalar; + typedef Matrix DenseMatrix; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic + }; + explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} + template + SparseQR_QProduct operator*(const MatrixBase& other) + { + return SparseQR_QProduct(m_qr,other.derived(),false); + } + // To use for operations with the adjoint of Q + SparseQRMatrixQTransposeReturnType adjoint() const + { + return SparseQRMatrixQTransposeReturnType(m_qr); + } + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return 
m_qr.rows(); } + // To use for operations with the transpose of Q FIXME this is the same as adjoint at the moment + SparseQRMatrixQTransposeReturnType transpose() const + { + return SparseQRMatrixQTransposeReturnType(m_qr); + } + const SparseQRType& m_qr; +}; + +// TODO this actually represents the adjoint of Q +template +struct SparseQRMatrixQTransposeReturnType +{ + explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {} + template + SparseQR_QProduct operator*(const MatrixBase& other) + { + return SparseQR_QProduct(m_qr,other.derived(), true); + } + const SparseQRType& m_qr; +}; + +namespace internal { + +template +struct evaluator_traits > +{ + typedef typename SparseQRType::MatrixType MatrixType; + typedef typename storage_kind_to_evaluator_kind::Kind Kind; + typedef SparseShape Shape; +}; + +template< typename DstXprType, typename SparseQRType> +struct Assignment, internal::assign_op, Sparse2Sparse> +{ + typedef SparseQRMatrixQReturnType SrcXprType; + typedef typename DstXprType::Scalar Scalar; + typedef typename DstXprType::StorageIndex StorageIndex; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + typename DstXprType::PlainObject idMat(src.rows(), src.cols()); + idMat.setIdentity(); + // Sort the sparse householder reflectors if needed + const_cast(&src.m_qr)->_sort_matrix_Q(); + dst = SparseQR_QProduct(src.m_qr, idMat, false); + } +}; + +template< typename DstXprType, typename SparseQRType> +struct Assignment, internal::assign_op, Sparse2Dense> +{ + typedef SparseQRMatrixQReturnType SrcXprType; + typedef typename DstXprType::Scalar Scalar; + typedef typename DstXprType::StorageIndex StorageIndex; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) + { + dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows()); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdDeque.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdDeque.h new file mode 100644 index 000000000..cf1fedf92 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdDeque.h @@ -0,0 +1,126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDDEQUE_H +#define EIGEN_STDDEQUE_H + +#include "details.h" + +/** + * This section contains a convenience MACRO which allows an easy specialization of + * std::deque such that for data types with alignment issues the correct allocator + * is used automatically. + */ +#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) 
\ +namespace std \ +{ \ + template<> \ + class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ + : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ + { \ + typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \ + public: \ + typedef __VA_ARGS__ value_type; \ + typedef deque_base::allocator_type allocator_type; \ + typedef deque_base::size_type size_type; \ + typedef deque_base::iterator iterator; \ + explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ + template \ + deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \ + deque(const deque& c) : deque_base(c) {} \ + explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ + deque(iterator start, iterator end) : deque_base(start, end) {} \ + deque& operator=(const deque& x) { \ + deque_base::operator=(x); \ + return *this; \ + } \ + }; \ +} + +// check whether we really need the std::deque specialization +#if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */ + +namespace std { + +#define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \ + public: \ + typedef T value_type; \ + typedef typename deque_base::allocator_type allocator_type; \ + typedef typename deque_base::size_type size_type; \ + typedef typename deque_base::iterator iterator; \ + typedef typename deque_base::const_iterator const_iterator; \ + explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ + template \ + deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ + : deque_base(first, last, a) {} \ + deque(const deque& c) : deque_base(c) {} \ + explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ + deque(iterator start, iterator end) : deque_base(start, end) {} \ + deque& operator=(const deque& x) { \ + deque_base::operator=(x); \ + return *this; \ + } + + template + class deque > + : public deque > +{ + typedef deque > deque_base; + EIGEN_STD_DEQUE_SPECIALIZATION_BODY + + void resize(size_type new_size) + { resize(new_size, T()); } + +#if defined(_DEQUE_) + // workaround MSVC std::deque implementation + void resize(size_type new_size, const value_type& x) + { + if (deque_base::size() < new_size) + deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x); + else if (new_size < deque_base::size()) + deque_base::erase(deque_base::begin() + new_size, deque_base::end()); + } + void push_back(const value_type& x) + { deque_base::push_back(x); } + void push_front(const value_type& x) + { deque_base::push_front(x); } + using deque_base::insert; + iterator insert(const_iterator position, const value_type& x) + { return deque_base::insert(position,x); } + void insert(const_iterator position, size_type new_size, const value_type& x) + { deque_base::insert(position, new_size, x); } +#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2) + // workaround GCC std::deque implementation + void resize(size_type new_size, const value_type& x) + { + if (new_size < deque_base::size()) + deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size); + else + deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); + } +#else + // either GCC 4.1 or non-GCC + // default implementation which should always work. 
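// [Editor's note] This is the third of three resize() variants in the
// specialization: MSVC's deque is patched through _Insert_n above, libstdc++
// (>= 4.2) through _M_erase_at_end, and every other library falls through to the
// portable implementation below, which relies only on the public erase()/insert()
// members of the aligned base class.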
+ void resize(size_type new_size, const value_type& x) + { + if (new_size < deque_base::size()) + deque_base::erase(deque_base::begin() + new_size, deque_base::end()); + else if (new_size > deque_base::size()) + deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); + } +#endif + }; +} + +#endif // check whether specialization is actually required + +#endif // EIGEN_STDDEQUE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdList.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdList.h new file mode 100644 index 000000000..e1eba4985 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdList.h @@ -0,0 +1,106 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDLIST_H +#define EIGEN_STDLIST_H + +#include "details.h" + +/** + * This section contains a convenience MACRO which allows an easy specialization of + * std::list such that for data types with alignment issues the correct allocator + * is used automatically. + */ +#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \ +namespace std \ +{ \ + template<> \ + class list<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ + : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ + { \ + typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \ + public: \ + typedef __VA_ARGS__ value_type; \ + typedef list_base::allocator_type allocator_type; \ + typedef list_base::size_type size_type; \ + typedef list_base::iterator iterator; \ + explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ + template \ + list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \ + list(const list& c) : list_base(c) {} \ + explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ + list(iterator start, iterator end) : list_base(start, end) {} \ + list& operator=(const list& x) { \ + list_base::operator=(x); \ + return *this; \ + } \ + }; \ +} + +// check whether we really need the std::list specialization +#if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). 
*/ + +namespace std +{ + +#define EIGEN_STD_LIST_SPECIALIZATION_BODY \ + public: \ + typedef T value_type; \ + typedef typename list_base::allocator_type allocator_type; \ + typedef typename list_base::size_type size_type; \ + typedef typename list_base::iterator iterator; \ + typedef typename list_base::const_iterator const_iterator; \ + explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ + template \ + list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ + : list_base(first, last, a) {} \ + list(const list& c) : list_base(c) {} \ + explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ + list(iterator start, iterator end) : list_base(start, end) {} \ + list& operator=(const list& x) { \ + list_base::operator=(x); \ + return *this; \ + } + + template + class list > + : public list > + { + typedef list > list_base; + EIGEN_STD_LIST_SPECIALIZATION_BODY + + void resize(size_type new_size) + { resize(new_size, T()); } + + void resize(size_type new_size, const value_type& x) + { + if (list_base::size() < new_size) + list_base::insert(list_base::end(), new_size - list_base::size(), x); + else + while (new_size < list_base::size()) list_base::pop_back(); + } + +#if defined(_LIST_) + // workaround MSVC std::list implementation + void push_back(const value_type& x) + { list_base::push_back(x); } + using list_base::insert; + iterator insert(const_iterator position, const value_type& x) + { return list_base::insert(position,x); } + void insert(const_iterator position, size_type new_size, const value_type& x) + { list_base::insert(position, new_size, x); } +#endif + }; +} + +#endif // check whether specialization is actually required + +#endif // EIGEN_STDLIST_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdVector.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdVector.h new file mode 100644 index 000000000..ec22821d2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/StdVector.h @@ -0,0 +1,131 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDVECTOR_H +#define EIGEN_STDVECTOR_H + +#include "details.h" + +/** + * This section contains a convenience MACRO which allows an easy specialization of + * std::vector such that for data types with alignment issues the correct allocator + * is used automatically. + */ +#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) 
+namespace std \
+{ \
+  template<> \
+  class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
+    : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+  { \
+    typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
+  public: \
+    typedef __VA_ARGS__ value_type; \
+    typedef vector_base::allocator_type allocator_type; \
+    typedef vector_base::size_type size_type; \
+    typedef vector_base::iterator iterator; \
+    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
+    template<typename InputIterator> \
+    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
+    vector(const vector& c) : vector_base(c) {} \
+    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+    vector(iterator start, iterator end) : vector_base(start, end) {} \
+    vector& operator=(const vector& x) { \
+      vector_base::operator=(x); \
+      return *this; \
+    } \
+  }; \
+}
+
+// Don't specialize if containers are implemented according to C++11
+#if !EIGEN_HAS_CXX11_CONTAINERS
+
+namespace std {
+
+#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \
+  public: \
+    typedef T value_type; \
+    typedef typename vector_base::allocator_type allocator_type; \
+    typedef typename vector_base::size_type size_type; \
+    typedef typename vector_base::iterator iterator; \
+    typedef typename vector_base::const_iterator const_iterator; \
+    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
+    template<typename InputIterator> \
+    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+    : vector_base(first, last, a) {} \
+    vector(const vector& c) : vector_base(c) {} \
+    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+    vector(iterator start, iterator end) : vector_base(start, end) {} \
+    vector& operator=(const vector& x) { \
+      vector_base::operator=(x); \
+      return *this; \
+    }
+
+  template<typename T>
+  class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+    : public vector<T, Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+{
+    typedef vector<T, Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;
+    EIGEN_STD_VECTOR_SPECIALIZATION_BODY
+
+    void resize(size_type new_size)
+    { resize(new_size, T()); }
+
+#if defined(_VECTOR_)
+    // workaround MSVC std::vector implementation
+    void resize(size_type new_size, const value_type& x)
+    {
+      if (vector_base::size() < new_size)
+        vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x);
+      else if (new_size < vector_base::size())
+        vector_base::erase(vector_base::begin() + new_size, vector_base::end());
+    }
+    void push_back(const value_type& x)
+    { vector_base::push_back(x); }
+    using vector_base::insert;
+    iterator insert(const_iterator position, const value_type& x)
+    { return vector_base::insert(position,x); }
+    void insert(const_iterator position, size_type new_size, const value_type& x)
+    { vector_base::insert(position, new_size, x); }
+#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))
+    /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).
+     * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */
+    void resize(size_type new_size, const value_type& x)
+    {
+      vector_base::resize(new_size,x);
+    }
+#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
+    // workaround GCC std::vector implementation
+    void resize(size_type new_size, const value_type& x)
+    {
+      if (new_size < vector_base::size())
+        vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
+      else
+        vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
+    }
+#else
+    // either GCC 4.1 or non-GCC
+    // default implementation which should always work.
+    void resize(size_type new_size, const value_type& x)
+    {
+      if (new_size < vector_base::size())
+        vector_base::erase(vector_base::begin() + new_size, vector_base::end());
+      else if (new_size > vector_base::size())
+        vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
+    }
+#endif
+  };
+}
+#endif // !EIGEN_HAS_CXX11_CONTAINERS
+
+
+#endif // EIGEN_STDVECTOR_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/details.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/details.h
new file mode 100644
index 000000000..2cfd13e03
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/StlSupport/details.h
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STL_DETAILS_H
+#define EIGEN_STL_DETAILS_H
+
+#ifndef EIGEN_ALIGNED_ALLOCATOR
+  #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator
+#endif
+
+namespace Eigen {
+
+  // This one is needed to prevent reimplementing the whole std::vector.
+  template <class T>
+  class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>
+  {
+  public:
+    typedef std::size_t     size_type;
+    typedef std::ptrdiff_t  difference_type;
+    typedef T*              pointer;
+    typedef const T*        const_pointer;
+    typedef T&              reference;
+    typedef const T&        const_reference;
+    typedef T               value_type;
+
+    template <class U>
+    struct rebind
+    {
+      typedef aligned_allocator_indirection<U> other;
+    };
+
+    aligned_allocator_indirection() {}
+    aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}
+    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}
+    template<class U>
+    aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}
+    template<class U>
+    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}
+    ~aligned_allocator_indirection() {}
+  };
+
+#if EIGEN_COMP_MSVC
+
+  // sometimes, MSVC detects, at compile time, that the argument x
+  // in std::vector::resize(size_t s,T x) won't be aligned and generate an error
+  // even if this function is never called. Whence this little wrapper.
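// Editor's note (editorial addition, not upstream text): the indirection class
// above lets the std::vector/std::list specializations in StdVector.h/StdList.h
// derive from a base whose allocator type differs from EIGEN_ALIGNED_ALLOCATOR<T>,
// which avoids the specialization inheriting from itself. On the user side the
// two equivalent spellings are (Eigen::Matrix4d is illustrative):
//
//   EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Matrix4d)
//   std::vector<Eigen::Matrix4d> a(4, Eigen::Matrix4d::Identity());
//
//   // or, spelling the aligned allocator out explicitly:
//   std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > b;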
+#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \ + typename Eigen::internal::conditional< \ + Eigen::internal::is_arithmetic::value, \ + T, \ + Eigen::internal::workaround_msvc_stl_support \ + >::type + + namespace internal { + template struct workaround_msvc_stl_support : public T + { + inline workaround_msvc_stl_support() : T() {} + inline workaround_msvc_stl_support(const T& other) : T(other) {} + inline operator T& () { return *static_cast(this); } + inline operator const T& () const { return *static_cast(this); } + template + inline T& operator=(const OtherT& other) + { T::operator=(other); return *this; } + inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other) + { T::operator=(other); return *this; } + }; + } + +#else + +#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T + +#endif + +} + +#endif // EIGEN_STL_DETAILS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h new file mode 100644 index 000000000..7261c7d0f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -0,0 +1,1027 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SUPERLUSUPPORT_H +#define EIGEN_SUPERLUSUPPORT_H + +namespace Eigen { + +#if defined(SUPERLU_MAJOR_VERSION) && (SUPERLU_MAJOR_VERSION >= 5) +#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ + extern "C" { \ + extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ + char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ + void *, int, SuperMatrix *, SuperMatrix *, \ + FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ + GlobalLU_t *, mem_usage_t *, SuperLUStat_t *, int *); \ + } \ + inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + mem_usage_t mem_usage; \ + GlobalLU_t gLU; \ + PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + ferr, berr, &gLU, &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } +#else // version < 5.0 +#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ + extern "C" { \ + extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ + char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ + void *, int, SuperMatrix *, SuperMatrix *, \ + FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ + mem_usage_t *, SuperLUStat_t *, int *); \ + } \ + inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, 
int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + mem_usage_t mem_usage; \ + PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + ferr, berr, &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } +#endif + +DECL_GSSVX(s,float,float) +DECL_GSSVX(c,float,std::complex) +DECL_GSSVX(d,double,double) +DECL_GSSVX(z,double,std::complex) + +#ifdef MILU_ALPHA +#define EIGEN_SUPERLU_HAS_ILU +#endif + +#ifdef EIGEN_SUPERLU_HAS_ILU + +// similarly for the incomplete factorization using gsisx +#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \ + extern "C" { \ + extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ + char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ + void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \ + mem_usage_t *, SuperLUStat_t *, int *); \ + } \ + inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + mem_usage_t mem_usage; \ + PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } + +DECL_GSISX(s,float,float) +DECL_GSISX(c,float,std::complex) +DECL_GSISX(d,double,double) +DECL_GSISX(z,double,std::complex) + +#endif + +template +struct SluMatrixMapHelper; + +/** \internal + * + * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices + * and dense matrices. Supernodal and other fancy format are not supported by this wrapper. + * + * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure. 
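 *
 * Editor's note (editorial sketch, not upstream text): the mapping is zero-copy;
 * e.g. for a column-major Eigen sparse matrix
 * \code
 *   Eigen::SparseMatrix<double> A(100,100); // compressed column storage
 *   SluMatrix slu = SluMatrix::Map(A);      // Stype == SLU_NC, no allocation
 *   // slu.storage.{values,innerInd,outerInd} alias A.valuePtr(),
 *   // A.innerIndexPtr() and A.outerIndexPtr() respectively.
 * \endcode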
+ */ +struct SluMatrix : SuperMatrix +{ + SluMatrix() + { + Store = &storage; + } + + SluMatrix(const SluMatrix& other) + : SuperMatrix(other) + { + Store = &storage; + storage = other.storage; + } + + SluMatrix& operator=(const SluMatrix& other) + { + SuperMatrix::operator=(static_cast(other)); + Store = &storage; + storage = other.storage; + return *this; + } + + struct + { + union {int nnz;int lda;}; + void *values; + int *innerInd; + int *outerInd; + } storage; + + void setStorageType(Stype_t t) + { + Stype = t; + if (t==SLU_NC || t==SLU_NR || t==SLU_DN) + Store = &storage; + else + { + eigen_assert(false && "storage type not supported"); + Store = 0; + } + } + + template + void setScalarType() + { + if (internal::is_same::value) + Dtype = SLU_S; + else if (internal::is_same::value) + Dtype = SLU_D; + else if (internal::is_same >::value) + Dtype = SLU_C; + else if (internal::is_same >::value) + Dtype = SLU_Z; + else + { + eigen_assert(false && "Scalar type not supported by SuperLU"); + } + } + + template + static SluMatrix Map(MatrixBase& _mat) + { + MatrixType& mat(_mat.derived()); + eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU"); + SluMatrix res; + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = internal::convert_index(mat.rows()); + res.ncol = internal::convert_index(mat.cols()); + + res.storage.lda = internal::convert_index(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride()); + res.storage.values = (void*)(mat.data()); + return res; + } + + template + static SluMatrix Map(SparseMatrixBase& a_mat) + { + MatrixType &mat(a_mat.derived()); + SluMatrix res; + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = internal::convert_index(mat.cols()); + res.ncol = internal::convert_index(mat.rows()); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = internal::convert_index(mat.rows()); + res.ncol = internal::convert_index(mat.cols()); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = internal::convert_index(mat.nonZeros()); + res.storage.values = mat.valuePtr(); + res.storage.innerInd = mat.innerIndexPtr(); + res.storage.outerInd = mat.outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & Upper) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & Lower) + res.Mtype = SLU_TRL; + + eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); + + return res; + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Matrix MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = mat.rows(); + res.ncol = mat.cols(); + + res.storage.lda = mat.outerStride(); + res.storage.values = mat.data(); + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Derived MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = mat.cols(); + res.ncol = mat.rows(); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = mat.nonZeros(); + res.storage.values = mat.valuePtr(); + res.storage.innerInd = mat.innerIndexPtr(); + 
res.storage.outerInd = mat.outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & Upper) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & Lower) + res.Mtype = SLU_TRL; + + eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); + } +}; + +namespace internal { + +template +SluMatrix asSluMatrix(MatrixType& mat) +{ + return SluMatrix::Map(mat); +} + +/** View a Super LU matrix as an Eigen expression */ +template +MappedSparseMatrix map_superlu(SluMatrix& sluMat) +{ + eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR) + || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC)); + + Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow; + + return MappedSparseMatrix( + sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize], + sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast(sluMat.storage.values) ); +} + +} // end namespace internal + +/** \ingroup SuperLUSupport_Module + * \class SuperLUBase + * \brief The base class for the direct and incomplete LU factorization of SuperLU + */ +template +class SuperLUBase : public SparseSolverBase +{ + protected: + typedef SparseSolverBase Base; + using Base::derived; + using Base::m_isInitialized; + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef Map > PermutationMap; + typedef SparseMatrix LUMatrixType; + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + + SuperLUBase() {} + + ~SuperLUBase() + { + clearFactors(); + } + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + /** \returns a reference to the Super LU option object to configure the Super LU algorithms. */ + inline superlu_options_t& options() { return m_sluOptions; } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + void compute(const MatrixType& matrix) + { + derived().analyzePattern(matrix); + derived().factorize(matrix); + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. 
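 *
 * Editor's note (editorial sketch, not upstream text): the intended pattern for
 * matrices A1, A2 sharing one sparsity structure is
 * \code
 *   solver.analyzePattern(A1); // symbolic step, done once
 *   solver.factorize(A1);      // numeric step
 *   solver.factorize(A2);      // re-uses the symbolic analysis
 * \endcode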
+ * + * \sa factorize() + */ + void analyzePattern(const MatrixType& /*matrix*/) + { + m_isInitialized = true; + m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + template + void dumpMemory(Stream& /*s*/) + {} + + protected: + + void initFactorization(const MatrixType& a) + { + set_default_options(&this->m_sluOptions); + + const Index size = a.rows(); + m_matrix = a; + + m_sluA = internal::asSluMatrix(m_matrix); + clearFactors(); + + m_p.resize(size); + m_q.resize(size); + m_sluRscale.resize(size); + m_sluCscale.resize(size); + m_sluEtree.resize(size); + + // set empty B and X + m_sluB.setStorageType(SLU_DN); + m_sluB.setScalarType(); + m_sluB.Mtype = SLU_GE; + m_sluB.storage.values = 0; + m_sluB.nrow = 0; + m_sluB.ncol = 0; + m_sluB.storage.lda = internal::convert_index(size); + m_sluX = m_sluB; + + m_extractedDataAreDirty = true; + } + + void init() + { + m_info = InvalidInput; + m_isInitialized = false; + m_sluL.Store = 0; + m_sluU.Store = 0; + } + + void extractData() const; + + void clearFactors() + { + if(m_sluL.Store) + Destroy_SuperNode_Matrix(&m_sluL); + if(m_sluU.Store) + Destroy_CompCol_Matrix(&m_sluU); + + m_sluL.Store = 0; + m_sluU.Store = 0; + + memset(&m_sluL,0,sizeof m_sluL); + memset(&m_sluU,0,sizeof m_sluU); + } + + // cached data to reduce reallocation, etc. + mutable LUMatrixType m_l; + mutable LUMatrixType m_u; + mutable IntColVectorType m_p; + mutable IntRowVectorType m_q; + + mutable LUMatrixType m_matrix; // copy of the factorized matrix + mutable SluMatrix m_sluA; + mutable SuperMatrix m_sluL, m_sluU; + mutable SluMatrix m_sluB, m_sluX; + mutable SuperLUStat_t m_sluStat; + mutable superlu_options_t m_sluOptions; + mutable std::vector m_sluEtree; + mutable Matrix m_sluRscale, m_sluCscale; + mutable Matrix m_sluFerr, m_sluBerr; + mutable char m_sluEqued; + + mutable ComputationInfo m_info; + int m_factorizationIsOk; + int m_analysisIsOk; + mutable bool m_extractedDataAreDirty; + + private: + SuperLUBase(SuperLUBase& ) { } +}; + + +/** \ingroup SuperLUSupport_Module + * \class SuperLU + * \brief A sparse direct LU factorization and solver based on the SuperLU library + * + * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization + * using the SuperLU library. The sparse matrix A must be squared and invertible. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \warning This class is only for the 4.x versions of SuperLU. The 3.x and 5.x versions are not supported. 
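 *
 * Editor's note (editorial sketch, not upstream text; names are illustrative):
 * \code
 *   Eigen::SparseMatrix<double> A; // square and invertible, filled elsewhere
 *   Eigen::VectorXd b;             // right-hand side
 *   Eigen::SuperLU<Eigen::SparseMatrix<double> > solver(A);
 *   if(solver.info() == Eigen::Success)
 *     Eigen::VectorXd x = solver.solve(b);
 * \endcode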
+ * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SparseLU + */ +template +class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> > +{ + public: + typedef SuperLUBase<_MatrixType,SuperLU> Base; + typedef _MatrixType MatrixType; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + typedef typename Base::StorageIndex StorageIndex; + typedef typename Base::IntRowVectorType IntRowVectorType; + typedef typename Base::IntColVectorType IntColVectorType; + typedef typename Base::PermutationMap PermutationMap; + typedef typename Base::LUMatrixType LUMatrixType; + typedef TriangularView LMatrixType; + typedef TriangularView UMatrixType; + + public: + using Base::_solve_impl; + + SuperLU() : Base() { init(); } + + explicit SuperLU(const MatrixType& matrix) : Base() + { + init(); + Base::compute(matrix); + } + + ~SuperLU() + { + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + m_info = InvalidInput; + m_isInitialized = false; + Base::analyzePattern(matrix); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix); + + /** \internal */ + template + void _solve_impl(const MatrixBase &b, MatrixBase &dest) const; + + inline const LMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_l; + } + + inline const UMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_q; + } + + Scalar determinant() const; + + protected: + + using Base::m_matrix; + using Base::m_sluOptions; + using Base::m_sluA; + using Base::m_sluB; + using Base::m_sluX; + using Base::m_p; + using Base::m_q; + using Base::m_sluEtree; + using Base::m_sluEqued; + using Base::m_sluRscale; + using Base::m_sluCscale; + using Base::m_sluL; + using Base::m_sluU; + using Base::m_sluStat; + using Base::m_sluFerr; + using Base::m_sluBerr; + using Base::m_l; + using Base::m_u; + + using Base::m_analysisIsOk; + using Base::m_factorizationIsOk; + using Base::m_extractedDataAreDirty; + using Base::m_isInitialized; + using Base::m_info; + + void init() + { + Base::init(); + + set_default_options(&this->m_sluOptions); + m_sluOptions.PrintStat = NO; + m_sluOptions.ConditionNumber = NO; + m_sluOptions.Trans = NOTRANS; + m_sluOptions.ColPerm = COLAMD; + } + + + private: + SuperLU(SuperLU& ) { } +}; + +template +void SuperLU::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + if(!m_analysisIsOk) + { + m_info = InvalidInput; + return; + } + + this->initFactorization(a); + + m_sluOptions.ColPerm = COLAMD; + int info = 0; + RealScalar recip_pivot_growth, rcond; + RealScalar ferr, berr; + + StatInit(&m_sluStat); + SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], + &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, 
&m_sluX, + &recip_pivot_growth, &rcond, + &ferr, &berr, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + m_extractedDataAreDirty = true; + + // FIXME how to better check for errors ??? + m_info = info == 0 ? Success : NumericalIssue; + m_factorizationIsOk = true; +} + +template +template +void SuperLU::_solve_impl(const MatrixBase &b, MatrixBase& x) const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); + + const Index size = m_matrix.rows(); + const Index rhsCols = b.cols(); + eigen_assert(size==b.rows()); + + m_sluOptions.Trans = NOTRANS; + m_sluOptions.Fact = FACTORED; + m_sluOptions.IterRefine = NOREFINE; + + + m_sluFerr.resize(rhsCols); + m_sluBerr.resize(rhsCols); + + Ref > b_ref(b); + Ref > x_ref(x); + + m_sluB = SluMatrix::Map(b_ref.const_cast_derived()); + m_sluX = SluMatrix::Map(x_ref.const_cast_derived()); + + typename Rhs::PlainObject b_cpy; + if(m_sluEqued!='N') + { + b_cpy = b; + m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); + } + + StatInit(&m_sluStat); + int info = 0; + RealScalar recip_pivot_growth, rcond; + SuperLU_gssvx(&m_sluOptions, &m_sluA, + m_q.data(), m_p.data(), + &m_sluEtree[0], &m_sluEqued, + &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluFerr[0], &m_sluBerr[0], + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + if(x.derived().data() != x_ref.data()) + x = x_ref; + + m_info = info==0 ? Success : NumericalIssue; +} + +// the code of this extractData() function has been adapted from the SuperLU's Matlab support code, +// +// Copyright (c) 1994 by Xerox Corporation. All rights reserved. +// +// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY +// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. +// +template +void SuperLUBase::extractData() const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()"); + if (m_extractedDataAreDirty) + { + int upper; + int fsupc, istart, nsupr; + int lastl = 0, lastu = 0; + SCformat *Lstore = static_cast(m_sluL.Store); + NCformat *Ustore = static_cast(m_sluU.Store); + Scalar *SNptr; + + const Index size = m_matrix.rows(); + m_l.resize(size,size); + m_l.resizeNonZeros(Lstore->nnz); + m_u.resize(size,size); + m_u.resizeNonZeros(Ustore->nnz); + + int* Lcol = m_l.outerIndexPtr(); + int* Lrow = m_l.innerIndexPtr(); + Scalar* Lval = m_l.valuePtr(); + + int* Ucol = m_u.outerIndexPtr(); + int* Urow = m_u.innerIndexPtr(); + Scalar* Uval = m_u.valuePtr(); + + Ucol[0] = 0; + Ucol[0] = 0; + + /* for each supernode */ + for (int k = 0; k <= Lstore->nsuper; ++k) + { + fsupc = L_FST_SUPC(k); + istart = L_SUB_START(fsupc); + nsupr = L_SUB_START(fsupc+1) - istart; + upper = 1; + + /* for each column in the supernode */ + for (int j = fsupc; j < L_FST_SUPC(k+1); ++j) + { + SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)]; + + /* Extract U */ + for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i) + { + Uval[lastu] = ((Scalar*)Ustore->nzval)[i]; + /* Matlab doesn't like explicit zero. */ + if (Uval[lastu] != 0.0) + Urow[lastu++] = U_SUB(i); + } + for (int i = 0; i < upper; ++i) + { + /* upper triangle in the supernode */ + Uval[lastu] = SNptr[i]; + /* Matlab doesn't like explicit zero. 
*/ + if (Uval[lastu] != 0.0) + Urow[lastu++] = L_SUB(istart+i); + } + Ucol[j+1] = lastu; + + /* Extract L */ + Lval[lastl] = 1.0; /* unit diagonal */ + Lrow[lastl++] = L_SUB(istart + upper - 1); + for (int i = upper; i < nsupr; ++i) + { + Lval[lastl] = SNptr[i]; + /* Matlab doesn't like explicit zero. */ + if (Lval[lastl] != 0.0) + Lrow[lastl++] = L_SUB(istart+i); + } + Lcol[j+1] = lastl; + + ++upper; + } /* for j ... */ + + } /* for k ... */ + + // squeeze the matrices : + m_l.resizeNonZeros(lastl); + m_u.resizeNonZeros(lastu); + + m_extractedDataAreDirty = false; + } +} + +template +typename SuperLU::Scalar SuperLU::determinant() const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()"); + + if (m_extractedDataAreDirty) + this->extractData(); + + Scalar det = Scalar(1); + for (int j=0; j 0) + { + int lastId = m_u.outerIndexPtr()[j+1]-1; + eigen_assert(m_u.innerIndexPtr()[lastId]<=j); + if (m_u.innerIndexPtr()[lastId]==j) + det *= m_u.valuePtr()[lastId]; + } + } + if(PermutationMap(m_p.data(),m_p.size()).determinant()*PermutationMap(m_q.data(),m_q.size()).determinant()<0) + det = -det; + if(m_sluEqued!='N') + return det/m_sluRscale.prod()/m_sluCscale.prod(); + else + return det; +} + +#ifdef EIGEN_PARSED_BY_DOXYGEN +#define EIGEN_SUPERLU_HAS_ILU +#endif + +#ifdef EIGEN_SUPERLU_HAS_ILU + +/** \ingroup SuperLUSupport_Module + * \class SuperILU + * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library + * + * This class allows to solve for an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization + * using the SuperLU library. This class is aimed to be used as a preconditioner of the iterative linear solvers. + * + * \warning This class is only for the 4.x versions of SuperLU. The 3.x and 5.x versions are not supported. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class IncompleteLUT, class ConjugateGradient, class BiCGSTAB + */ + +template +class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> > +{ + public: + typedef SuperLUBase<_MatrixType,SuperILU> Base; + typedef _MatrixType MatrixType; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + + public: + using Base::_solve_impl; + + SuperILU() : Base() { init(); } + + SuperILU(const MatrixType& matrix) : Base() + { + init(); + Base::compute(matrix); + } + + ~SuperILU() + { + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + Base::analyzePattern(matrix); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
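 *
 * Editor's note (editorial sketch, not upstream text): since SuperILU models the
 * sparse-solver concept, an approximate solve follows the usual pattern:
 * \code
 *   Eigen::SuperILU<Eigen::SparseMatrix<double> > ilu(A);
 *   Eigen::VectorXd x0 = ilu.solve(b); // approximate solution / preconditioning step
 * \endcode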
+ * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix); + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve_impl(const MatrixBase &b, MatrixBase &dest) const; + #endif // EIGEN_PARSED_BY_DOXYGEN + + protected: + + using Base::m_matrix; + using Base::m_sluOptions; + using Base::m_sluA; + using Base::m_sluB; + using Base::m_sluX; + using Base::m_p; + using Base::m_q; + using Base::m_sluEtree; + using Base::m_sluEqued; + using Base::m_sluRscale; + using Base::m_sluCscale; + using Base::m_sluL; + using Base::m_sluU; + using Base::m_sluStat; + using Base::m_sluFerr; + using Base::m_sluBerr; + using Base::m_l; + using Base::m_u; + + using Base::m_analysisIsOk; + using Base::m_factorizationIsOk; + using Base::m_extractedDataAreDirty; + using Base::m_isInitialized; + using Base::m_info; + + void init() + { + Base::init(); + + ilu_set_default_options(&m_sluOptions); + m_sluOptions.PrintStat = NO; + m_sluOptions.ConditionNumber = NO; + m_sluOptions.Trans = NOTRANS; + m_sluOptions.ColPerm = MMD_AT_PLUS_A; + + // no attempt to preserve column sum + m_sluOptions.ILU_MILU = SILU; + // only basic ILU(k) support -- no direct control over memory consumption + // better to use ILU_DropRule = DROP_BASIC | DROP_AREA + // and set ILU_FillFactor to max memory growth + m_sluOptions.ILU_DropRule = DROP_BASIC; + m_sluOptions.ILU_DropTol = NumTraits::dummy_precision()*10; + } + + private: + SuperILU(SuperILU& ) { } +}; + +template +void SuperILU::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + if(!m_analysisIsOk) + { + m_info = InvalidInput; + return; + } + + this->initFactorization(a); + + int info = 0; + RealScalar recip_pivot_growth, rcond; + + StatInit(&m_sluStat); + SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], + &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + // FIXME how to better check for errors ??? + m_info = info == 0 ? Success : NumericalIssue; + m_factorizationIsOk = true; +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void SuperILU::_solve_impl(const MatrixBase &b, MatrixBase& x) const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); + + const int size = m_matrix.rows(); + const int rhsCols = b.cols(); + eigen_assert(size==b.rows()); + + m_sluOptions.Trans = NOTRANS; + m_sluOptions.Fact = FACTORED; + m_sluOptions.IterRefine = NOREFINE; + + m_sluFerr.resize(rhsCols); + m_sluBerr.resize(rhsCols); + + Ref > b_ref(b); + Ref > x_ref(x); + + m_sluB = SluMatrix::Map(b_ref.const_cast_derived()); + m_sluX = SluMatrix::Map(x_ref.const_cast_derived()); + + typename Rhs::PlainObject b_cpy; + if(m_sluEqued!='N') + { + b_cpy = b; + m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); + } + + int info = 0; + RealScalar recip_pivot_growth, rcond; + + StatInit(&m_sluStat); + SuperLU_gsisx(&m_sluOptions, &m_sluA, + m_q.data(), m_p.data(), + &m_sluEtree[0], &m_sluEqued, + &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + if(x.derived().data() != x_ref.data()) + x = x_ref; + + m_info = info==0 ? 
Success : NumericalIssue; +} +#endif + +#endif + +} // end namespace Eigen + +#endif // EIGEN_SUPERLUSUPPORT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h new file mode 100644 index 000000000..91c09ab13 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -0,0 +1,506 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_UMFPACKSUPPORT_H +#define EIGEN_UMFPACKSUPPORT_H + +namespace Eigen { + +/* TODO extract L, extract U, compute det, etc... */ + +// generic double/complex wrapper functions: + + +inline void umfpack_defaults(double control[UMFPACK_CONTROL], double) +{ umfpack_di_defaults(control); } + +inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex) +{ umfpack_zi_defaults(control); } + +inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double) +{ umfpack_di_report_info(control, info);} + +inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex) +{ umfpack_zi_report_info(control, info);} + +inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double) +{ umfpack_di_report_status(control, status);} + +inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex) +{ umfpack_zi_report_status(control, status);} + +inline void umfpack_report_control(double control[UMFPACK_CONTROL], double) +{ umfpack_di_report_control(control);} + +inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex) +{ umfpack_zi_report_control(control);} + +inline void umfpack_free_numeric(void **Numeric, double) +{ umfpack_di_free_numeric(Numeric); *Numeric = 0; } + +inline void umfpack_free_numeric(void **Numeric, std::complex) +{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; } + +inline void umfpack_free_symbolic(void **Symbolic, double) +{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; } + +inline void umfpack_free_symbolic(void **Symbolic, std::complex) +{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; } + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const double Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info); +} + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const std::complex Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], 
const int Ai[], const std::complex Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[], + double X[], const double B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex Ax[], + std::complex X[], const std::complex B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double) +{ + return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex) +{ + return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[], + int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric) +{ + return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex Lx[], int Up[], int Ui[], std::complex Ux[], + int P[], int Q[], std::complex Dx[], int *do_recip, double Rs[], void *Numeric) +{ + double& lx0_real = numext::real_ref(Lx[0]); + double& ux0_real = numext::real_ref(Ux[0]); + double& dx0_real = numext::real_ref(Dx[0]); + return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q, + Dx?&dx0_real:0,0,do_recip,Rs,Numeric); +} + +inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info); +} + +inline int umfpack_get_determinant(std::complex *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + double& mx_real = numext::real_ref(*Mx); + return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info); +} + + +/** \ingroup UmfPackSupport_Module + * \brief A sparse LU factorization and solver based on UmfPack + * + * This class allows to solve for A.X = B sparse linear problems via a LU factorization + * using the UmfPack library. The sparse matrix A must be squared and full rank. + * The vectors or matrices X and B can be either dense or sparse. + * + * \warning The input matrix A should be in a \b compressed and \b column-major form. + * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix. 
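 *
 * Editor's note (editorial sketch, not upstream text; names are illustrative):
 * \code
 *   Eigen::SparseMatrix<double> A; // filled elsewhere
 *   A.makeCompressed();            // avoids the expensive copy mentioned above
 *   Eigen::UmfPackLU<Eigen::SparseMatrix<double> > lu(A);
 *   Eigen::VectorXd x = lu.solve(b);
 * \endcode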
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SparseLU + */ +template +class UmfPackLU : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; + public: + using Base::_solve_impl; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef SparseMatrix LUMatrixType; + typedef SparseMatrix UmfpackMatrixType; + typedef Ref UmfpackMatrixRef; + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + + typedef Array UmfpackControl; + typedef Array UmfpackInfo; + + UmfPackLU() + : m_dummy(0,0), mp_matrix(m_dummy) + { + init(); + } + + template + explicit UmfPackLU(const InputMatrixType& matrix) + : mp_matrix(matrix) + { + init(); + compute(matrix); + } + + ~UmfPackLU() + { + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); + } + + inline Index rows() const { return mp_matrix.rows(); } + inline Index cols() const { return mp_matrix.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + inline const LUMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) extractData(); + return m_l; + } + + inline const LUMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) extractData(); + return m_q; + } + + /** Computes the sparse Cholesky decomposition of \a matrix + * Note that the matrix should be column-major, and in compressed format for best performance. + * \sa SparseMatrix::makeCompressed(). + */ + template + void compute(const InputMatrixType& matrix) + { + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); + grab(matrix.derived()); + analyzePattern_impl(); + factorize_impl(); + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize(), compute() + */ + template + void analyzePattern(const InputMatrixType& matrix) + { + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); + + grab(matrix.derived()); + + analyzePattern_impl(); + } + + /** Provides the return status code returned by UmfPack during the numeric + * factorization. + * + * \sa factorize(), compute() + */ + inline int umfpackFactorizeReturncode() const + { + eigen_assert(m_numeric && "UmfPackLU: you must first call factorize()"); + return m_fact_errorCode; + } + + /** Provides access to the control settings array used by UmfPack. 
+ * + * If this array contains NaN's, the default values are used. + * + * See UMFPACK documentation for details. + */ + inline const UmfpackControl& umfpackControl() const + { + return m_control; + } + + /** Provides access to the control settings array used by UmfPack. + * + * If this array contains NaN's, the default values are used. + * + * See UMFPACK documentation for details. + */ + inline UmfpackControl& umfpackControl() + { + return m_control; + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the pattern anylysis has been performed. + * + * \sa analyzePattern(), compute() + */ + template + void factorize(const InputMatrixType& matrix) + { + eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); + if(m_numeric) + umfpack_free_numeric(&m_numeric,Scalar()); + + grab(matrix.derived()); + + factorize_impl(); + } + + /** Prints the current UmfPack control settings. + * + * \sa umfpackControl() + */ + void umfpackReportControl() + { + umfpack_report_control(m_control.data(), Scalar()); + } + + /** Prints statistics collected by UmfPack. + * + * \sa analyzePattern(), compute() + */ + void umfpackReportInfo() + { + eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); + umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar()); + } + + /** Prints the status of the previous factorization operation performed by UmfPack (symbolic or numerical factorization). + * + * \sa analyzePattern(), compute() + */ + void umfpackReportStatus() { + eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); + umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar()); + } + + /** \internal */ + template + bool _solve_impl(const MatrixBase &b, MatrixBase &x) const; + + Scalar determinant() const; + + void extractData() const; + + protected: + + void init() + { + m_info = InvalidInput; + m_isInitialized = false; + m_numeric = 0; + m_symbolic = 0; + m_extractedDataAreDirty = true; + + umfpack_defaults(m_control.data(), Scalar()); + } + + void analyzePattern_impl() + { + m_fact_errorCode = umfpack_symbolic(internal::convert_index(mp_matrix.rows()), + internal::convert_index(mp_matrix.cols()), + mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(), + &m_symbolic, m_control.data(), m_umfpackInfo.data()); + + m_isInitialized = true; + m_info = m_fact_errorCode ? InvalidInput : Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_extractedDataAreDirty = true; + } + + void factorize_impl() + { + + m_fact_errorCode = umfpack_numeric(mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(), + m_symbolic, &m_numeric, m_control.data(), m_umfpackInfo.data()); + + m_info = m_fact_errorCode == UMFPACK_OK ? Success : NumericalIssue; + m_factorizationIsOk = true; + m_extractedDataAreDirty = true; + } + + template + void grab(const EigenBase &A) + { + mp_matrix.~UmfpackMatrixRef(); + ::new (&mp_matrix) UmfpackMatrixRef(A.derived()); + } + + void grab(const UmfpackMatrixRef &A) + { + if(&(A.derived()) != &mp_matrix) + { + mp_matrix.~UmfpackMatrixRef(); + ::new (&mp_matrix) UmfpackMatrixRef(A); + } + } + + // cached data to reduce reallocation, etc. 
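    // Editor's note (editorial addition, not upstream text): the array returned
    // by umfpackControl() can be tuned before compute(); a sketch, assuming the
    // parameter indices documented by UMFPACK itself:
    //
    //   Eigen::UmfPackLU<Eigen::SparseMatrix<double> > lu;
    //   lu.umfpackControl()[UMFPACK_PIVOT_TOLERANCE] = 0.5; // illustrative value
    //   lu.compute(A);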
+    mutable LUMatrixType m_l;
+    int m_fact_errorCode;
+    UmfpackControl m_control;
+    mutable UmfpackInfo m_umfpackInfo;
+
+    mutable LUMatrixType m_u;
+    mutable IntColVectorType m_p;
+    mutable IntRowVectorType m_q;
+
+    UmfpackMatrixType m_dummy;
+    UmfpackMatrixRef mp_matrix;
+
+    void* m_numeric;
+    void* m_symbolic;
+
+    mutable ComputationInfo m_info;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+    mutable bool m_extractedDataAreDirty;
+
+  private:
+    UmfPackLU(const UmfPackLU& ) { }
+};
+
+
+template<typename MatrixType>
+void UmfPackLU<MatrixType>::extractData() const
+{
+  if (m_extractedDataAreDirty)
+  {
+    // get size of the data
+    int lnz, unz, rows, cols, nz_udiag;
+    umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+    // allocate data
+    m_l.resize(rows,(std::min)(rows,cols));
+    m_l.resizeNonZeros(lnz);
+
+    m_u.resize((std::min)(rows,cols),cols);
+    m_u.resizeNonZeros(unz);
+
+    m_p.resize(rows);
+    m_q.resize(cols);
+
+    // extract
+    umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
+                        m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
+                        m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+    m_extractedDataAreDirty = false;
+  }
+}
+
+template<typename MatrixType>
+typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
+{
+  Scalar det;
+  umfpack_get_determinant(&det, 0, m_numeric, 0);
+  return det;
+}
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+  Index rhsCols = b.cols();
+  eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
+  eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
+  eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve");
+
+  int errorCode;
+  Scalar* x_ptr = 0;
+  Matrix<Scalar,Dynamic,1> x_tmp;
+  if(x.innerStride()!=1)
+  {
+    x_tmp.resize(x.rows());
+    x_ptr = x_tmp.data();
+  }
+  for (int j=0; j<rhsCols; ++j)
+  {
+    if(x.innerStride()==1)
+      x_ptr = &x.col(j).coeffRef(0);
+    errorCode = umfpack_solve(UMFPACK_A,
+        mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
+        x_ptr, &b.const_cast_derived().col(j).coeffRef(0), m_numeric, m_control.data(), m_umfpackInfo.data());
+    if(x.innerStride()!=1)
+      x.col(j) = x_tmp;
+    if (errorCode!=0)
+      return false;
+  }
+
+  return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_UMFPACKSUPPORT_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Image.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Image.h
new file mode 100644
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Image.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MISC_IMAGE_H
+#define EIGEN_MISC_IMAGE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \class image_retval_base
+ *
+ */
+template<typename DecompositionType>
+struct traits<image_retval_base<DecompositionType> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<
+    typename MatrixType::Scalar,
+    MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose
+                                   // dimension is the number of rows of the original matrix
+    Dynamic,                       // we don't know at compile time the dimension of the image (the rank)
+    MatrixType::Options,
+    MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
+    MatrixType::MaxColsAtCompileTime  // so it has the same number of rows and at most as many columns.
+  > ReturnType;
+};
+
+template<typename _DecompositionType> struct image_retval_base
+ : public ReturnByValue<image_retval_base<_DecompositionType> >
+{
+  typedef _DecompositionType DecompositionType;
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef ReturnByValue<image_retval_base> Base;
+
+  image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
+    : m_dec(dec), m_rank(dec.rank()),
+      m_cols(m_rank == 0 ? 1 : m_rank),
+      m_originalMatrix(originalMatrix)
+  {}
+
+  inline Index rows() const { return m_dec.rows(); }
+  inline Index cols() const { return m_cols; }
+  inline Index rank() const { return m_rank; }
+  inline const DecompositionType& dec() const { return m_dec; }
+  inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    Index m_rank, m_cols;
+    const MatrixType& m_originalMatrix;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef Eigen::internal::image_retval_base<DecompositionType> Base; \
+  using Base::dec; \
+  using Base::originalMatrix; \
+  using Base::rank; \
+  using Base::rows; \
+  using Base::cols; \
+  image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \
+    : Base(dec, originalMatrix) {}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MISC_IMAGE_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Kernel.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Kernel.h
new file mode 100644
index 000000000..bef5d6ff5
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/Kernel.h
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MISC_KERNEL_H
+#define EIGEN_MISC_KERNEL_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \class kernel_retval_base
+ *
+ */
+template<typename DecompositionType>
+struct traits<kernel_retval_base<DecompositionType> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<
+    typename MatrixType::Scalar,
+    MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix"
+                                   // is the number of cols of the original matrix
+                                   // so that the product "matrix * kernel = zero" makes sense
+    Dynamic,                       // we don't know at compile-time the dimension of the kernel
+    MatrixType::Options,
+    MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
+    MatrixType::MaxColsAtCompileTime  // the kernel is a subspace of the domain space,
+                                      // whose dimension is the number of columns of the original matrix
+  > ReturnType;
+};
+
+template<typename _DecompositionType> struct kernel_retval_base
+ : public ReturnByValue<kernel_retval_base<_DecompositionType> >
+{
+  typedef _DecompositionType DecompositionType;
+  typedef ReturnByValue<kernel_retval_base> Base;
+
+  explicit kernel_retval_base(const DecompositionType& dec)
+    : m_dec(dec),
+      m_rank(dec.rank()),
+      m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
+  {}
+
+  inline Index rows() const { return m_dec.cols(); }
+  inline Index cols() const { return m_cols; }
+  inline Index rank() const { return m_rank; }
+  inline const DecompositionType& dec() const { return m_dec; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    Index m_rank, m_cols;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \
+  using Base::dec; \
+  using Base::rank; \
+  using Base::rows; \
+  using Base::cols; \
+  kernel_retval(const DecompositionType& dec) : Base(dec) {}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MISC_KERNEL_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/RealSvd2x2.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/RealSvd2x2.h
new file mode 100644
index 000000000..abb4d3c2f
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/RealSvd2x2.h
@@ -0,0 +1,55 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2013-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REALSVD2X2_H
+#define EIGEN_REALSVD2X2_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+                         JacobiRotation<RealScalar> *j_left,
+                         JacobiRotation<RealScalar> *j_right)
+{
+  using std::sqrt;
+  using std::abs;
+  Matrix<RealScalar,2,2> m;
+  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+  JacobiRotation<RealScalar> rot1;
+  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+
+  if(abs(d) < (std::numeric_limits<RealScalar>::min)())
+  {
+    rot1.s() = RealScalar(0);
+    rot1.c() = RealScalar(1);
+  }
+  else
+  {
+    // If d!=0, then t/d cannot overflow because the magnitude of the
+    // entries forming d are not too small compared to the ones forming t.
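    // Editor's note (editorial addition): rot1 is built with c/s = t/d, i.e.
    // c*d == s*t, which is precisely the condition making rot1 * m symmetric;
    // makeJacobi() below then diagonalizes that symmetric 2x2 block, and the
    // two rotations combined yield the SVD of m.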
+ RealScalar u = t / d; + RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u)); + rot1.s() = RealScalar(1) / tmp; + rot1.c() = u / tmp; + } + m.applyOnTheLeft(0,1,rot1); + j_right->makeJacobi(m,0,1); + *j_left = rot1 * j_right->transpose(); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_REALSVD2X2_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/blas.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/blas.h new file mode 100644 index 000000000..25215b15e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/blas.h @@ -0,0 +1,440 @@ +#ifndef BLAS_H +#define BLAS_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define BLASFUNC(FUNC) FUNC##_ + +#ifdef __WIN64__ +typedef long long BLASLONG; +typedef unsigned long long BLASULONG; +#else +typedef long BLASLONG; +typedef unsigned long BLASULONG; +#endif + +int BLASFUNC(xerbla)(const char *, int *info, int); + +float BLASFUNC(sdot) (int *, float *, int *, float *, int *); +float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *); + +double BLASFUNC(dsdot) (int *, float *, int *, float *, int *); +double BLASFUNC(ddot) (int *, double *, int *, double *, int *); +double BLASFUNC(qdot) (int *, double *, int *, double *, int *); + +int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*); +int BLASFUNC(cdotcw) (int *, float *, int *, float *, int *, float*); +int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*); +int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*); + +int BLASFUNC(saxpy) (const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(daxpy) (const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(qaxpy) (const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(caxpy) (const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(zaxpy) (const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(xaxpy) (const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(caxpyc)(const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(zaxpyc)(const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(xaxpyc)(const int *, const double *, const double *, const int *, double *, const int *); + +int BLASFUNC(scopy) (int *, float *, int *, float *, int *); +int BLASFUNC(dcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(qcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(ccopy) (int *, float *, int *, float *, int *); +int BLASFUNC(zcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(xcopy) (int *, double *, int *, double *, int *); + +int BLASFUNC(sswap) (int *, float *, int *, float *, int *); +int BLASFUNC(dswap) (int *, double *, int *, double *, int *); +int BLASFUNC(qswap) (int *, double *, int *, double *, int *); +int BLASFUNC(cswap) (int *, float *, int *, float *, int *); +int BLASFUNC(zswap) (int *, double *, int *, double *, int *); +int BLASFUNC(xswap) (int *, double *, int *, double *, int *); + +float BLASFUNC(sasum) (int *, float *, int *); +float BLASFUNC(scasum)(int *, float *, int *); +double BLASFUNC(dasum) 
(int *, double *, int *); +double BLASFUNC(qasum) (int *, double *, int *); +double BLASFUNC(dzasum)(int *, double *, int *); +double BLASFUNC(qxasum)(int *, double *, int *); + +int BLASFUNC(isamax)(int *, float *, int *); +int BLASFUNC(idamax)(int *, double *, int *); +int BLASFUNC(iqamax)(int *, double *, int *); +int BLASFUNC(icamax)(int *, float *, int *); +int BLASFUNC(izamax)(int *, double *, int *); +int BLASFUNC(ixamax)(int *, double *, int *); + +int BLASFUNC(ismax) (int *, float *, int *); +int BLASFUNC(idmax) (int *, double *, int *); +int BLASFUNC(iqmax) (int *, double *, int *); +int BLASFUNC(icmax) (int *, float *, int *); +int BLASFUNC(izmax) (int *, double *, int *); +int BLASFUNC(ixmax) (int *, double *, int *); + +int BLASFUNC(isamin)(int *, float *, int *); +int BLASFUNC(idamin)(int *, double *, int *); +int BLASFUNC(iqamin)(int *, double *, int *); +int BLASFUNC(icamin)(int *, float *, int *); +int BLASFUNC(izamin)(int *, double *, int *); +int BLASFUNC(ixamin)(int *, double *, int *); + +int BLASFUNC(ismin)(int *, float *, int *); +int BLASFUNC(idmin)(int *, double *, int *); +int BLASFUNC(iqmin)(int *, double *, int *); +int BLASFUNC(icmin)(int *, float *, int *); +int BLASFUNC(izmin)(int *, double *, int *); +int BLASFUNC(ixmin)(int *, double *, int *); + +float BLASFUNC(samax) (int *, float *, int *); +double BLASFUNC(damax) (int *, double *, int *); +double BLASFUNC(qamax) (int *, double *, int *); +float BLASFUNC(scamax)(int *, float *, int *); +double BLASFUNC(dzamax)(int *, double *, int *); +double BLASFUNC(qxamax)(int *, double *, int *); + +float BLASFUNC(samin) (int *, float *, int *); +double BLASFUNC(damin) (int *, double *, int *); +double BLASFUNC(qamin) (int *, double *, int *); +float BLASFUNC(scamin)(int *, float *, int *); +double BLASFUNC(dzamin)(int *, double *, int *); +double BLASFUNC(qxamin)(int *, double *, int *); + +float BLASFUNC(smax) (int *, float *, int *); +double BLASFUNC(dmax) (int *, double *, int *); +double BLASFUNC(qmax) (int *, double *, int *); +float BLASFUNC(scmax) (int *, float *, int *); +double BLASFUNC(dzmax) (int *, double *, int *); +double BLASFUNC(qxmax) (int *, double *, int *); + +float BLASFUNC(smin) (int *, float *, int *); +double BLASFUNC(dmin) (int *, double *, int *); +double BLASFUNC(qmin) (int *, double *, int *); +float BLASFUNC(scmin) (int *, float *, int *); +double BLASFUNC(dzmin) (int *, double *, int *); +double BLASFUNC(qxmin) (int *, double *, int *); + +int BLASFUNC(sscal) (int *, float *, float *, int *); +int BLASFUNC(dscal) (int *, double *, double *, int *); +int BLASFUNC(qscal) (int *, double *, double *, int *); +int BLASFUNC(cscal) (int *, float *, float *, int *); +int BLASFUNC(zscal) (int *, double *, double *, int *); +int BLASFUNC(xscal) (int *, double *, double *, int *); +int BLASFUNC(csscal)(int *, float *, float *, int *); +int BLASFUNC(zdscal)(int *, double *, double *, int *); +int BLASFUNC(xqscal)(int *, double *, double *, int *); + +float BLASFUNC(snrm2) (int *, float *, int *); +float BLASFUNC(scnrm2)(int *, float *, int *); + +double BLASFUNC(dnrm2) (int *, double *, int *); +double BLASFUNC(qnrm2) (int *, double *, int *); +double BLASFUNC(dznrm2)(int *, double *, int *); +double BLASFUNC(qxnrm2)(int *, double *, int *); + +int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *); +int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *); +int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *); +int 
BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *); +int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *); +int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *); + +int BLASFUNC(srotg) (float *, float *, float *, float *); +int BLASFUNC(drotg) (double *, double *, double *, double *); +int BLASFUNC(qrotg) (double *, double *, double *, double *); +int BLASFUNC(crotg) (float *, float *, float *, float *); +int BLASFUNC(zrotg) (double *, double *, double *, double *); +int BLASFUNC(xrotg) (double *, double *, double *, double *); + +int BLASFUNC(srotmg)(float *, float *, float *, float *, float *); +int BLASFUNC(drotmg)(double *, double *, double *, double *, double *); + +int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *); +int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *); +int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *); + +/* Level 2 routines */ + +int BLASFUNC(sger)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(dger)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(qger)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(cgeru)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(cgerc)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(zgeru)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(zgerc)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(xgeru)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(xgerc)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); + +int BLASFUNC(sgemv)(const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(qgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(cgemv)(const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(strsv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(dtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(qtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(ctrsv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(ztrsv) (const char *, const char *, const char *, const int *, const double *, const int *, 
double *, const int *); +int BLASFUNC(xtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); + +int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *); + +int BLASFUNC(strmv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(dtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(qtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(ctrmv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(ztrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(xtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *); + +int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *); + +int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); + +int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); + +int BLASFUNC(ssymv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(qsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(sspmv) (char *, 
int *, float *, float *, + float *, int *, float *, float *, int *); +int BLASFUNC(dspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(qspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); + +int BLASFUNC(ssyr) (const char *, const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(dsyr) (const char *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(qsyr) (const char *, const int *, const double *, const double *, const int *, double *, const int *); + +int BLASFUNC(ssyr2) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(dsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(qsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(csyr2) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, float *, const int *); +int BLASFUNC(zsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *); +int BLASFUNC(xsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *); + +int BLASFUNC(sspr) (char *, int *, float *, float *, int *, + float *); +int BLASFUNC(dspr) (char *, int *, double *, double *, int *, + double *); +int BLASFUNC(qspr) (char *, int *, double *, double *, int *, + double *); + +int BLASFUNC(sspr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int BLASFUNC(dspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(qspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(cspr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int BLASFUNC(zspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(xspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); + +int BLASFUNC(cher) (char *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zher) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(xher) (char *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *); +int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *); +int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *); + +int BLASFUNC(cher2) (char *, int *, float *, + float *, int *, float *, int *, float *, int *); +int BLASFUNC(zher2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); +int BLASFUNC(xher2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); + +int BLASFUNC(chpr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int BLASFUNC(zhpr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(xhpr2) (char *, int *, double *, + double *, int *, double *, int *, double *); + +int BLASFUNC(chemv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zhemv) (const char *, const int *, const 
double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xhemv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(chpmv) (char *, int *, float *, float *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhpmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhpmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); + +int BLASFUNC(snorm)(char *, int *, int *, float *, int *); +int BLASFUNC(dnorm)(char *, int *, int *, double *, int *); +int BLASFUNC(cnorm)(char *, int *, int *, float *, int *); +int BLASFUNC(znorm)(char *, int *, int *, double *, int *); + +int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +/* Level 3 routines */ + +int BLASFUNC(sgemm)(const char *, const char *, const int *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(qgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(cgemm)(const char *, const char *, const int *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zgemm)(const char *, const char *, const int *, const int *, const int 
*, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *, + float *, int *, float *, int *, float *, float *, int *); +int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); + +int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *, + float *, float *, int *); +int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *, + double *, double *, int *); +int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *, + float *, float *, int *); +int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *, + double *, double *, int *); + +int BLASFUNC(strsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(dtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(qtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(ctrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(ztrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(xtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); + +int BLASFUNC(strmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(dtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(qtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(ctrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *); +int BLASFUNC(ztrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); +int BLASFUNC(xtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *); + +int BLASFUNC(ssymm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dsymm)(const char *, const char *, const int 
*, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(qsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(csymm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); +int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); + +int BLASFUNC(ssyrk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(qsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(csyrk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(ssyr2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(dsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); +int BLASFUNC(qsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); +int BLASFUNC(csyr2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); +int BLASFUNC(xsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); + +int BLASFUNC(chemm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zhemm)(const char 
*, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xhemm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(cherk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *); + +int BLASFUNC(cher2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(cher2m)(const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); +int BLASFUNC(xher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapack.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapack.h new file mode 100644 index 000000000..249f3575c --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapack.h @@ -0,0 +1,152 @@ +#ifndef LAPACK_H +#define LAPACK_H + +#include "blas.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +int BLASFUNC(csymv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *); +int BLASFUNC(zsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); +int BLASFUNC(xsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *); + + +int BLASFUNC(cspmv) (char *, int *, float *, float *, + float *, int *, float *, float *, 
int *); +int BLASFUNC(zspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(xspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); + +int BLASFUNC(csyr) (char *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zsyr) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(xsyr) (char *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(cspr) (char *, int *, float *, float *, int *, + float *); +int BLASFUNC(zspr) (char *, int *, double *, double *, int *, + double *); +int BLASFUNC(xspr) (char *, int *, double *, double *, int *, + double *); + +int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(sgema)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(dgema)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); +int BLASFUNC(cgema)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(zgema)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); + +int BLASFUNC(sgems)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(dgems)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); +int BLASFUNC(cgems)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(zgems)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); + +int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *); + +int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *); + +int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *); +int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *); +int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *); + +int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); +int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, 
int *, int *); +int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); +int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); + +int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *); + +int BLASFUNC(spotf2)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *); + +int BLASFUNC(spotrf)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *); + +int BLASFUNC(slauu2)(char *, int *, float *, int *, int *); +int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(clauu2)(char *, int *, float *, int *, int *); +int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *); + +int BLASFUNC(slauum)(char *, int *, float *, int *, int *); +int BLASFUNC(dlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(qlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(clauum)(char *, int *, float *, int *, int *); +int BLASFUNC(zlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(xlauum)(char *, int *, double *, int *, int *); + +int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *); + +int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *); + +int BLASFUNC(spotri)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotri)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotri)(char *, int *, double *, int *, int *); + +#ifdef __cplusplus +} +#endif + +#endif diff --git 
a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke.h new file mode 100644 index 000000000..8c7e79b03 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke.h @@ -0,0 +1,16291 @@ +/***************************************************************************** + Copyright (c) 2010, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +****************************************************************************** +* Contents: Native C interface to LAPACK +* Author: Intel Corporation +* Generated November, 2011 +*****************************************************************************/ + +#ifndef _MKL_LAPACKE_H_ + +#ifndef _LAPACKE_H_ +#define _LAPACKE_H_ + +/* +* Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes +*/ +#ifdef HAVE_LAPACK_CONFIG_H +#include "lapacke_config.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include <stdlib.h> + +#ifndef lapack_int +#define lapack_int int +#endif + +#ifndef lapack_logical +#define lapack_logical lapack_int +#endif + +/* Complex types are structures equivalent to the +* Fortran complex types COMPLEX(4) and COMPLEX(8).
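+* By default, when LAPACK_COMPLEX_CUSTOM is not defined, they map to the +* C99 _Complex types declared below; a usage sketch (the variable name is +* illustrative only): +* +* lapack_complex_float z = lapack_make_complex_float( 1.0f, -2.0f );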
+* +* One can also redefine the types with his own types +* for example by including in the code definitions like +* +* #define lapack_complex_float std::complex<float> +* #define lapack_complex_double std::complex<double> +* +* or define these types in the command line: +* +* -Dlapack_complex_float="std::complex<float>" +* -Dlapack_complex_double="std::complex<double>" +*/ + +#ifndef LAPACK_COMPLEX_CUSTOM + +/* Complex type (single precision) */ +#ifndef lapack_complex_float +#include <complex.h> +#define lapack_complex_float float _Complex +#endif + +#ifndef lapack_complex_float_real +#define lapack_complex_float_real(z) (creal(z)) +#endif + +#ifndef lapack_complex_float_imag +#define lapack_complex_float_imag(z) (cimag(z)) +#endif + +lapack_complex_float lapack_make_complex_float( float re, float im ); + +/* Complex type (double precision) */ +#ifndef lapack_complex_double +#include <complex.h> +#define lapack_complex_double double _Complex +#endif + +#ifndef lapack_complex_double_real +#define lapack_complex_double_real(z) (creal(z)) +#endif + +#ifndef lapack_complex_double_imag +#define lapack_complex_double_imag(z) (cimag(z)) +#endif + +lapack_complex_double lapack_make_complex_double( double re, double im ); + +#endif + +#ifndef LAPACKE_malloc +#define LAPACKE_malloc( size ) malloc( size ) +#endif +#ifndef LAPACKE_free +#define LAPACKE_free( p ) free( p ) +#endif + +#define LAPACK_C2INT( x ) (lapack_int)(*((float*)&x )) +#define LAPACK_Z2INT( x ) (lapack_int)(*((double*)&x )) + +#define LAPACK_ROW_MAJOR 101 +#define LAPACK_COL_MAJOR 102 + +#define LAPACK_WORK_MEMORY_ERROR -1010 +#define LAPACK_TRANSPOSE_MEMORY_ERROR -1011 + +/* Callback logical functions of one, two, or three arguments are used +* to select eigenvalues to sort to the top left of the Schur form. +* The value is selected if function returns TRUE (non-zero).
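+* +* A minimal sketch of such a selector (the function name is illustrative, +* not part of this header): a two-argument routine usable as the +* LAPACK_D_SELECT2 argument of LAPACKE_dgees declared below, keeping +* eigenvalues with negative real part, could be +* +* lapack_logical select_lhp( const double* wr, const double* wi ) +* { return *wr < 0.0; }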
*/ + +typedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* ); +typedef lapack_logical (*LAPACK_S_SELECT3) + ( const float*, const float*, const float* ); +typedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* ); +typedef lapack_logical (*LAPACK_D_SELECT3) + ( const double*, const double*, const double* ); + +typedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* ); +typedef lapack_logical (*LAPACK_C_SELECT2) + ( const lapack_complex_float*, const lapack_complex_float* ); +typedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* ); +typedef lapack_logical (*LAPACK_Z_SELECT2) + ( const lapack_complex_double*, const lapack_complex_double* ); + +#include "lapacke_mangling.h" + +#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME) +lapack_logical LAPACK_lsame( char* ca, char* cb, + lapack_int lca, lapack_int lcb ); + +/* C-LAPACK function prototypes */ + +lapack_int LAPACKE_sbdsdc( int matrix_order, char uplo, char compq, + lapack_int n, float* d, float* e, float* u, + lapack_int ldu, float* vt, lapack_int ldvt, float* q, + lapack_int* iq ); +lapack_int LAPACKE_dbdsdc( int matrix_order, char uplo, char compq, + lapack_int n, double* d, double* e, double* u, + lapack_int ldu, double* vt, lapack_int ldvt, + double* q, lapack_int* iq ); + +lapack_int LAPACKE_sbdsqr( int matrix_order, char uplo, lapack_int n, + lapack_int ncvt, lapack_int nru, lapack_int ncc, + float* d, float* e, float* vt, lapack_int ldvt, + float* u, lapack_int ldu, float* c, lapack_int ldc ); +lapack_int LAPACKE_dbdsqr( int matrix_order, char uplo, lapack_int n, + lapack_int ncvt, lapack_int nru, lapack_int ncc, + double* d, double* e, double* vt, lapack_int ldvt, + double* u, lapack_int ldu, double* c, + lapack_int ldc ); +lapack_int LAPACKE_cbdsqr( int matrix_order, char uplo, lapack_int n, + lapack_int ncvt, lapack_int nru, lapack_int ncc, + float* d, float* e, lapack_complex_float* vt, + lapack_int ldvt, lapack_complex_float* u, + lapack_int ldu, lapack_complex_float* c, + lapack_int ldc ); +lapack_int LAPACKE_zbdsqr( int matrix_order, char uplo, lapack_int n, + lapack_int ncvt, lapack_int nru, lapack_int ncc, + double* d, double* e, lapack_complex_double* vt, + lapack_int ldvt, lapack_complex_double* u, + lapack_int ldu, lapack_complex_double* c, + lapack_int ldc ); + +lapack_int LAPACKE_sdisna( char job, lapack_int m, lapack_int n, const float* d, + float* sep ); +lapack_int LAPACKE_ddisna( char job, lapack_int m, lapack_int n, + const double* d, double* sep ); + +lapack_int LAPACKE_sgbbrd( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int ncc, lapack_int kl, + lapack_int ku, float* ab, lapack_int ldab, float* d, + float* e, float* q, lapack_int ldq, float* pt, + lapack_int ldpt, float* c, lapack_int ldc ); +lapack_int LAPACKE_dgbbrd( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int ncc, lapack_int kl, + lapack_int ku, double* ab, lapack_int ldab, + double* d, double* e, double* q, lapack_int ldq, + double* pt, lapack_int ldpt, double* c, + lapack_int ldc ); +lapack_int LAPACKE_cgbbrd( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int ncc, lapack_int kl, + lapack_int ku, lapack_complex_float* ab, + lapack_int ldab, float* d, float* e, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* pt, lapack_int ldpt, + lapack_complex_float* c, lapack_int ldc ); +lapack_int LAPACKE_zgbbrd( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int ncc, lapack_int kl, + lapack_int 
ku, lapack_complex_double* ab, + lapack_int ldab, double* d, double* e, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* pt, lapack_int ldpt, + lapack_complex_double* c, lapack_int ldc ); + +lapack_int LAPACKE_sgbcon( int matrix_order, char norm, lapack_int n, + lapack_int kl, lapack_int ku, const float* ab, + lapack_int ldab, const lapack_int* ipiv, float anorm, + float* rcond ); +lapack_int LAPACKE_dgbcon( int matrix_order, char norm, lapack_int n, + lapack_int kl, lapack_int ku, const double* ab, + lapack_int ldab, const lapack_int* ipiv, + double anorm, double* rcond ); +lapack_int LAPACKE_cgbcon( int matrix_order, char norm, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_float* ab, lapack_int ldab, + const lapack_int* ipiv, float anorm, float* rcond ); +lapack_int LAPACKE_zgbcon( int matrix_order, char norm, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_double* ab, lapack_int ldab, + const lapack_int* ipiv, double anorm, + double* rcond ); + +lapack_int LAPACKE_sgbequ( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const float* ab, + lapack_int ldab, float* r, float* c, float* rowcnd, + float* colcnd, float* amax ); +lapack_int LAPACKE_dgbequ( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const double* ab, + lapack_int ldab, double* r, double* c, + double* rowcnd, double* colcnd, double* amax ); +lapack_int LAPACKE_cgbequ( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_float* ab, lapack_int ldab, + float* r, float* c, float* rowcnd, float* colcnd, + float* amax ); +lapack_int LAPACKE_zgbequ( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_double* ab, lapack_int ldab, + double* r, double* c, double* rowcnd, double* colcnd, + double* amax ); + +lapack_int LAPACKE_sgbequb( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const float* ab, + lapack_int ldab, float* r, float* c, float* rowcnd, + float* colcnd, float* amax ); +lapack_int LAPACKE_dgbequb( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const double* ab, + lapack_int ldab, double* r, double* c, + double* rowcnd, double* colcnd, double* amax ); +lapack_int LAPACKE_cgbequb( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_float* ab, lapack_int ldab, + float* r, float* c, float* rowcnd, float* colcnd, + float* amax ); +lapack_int LAPACKE_zgbequb( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, + const lapack_complex_double* ab, lapack_int ldab, + double* r, double* c, double* rowcnd, + double* colcnd, double* amax ); + +lapack_int LAPACKE_sgbrfs( int matrix_order, char trans, lapack_int n, + lapack_int kl, lapack_int ku, lapack_int nrhs, + const float* ab, lapack_int ldab, const float* afb, + lapack_int ldafb, const lapack_int* ipiv, + const float* b, lapack_int ldb, float* x, + lapack_int ldx, float* ferr, float* berr ); +lapack_int LAPACKE_dgbrfs( int matrix_order, char trans, lapack_int n, + lapack_int kl, lapack_int ku, lapack_int nrhs, + const double* ab, lapack_int ldab, const double* afb, + lapack_int ldafb, const lapack_int* ipiv, + const double* b, lapack_int ldb, double* x, + lapack_int ldx, double* ferr, double* berr ); +lapack_int LAPACKE_cgbrfs( int matrix_order, char trans, lapack_int n, + lapack_int kl, lapack_int ku, lapack_int nrhs, + const 
lapack_complex_float* ab, lapack_int ldab, + const lapack_complex_float* afb, lapack_int ldafb, + const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, float* ferr, + float* berr ); +lapack_int LAPACKE_zgbrfs( int matrix_order, char trans, lapack_int n, + lapack_int kl, lapack_int ku, lapack_int nrhs, + const lapack_complex_double* ab, lapack_int ldab, + const lapack_complex_double* afb, lapack_int ldafb, + const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_sgbrfsx( int matrix_order, char trans, char equed, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, const float* ab, lapack_int ldab, + const float* afb, lapack_int ldafb, + const lapack_int* ipiv, const float* r, + const float* c, const float* b, lapack_int ldb, + float* x, lapack_int ldx, float* rcond, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params ); +lapack_int LAPACKE_dgbrfsx( int matrix_order, char trans, char equed, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, const double* ab, lapack_int ldab, + const double* afb, lapack_int ldafb, + const lapack_int* ipiv, const double* r, + const double* c, const double* b, lapack_int ldb, + double* x, lapack_int ldx, double* rcond, + double* berr, lapack_int n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int nparams, double* params ); +lapack_int LAPACKE_cgbrfsx( int matrix_order, char trans, char equed, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, const lapack_complex_float* ab, + lapack_int ldab, const lapack_complex_float* afb, + lapack_int ldafb, const lapack_int* ipiv, + const float* r, const float* c, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* berr, lapack_int n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int nparams, float* params ); +lapack_int LAPACKE_zgbrfsx( int matrix_order, char trans, char equed, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, const lapack_complex_double* ab, + lapack_int ldab, const lapack_complex_double* afb, + lapack_int ldafb, const lapack_int* ipiv, + const double* r, const double* c, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* berr, lapack_int n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int nparams, double* params ); + +lapack_int LAPACKE_sgbsv( int matrix_order, lapack_int n, lapack_int kl, + lapack_int ku, lapack_int nrhs, float* ab, + lapack_int ldab, lapack_int* ipiv, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dgbsv( int matrix_order, lapack_int n, lapack_int kl, + lapack_int ku, lapack_int nrhs, double* ab, + lapack_int ldab, lapack_int* ipiv, double* b, + lapack_int ldb ); +lapack_int LAPACKE_cgbsv( int matrix_order, lapack_int n, lapack_int kl, + lapack_int ku, lapack_int nrhs, + lapack_complex_float* ab, lapack_int ldab, + lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zgbsv( int matrix_order, lapack_int n, lapack_int kl, + lapack_int ku, lapack_int nrhs, + lapack_complex_double* ab, lapack_int ldab, + lapack_int* ipiv, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_sgbsvx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, 
lapack_int ku, + lapack_int nrhs, float* ab, lapack_int ldab, + float* afb, lapack_int ldafb, lapack_int* ipiv, + char* equed, float* r, float* c, float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr, + float* rpivot ); +lapack_int LAPACKE_dgbsvx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, double* ab, lapack_int ldab, + double* afb, lapack_int ldafb, lapack_int* ipiv, + char* equed, double* r, double* c, double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + double* rpivot ); +lapack_int LAPACKE_cgbsvx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, lapack_complex_float* ab, + lapack_int ldab, lapack_complex_float* afb, + lapack_int ldafb, lapack_int* ipiv, char* equed, + float* r, float* c, lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* ferr, + float* berr, float* rpivot ); +lapack_int LAPACKE_zgbsvx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, lapack_complex_double* ab, + lapack_int ldab, lapack_complex_double* afb, + lapack_int ldafb, lapack_int* ipiv, char* equed, + double* r, double* c, lapack_complex_double* b, + lapack_int ldb, lapack_complex_double* x, + lapack_int ldx, double* rcond, double* ferr, + double* berr, double* rpivot ); + +lapack_int LAPACKE_sgbsvxx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, float* ab, lapack_int ldab, + float* afb, lapack_int ldafb, lapack_int* ipiv, + char* equed, float* r, float* c, float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* rcond, float* rpvgrw, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params ); +lapack_int LAPACKE_dgbsvxx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, double* ab, lapack_int ldab, + double* afb, lapack_int ldafb, lapack_int* ipiv, + char* equed, double* r, double* c, double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* rpvgrw, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params ); +lapack_int LAPACKE_cgbsvxx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, lapack_complex_float* ab, + lapack_int ldab, lapack_complex_float* afb, + lapack_int ldafb, lapack_int* ipiv, char* equed, + float* r, float* c, lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* rpvgrw, + float* berr, lapack_int n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int nparams, float* params ); +lapack_int LAPACKE_zgbsvxx( int matrix_order, char fact, char trans, + lapack_int n, lapack_int kl, lapack_int ku, + lapack_int nrhs, lapack_complex_double* ab, + lapack_int ldab, lapack_complex_double* afb, + lapack_int ldafb, lapack_int* ipiv, char* equed, + double* r, double* c, lapack_complex_double* b, + lapack_int ldb, lapack_complex_double* x, + lapack_int ldx, double* rcond, double* rpvgrw, + double* berr, lapack_int n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int nparams, double* params ); + +lapack_int LAPACKE_sgbtrf( int matrix_order, lapack_int m, lapack_int n, + lapack_int 
kl, lapack_int ku, float* ab,
+                           lapack_int ldab, lapack_int* ipiv );
+lapack_int LAPACKE_dgbtrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku, double* ab,
+                           lapack_int ldab, lapack_int* ipiv );
+lapack_int LAPACKE_cgbtrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           lapack_int* ipiv );
+lapack_int LAPACKE_zgbtrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           lapack_int* ipiv );
+
+lapack_int LAPACKE_sgbtrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int kl, lapack_int ku, lapack_int nrhs,
+                           const float* ab, lapack_int ldab,
+                           const lapack_int* ipiv, float* b, lapack_int ldb );
+lapack_int LAPACKE_dgbtrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int kl, lapack_int ku, lapack_int nrhs,
+                           const double* ab, lapack_int ldab,
+                           const lapack_int* ipiv, double* b, lapack_int ldb );
+lapack_int LAPACKE_cgbtrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int kl, lapack_int ku, lapack_int nrhs,
+                           const lapack_complex_float* ab, lapack_int ldab,
+                           const lapack_int* ipiv, lapack_complex_float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_zgbtrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int kl, lapack_int ku, lapack_int nrhs,
+                           const lapack_complex_double* ab, lapack_int ldab,
+                           const lapack_int* ipiv, lapack_complex_double* b,
+                           lapack_int ldb );
+
+lapack_int LAPACKE_sgebak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const float* scale,
+                           lapack_int m, float* v, lapack_int ldv );
+lapack_int LAPACKE_dgebak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const double* scale,
+                           lapack_int m, double* v, lapack_int ldv );
+lapack_int LAPACKE_cgebak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const float* scale,
+                           lapack_int m, lapack_complex_float* v,
+                           lapack_int ldv );
+lapack_int LAPACKE_zgebak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const double* scale,
+                           lapack_int m, lapack_complex_double* v,
+                           lapack_int ldv );
+
+lapack_int LAPACKE_sgebal( int matrix_order, char job, lapack_int n, float* a,
+                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,
+                           float* scale );
+lapack_int LAPACKE_dgebal( int matrix_order, char job, lapack_int n, double* a,
+                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,
+                           double* scale );
+lapack_int LAPACKE_cgebal( int matrix_order, char job, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* ilo, lapack_int* ihi, float* scale );
+lapack_int LAPACKE_zgebal( int matrix_order, char job, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* ilo, lapack_int* ihi, double* scale );
+
+lapack_int LAPACKE_sgebrd( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* d, float* e,
+                           float* tauq, float* taup );
+lapack_int LAPACKE_dgebrd( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* d, double* e,
+                           double* tauq, double* taup );
+lapack_int LAPACKE_cgebrd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda, float* d,
+                           float* e, lapack_complex_float* tauq,
+                           lapack_complex_float* taup );
+lapack_int LAPACKE_zgebrd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda, double* d,
+                           double* e, lapack_complex_double* tauq,
+                           lapack_complex_double* taup );
+
+lapack_int LAPACKE_sgecon( int matrix_order, char norm, lapack_int n,
+                           const float* a, lapack_int lda, float anorm,
+                           float* rcond );
+lapack_int LAPACKE_dgecon( int matrix_order, char norm, lapack_int n,
+                           const double* a, lapack_int lda, double anorm,
+                           double* rcond );
+lapack_int LAPACKE_cgecon( int matrix_order, char norm, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           float anorm, float* rcond );
+lapack_int LAPACKE_zgecon( int matrix_order, char norm, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           double anorm, double* rcond );
+
+lapack_int LAPACKE_sgeequ( int matrix_order, lapack_int m, lapack_int n,
+                           const float* a, lapack_int lda, float* r, float* c,
+                           float* rowcnd, float* colcnd, float* amax );
+lapack_int LAPACKE_dgeequ( int matrix_order, lapack_int m, lapack_int n,
+                           const double* a, lapack_int lda, double* r,
+                           double* c, double* rowcnd, double* colcnd,
+                           double* amax );
+lapack_int LAPACKE_cgeequ( int matrix_order, lapack_int m, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           float* r, float* c, float* rowcnd, float* colcnd,
+                           float* amax );
+lapack_int LAPACKE_zgeequ( int matrix_order, lapack_int m, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           double* r, double* c, double* rowcnd, double* colcnd,
+                           double* amax );
+
+lapack_int LAPACKE_sgeequb( int matrix_order, lapack_int m, lapack_int n,
+                            const float* a, lapack_int lda, float* r, float* c,
+                            float* rowcnd, float* colcnd, float* amax );
+lapack_int LAPACKE_dgeequb( int matrix_order, lapack_int m, lapack_int n,
+                            const double* a, lapack_int lda, double* r,
+                            double* c, double* rowcnd, double* colcnd,
+                            double* amax );
+lapack_int LAPACKE_cgeequb( int matrix_order, lapack_int m, lapack_int n,
+                            const lapack_complex_float* a, lapack_int lda,
+                            float* r, float* c, float* rowcnd, float* colcnd,
+                            float* amax );
+lapack_int LAPACKE_zgeequb( int matrix_order, lapack_int m, lapack_int n,
+                            const lapack_complex_double* a, lapack_int lda,
+                            double* r, double* c, double* rowcnd,
+                            double* colcnd, double* amax );
+
+lapack_int LAPACKE_sgees( int matrix_order, char jobvs, char sort,
+                          LAPACK_S_SELECT2 select, lapack_int n, float* a,
+                          lapack_int lda, lapack_int* sdim, float* wr,
+                          float* wi, float* vs, lapack_int ldvs );
+lapack_int LAPACKE_dgees( int matrix_order, char jobvs, char sort,
+                          LAPACK_D_SELECT2 select, lapack_int n, double* a,
+                          lapack_int lda, lapack_int* sdim, double* wr,
+                          double* wi, double* vs, lapack_int ldvs );
+lapack_int LAPACKE_cgees( int matrix_order, char jobvs, char sort,
+                          LAPACK_C_SELECT1 select, lapack_int n,
+                          lapack_complex_float* a, lapack_int lda,
+                          lapack_int* sdim, lapack_complex_float* w,
+                          lapack_complex_float* vs, lapack_int ldvs );
+lapack_int LAPACKE_zgees( int matrix_order, char jobvs, char sort,
+                          LAPACK_Z_SELECT1 select, lapack_int n,
+                          lapack_complex_double* a, lapack_int lda,
+                          lapack_int* sdim, lapack_complex_double* w,
+                          lapack_complex_double* vs, lapack_int ldvs );
+
+lapack_int LAPACKE_sgeesx( int matrix_order, char jobvs, char sort,
+                           LAPACK_S_SELECT2 select, char sense, lapack_int n,
+                           float* a, lapack_int lda, lapack_int* sdim,
+                           float* wr, float* wi, float* vs, lapack_int ldvs,
+                           float* rconde, float* rcondv );
+lapack_int LAPACKE_dgeesx( int matrix_order, char jobvs, char sort,
+                           LAPACK_D_SELECT2 select, char sense, lapack_int n,
+                           double* a, lapack_int lda, lapack_int* sdim,
+                           double* wr, double* wi, double* vs, lapack_int ldvs,
+                           double* rconde, double* rcondv );
+lapack_int LAPACKE_cgeesx( int matrix_order, char jobvs, char sort,
+                           LAPACK_C_SELECT1 select, char sense, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* sdim, lapack_complex_float* w,
+                           lapack_complex_float* vs, lapack_int ldvs,
+                           float* rconde, float* rcondv );
+lapack_int LAPACKE_zgeesx( int matrix_order, char jobvs, char sort,
+                           LAPACK_Z_SELECT1 select, char sense, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* sdim, lapack_complex_double* w,
+                           lapack_complex_double* vs, lapack_int ldvs,
+                           double* rconde, double* rcondv );
+
+lapack_int LAPACKE_sgeev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, float* a, lapack_int lda, float* wr,
+                          float* wi, float* vl, lapack_int ldvl, float* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_dgeev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, double* a, lapack_int lda, double* wr,
+                          double* wi, double* vl, lapack_int ldvl, double* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_cgeev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, lapack_complex_float* a, lapack_int lda,
+                          lapack_complex_float* w, lapack_complex_float* vl,
+                          lapack_int ldvl, lapack_complex_float* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_zgeev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, lapack_complex_double* a,
+                          lapack_int lda, lapack_complex_double* w,
+                          lapack_complex_double* vl, lapack_int ldvl,
+                          lapack_complex_double* vr, lapack_int ldvr );
+
+lapack_int LAPACKE_sgeevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n, float* a,
+                           lapack_int lda, float* wr, float* wi, float* vl,
+                           lapack_int ldvl, float* vr, lapack_int ldvr,
+                           lapack_int* ilo, lapack_int* ihi, float* scale,
+                           float* abnrm, float* rconde, float* rcondv );
+lapack_int LAPACKE_dgeevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n, double* a,
+                           lapack_int lda, double* wr, double* wi, double* vl,
+                           lapack_int ldvl, double* vr, lapack_int ldvr,
+                           lapack_int* ilo, lapack_int* ihi, double* scale,
+                           double* abnrm, double* rconde, double* rcondv );
+lapack_int LAPACKE_cgeevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* w, lapack_complex_float* vl,
+                           lapack_int ldvl, lapack_complex_float* vr,
+                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
+                           float* scale, float* abnrm, float* rconde,
+                           float* rcondv );
+lapack_int LAPACKE_zgeevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* w, lapack_complex_double* vl,
+                           lapack_int ldvl, lapack_complex_double* vr,
+                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
+                           double* scale, double* abnrm, double* rconde,
+                           double* rcondv );
+
+lapack_int LAPACKE_sgehrd( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, float* a, lapack_int lda,
+                           float* tau );
+lapack_int LAPACKE_dgehrd( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, double* a, lapack_int lda,
+                           double* tau );
+lapack_int LAPACKE_cgehrd( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* tau );
+lapack_int LAPACKE_zgehrd( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgejsv( int matrix_order, char joba, char jobu, char jobv,
+                           char jobr, char jobt, char jobp, lapack_int m,
+                           lapack_int n, float* a, lapack_int lda, float* sva,
+                           float* u, lapack_int ldu, float* v, lapack_int ldv,
+                           float* stat, lapack_int* istat );
+lapack_int LAPACKE_dgejsv( int matrix_order, char joba, char jobu, char jobv,
+                           char jobr, char jobt, char jobp, lapack_int m,
+                           lapack_int n, double* a, lapack_int lda, double* sva,
+                           double* u, lapack_int ldu, double* v, lapack_int ldv,
+                           double* stat, lapack_int* istat );
+
+lapack_int LAPACKE_sgelq2( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgelq2( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgelq2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgelq2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgelqf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgelqf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgelqf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgelqf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgels( int matrix_order, char trans, lapack_int m,
+                          lapack_int n, lapack_int nrhs, float* a,
+                          lapack_int lda, float* b, lapack_int ldb );
+lapack_int LAPACKE_dgels( int matrix_order, char trans, lapack_int m,
+                          lapack_int n, lapack_int nrhs, double* a,
+                          lapack_int lda, double* b, lapack_int ldb );
+lapack_int LAPACKE_cgels( int matrix_order, char trans, lapack_int m,
+                          lapack_int n, lapack_int nrhs,
+                          lapack_complex_float* a, lapack_int lda,
+                          lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zgels( int matrix_order, char trans, lapack_int m,
+                          lapack_int n, lapack_int nrhs,
+                          lapack_complex_double* a, lapack_int lda,
+                          lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sgelsd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, float* s, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_dgelsd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, double* a, lapack_int lda,
+                           double* b, lapack_int ldb, double* s, double rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_cgelsd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, float* s, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_zgelsd( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, double* s, double rcond,
+                           lapack_int* rank );
+
+lapack_int LAPACKE_sgelss( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, float* s, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_dgelss( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, double* a, lapack_int lda,
+                           double* b, lapack_int ldb, double* s, double rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_cgelss( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, float* s, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_zgelss( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, double* s, double rcond,
+                           lapack_int* rank );
+
+lapack_int LAPACKE_sgelsy( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, lapack_int* jpvt, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_dgelsy( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, double* a, lapack_int lda,
+                           double* b, lapack_int ldb, lapack_int* jpvt,
+                           double rcond, lapack_int* rank );
+lapack_int LAPACKE_cgelsy( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, lapack_int* jpvt, float rcond,
+                           lapack_int* rank );
+lapack_int LAPACKE_zgelsy( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, lapack_int* jpvt, double rcond,
+                           lapack_int* rank );
+
+lapack_int LAPACKE_sgeqlf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgeqlf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgeqlf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqlf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgeqp3( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, lapack_int* jpvt,
+                           float* tau );
+lapack_int LAPACKE_dgeqp3( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, lapack_int* jpvt,
+                           double* tau );
+lapack_int LAPACKE_cgeqp3( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* jpvt, lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqp3( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* jpvt, lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgeqpf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, lapack_int* jpvt,
+                           float* tau );
+lapack_int LAPACKE_dgeqpf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, lapack_int* jpvt,
+                           double* tau );
+lapack_int LAPACKE_cgeqpf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* jpvt, lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqpf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* jpvt, lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgeqr2( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgeqr2( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgeqr2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqr2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgeqrf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgeqrf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgeqrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgeqrfp( int matrix_order, lapack_int m, lapack_int n,
+                            float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgeqrfp( int matrix_order, lapack_int m, lapack_int n,
+                            double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgeqrfp( int matrix_order, lapack_int m, lapack_int n,
+                            lapack_complex_float* a, lapack_int lda,
+                            lapack_complex_float* tau );
+lapack_int LAPACKE_zgeqrfp( int matrix_order, lapack_int m, lapack_int n,
+                            lapack_complex_double* a, lapack_int lda,
+                            lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgerfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const float* a, lapack_int lda,
+                           const float* af, lapack_int ldaf,
+                           const lapack_int* ipiv, const float* b,
+                           lapack_int ldb, float* x, lapack_int ldx,
+                           float* ferr, float* berr );
+lapack_int LAPACKE_dgerfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const double* a, lapack_int lda,
+                           const double* af, lapack_int ldaf,
+                           const lapack_int* ipiv, const double* b,
+                           lapack_int ldb, double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+lapack_int LAPACKE_cgerfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* af,
+                           lapack_int ldaf, const lapack_int* ipiv,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zgerfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* af,
+                           lapack_int ldaf, const lapack_int* ipiv,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_sgerfsx( int matrix_order, char trans, char equed,
+                            lapack_int n, lapack_int nrhs, const float* a,
+                            lapack_int lda, const float* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const float* r,
+                            const float* c, const float* b, lapack_int ldb,
+                            float* x, lapack_int ldx, float* rcond, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_dgerfsx( int matrix_order, char trans, char equed,
+                            lapack_int n, lapack_int nrhs, const double* a,
+                            lapack_int lda, const double* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const double* r,
+                            const double* c, const double* b, lapack_int ldb,
+                            double* x, lapack_int ldx, double* rcond,
+                            double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+lapack_int LAPACKE_cgerfsx( int matrix_order, char trans, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_float* a, lapack_int lda,
+                            const lapack_complex_float* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const float* r,
+                            const float* c, const lapack_complex_float* b,
+                            lapack_int ldb, lapack_complex_float* x,
+                            lapack_int ldx, float* rcond, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_zgerfsx( int matrix_order, char trans, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_double* a, lapack_int lda,
+                            const lapack_complex_double* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const double* r,
+                            const double* c, const lapack_complex_double* b,
+                            lapack_int ldb, lapack_complex_double* x,
+                            lapack_int ldx, double* rcond, double* berr,
+                            lapack_int n_err_bnds, double* err_bnds_norm,
+                            double* err_bnds_comp, lapack_int nparams,
+                            double* params );
+
+lapack_int LAPACKE_sgerqf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dgerqf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_cgerqf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zgerqf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_sgesdd( int matrix_order, char jobz, lapack_int m,
+                           lapack_int n, float* a, lapack_int lda, float* s,
+                           float* u, lapack_int ldu, float* vt,
+                           lapack_int ldvt );
+lapack_int LAPACKE_dgesdd( int matrix_order, char jobz, lapack_int m,
+                           lapack_int n, double* a, lapack_int lda, double* s,
+                           double* u, lapack_int ldu, double* vt,
+                           lapack_int ldvt );
+lapack_int LAPACKE_cgesdd( int matrix_order, char jobz, lapack_int m,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, float* s, lapack_complex_float* u,
+                           lapack_int ldu, lapack_complex_float* vt,
+                           lapack_int ldvt );
+lapack_int LAPACKE_zgesdd( int matrix_order, char jobz, lapack_int m,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, double* s, lapack_complex_double* u,
+                           lapack_int ldu, lapack_complex_double* vt,
+                           lapack_int ldvt );
+
+lapack_int LAPACKE_sgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          float* a, lapack_int lda, lapack_int* ipiv, float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          double* a, lapack_int lda, lapack_int* ipiv,
+                          double* b, lapack_int ldb );
+lapack_int LAPACKE_cgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          lapack_complex_float* a, lapack_int lda,
+                          lapack_int* ipiv, lapack_complex_float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_zgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          lapack_complex_double* a, lapack_int lda,
+                          lapack_int* ipiv, lapack_complex_double* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dsgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                           double* a, lapack_int lda, lapack_int* ipiv,
+                           double* b, lapack_int ldb, double* x, lapack_int ldx,
+                           lapack_int* iter );
+lapack_int LAPACKE_zcgesv( int matrix_order, lapack_int n, lapack_int nrhs,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* ipiv, lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* x,
+                           lapack_int ldx, lapack_int* iter );
+
+lapack_int LAPACKE_sgesvd( int matrix_order, char jobu, char jobvt,
+                           lapack_int m, lapack_int n, float* a, lapack_int lda,
+                           float* s, float* u, lapack_int ldu, float* vt,
+                           lapack_int ldvt, float* superb );
+lapack_int LAPACKE_dgesvd( int matrix_order, char jobu, char jobvt,
+                           lapack_int m, lapack_int n, double* a,
+                           lapack_int lda, double* s, double* u, lapack_int ldu,
+                           double* vt, lapack_int ldvt, double* superb );
+lapack_int LAPACKE_cgesvd( int matrix_order, char jobu, char jobvt,
+                           lapack_int m, lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, float* s, lapack_complex_float* u,
+                           lapack_int ldu, lapack_complex_float* vt,
+                           lapack_int ldvt, float* superb );
+lapack_int LAPACKE_zgesvd( int matrix_order, char jobu, char jobvt,
+                           lapack_int m, lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, double* s, lapack_complex_double* u,
+                           lapack_int ldu, lapack_complex_double* vt,
+                           lapack_int ldvt, double* superb );
+
+lapack_int LAPACKE_sgesvj( int matrix_order, char joba, char jobu, char jobv,
+                           lapack_int m, lapack_int n, float* a, lapack_int lda,
+                           float* sva, lapack_int mv, float* v, lapack_int ldv,
+                           float* stat );
+lapack_int LAPACKE_dgesvj( int matrix_order, char joba, char jobu, char jobv,
+                           lapack_int m, lapack_int n, double* a,
+                           lapack_int lda, double* sva, lapack_int mv,
+                           double* v, lapack_int ldv, double* stat );
+
+lapack_int LAPACKE_sgesvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs, float* a,
+                           lapack_int lda, float* af, lapack_int ldaf,
+                           lapack_int* ipiv, char* equed, float* r, float* c,
+                           float* b, lapack_int ldb, float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr,
+                           float* rpivot );
+lapack_int LAPACKE_dgesvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs, double* a,
+                           lapack_int lda, double* af, lapack_int ldaf,
+                           lapack_int* ipiv, char* equed, double* r, double* c,
+                           double* b, lapack_int ldb, double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr,
+                           double* rpivot );
+lapack_int LAPACKE_cgesvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* af, lapack_int ldaf,
+                           lapack_int* ipiv, char* equed, float* r, float* c,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr,
+                           float* rpivot );
+lapack_int LAPACKE_zgesvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* af, lapack_int ldaf,
+                           lapack_int* ipiv, char* equed, double* r, double* c,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr,
+                           double* rpivot );
+
+lapack_int LAPACKE_sgesvxx( int matrix_order, char fact, char trans,
+                            lapack_int n, lapack_int nrhs, float* a,
+                            lapack_int lda, float* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, float* r, float* c,
+                            float* b, lapack_int ldb, float* x, lapack_int ldx,
+                            float* rcond, float* rpvgrw, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_dgesvxx( int matrix_order, char fact, char trans,
+                            lapack_int n, lapack_int nrhs, double* a,
+                            lapack_int lda, double* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, double* r, double* c,
+                            double* b, lapack_int ldb, double* x,
+                            lapack_int ldx, double* rcond, double* rpvgrw,
+                            double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+lapack_int LAPACKE_cgesvxx( int matrix_order, char fact, char trans,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_float* a, lapack_int lda,
+                            lapack_complex_float* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, float* r, float* c,
+                            lapack_complex_float* b, lapack_int ldb,
+                            lapack_complex_float* x, lapack_int ldx,
+                            float* rcond, float* rpvgrw, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_zgesvxx( int matrix_order, char fact, char trans,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_double* a, lapack_int lda,
+                            lapack_complex_double* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, double* r, double* c,
+                            lapack_complex_double* b, lapack_int ldb,
+                            lapack_complex_double* x, lapack_int ldx,
+                            double* rcond, double* rpvgrw, double* berr,
+                            lapack_int n_err_bnds, double* err_bnds_norm,
+                            double* err_bnds_comp, lapack_int nparams,
+                            double* params );
+
+lapack_int LAPACKE_sgetf2( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_dgetf2( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_cgetf2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* ipiv );
+lapack_int LAPACKE_zgetf2( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* ipiv );
+
+lapack_int LAPACKE_sgetrf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_dgetrf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_cgetrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* ipiv );
+lapack_int LAPACKE_zgetrf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* ipiv );
+
+lapack_int LAPACKE_sgetri( int matrix_order, lapack_int n, float* a,
+                           lapack_int lda, const lapack_int* ipiv );
+lapack_int LAPACKE_dgetri( int matrix_order, lapack_int n, double* a,
+                           lapack_int lda, const lapack_int* ipiv );
+lapack_int LAPACKE_cgetri( int matrix_order, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           const lapack_int* ipiv );
+lapack_int LAPACKE_zgetri( int matrix_order, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           const lapack_int* ipiv );
+
+lapack_int LAPACKE_sgetrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const float* a, lapack_int lda,
+                           const lapack_int* ipiv, float* b, lapack_int ldb );
+lapack_int LAPACKE_dgetrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const double* a, lapack_int lda,
+                           const lapack_int* ipiv, double* b, lapack_int ldb );
+lapack_int LAPACKE_cgetrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_int* ipiv,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zgetrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_int* ipiv,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sggbak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const float* lscale,
+                           const float* rscale, lapack_int m, float* v,
+                           lapack_int ldv );
+lapack_int LAPACKE_dggbak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const double* lscale,
+                           const double* rscale, lapack_int m, double* v,
+                           lapack_int ldv );
+lapack_int LAPACKE_cggbak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const float* lscale,
+                           const float* rscale, lapack_int m,
+                           lapack_complex_float* v, lapack_int ldv );
+lapack_int LAPACKE_zggbak( int matrix_order, char job, char side, lapack_int n,
+                           lapack_int ilo, lapack_int ihi, const double* lscale,
+                           const double* rscale, lapack_int m,
+                           lapack_complex_double* v, lapack_int ldv );
+
+lapack_int LAPACKE_sggbal( int matrix_order, char job, lapack_int n, float* a,
+                           lapack_int lda, float* b, lapack_int ldb,
+                           lapack_int* ilo, lapack_int* ihi, float* lscale,
+                           float* rscale );
+lapack_int LAPACKE_dggbal( int matrix_order, char job, lapack_int n, double* a,
+                           lapack_int lda, double* b, lapack_int ldb,
+                           lapack_int* ilo, lapack_int* ihi, double* lscale,
+                           double* rscale );
+lapack_int LAPACKE_cggbal( int matrix_order, char job, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_int* ilo, lapack_int* ihi, float* lscale,
+                           float* rscale );
+lapack_int LAPACKE_zggbal( int matrix_order, char job, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_int* ilo, lapack_int* ihi, double* lscale,
+                           double* rscale );
+
+lapack_int LAPACKE_sgges( int matrix_order, char jobvsl, char jobvsr, char sort,
+                          LAPACK_S_SELECT3 selctg, lapack_int n, float* a,
+                          lapack_int lda, float* b, lapack_int ldb,
+                          lapack_int* sdim, float* alphar, float* alphai,
+                          float* beta, float* vsl, lapack_int ldvsl, float* vsr,
+                          lapack_int ldvsr );
+lapack_int LAPACKE_dgges( int matrix_order, char jobvsl, char jobvsr, char sort,
+                          LAPACK_D_SELECT3 selctg, lapack_int n, double* a,
+                          lapack_int lda, double* b, lapack_int ldb,
+                          lapack_int* sdim, double* alphar, double* alphai,
+                          double* beta, double* vsl, lapack_int ldvsl,
+                          double* vsr, lapack_int ldvsr );
+lapack_int LAPACKE_cgges( int matrix_order, char jobvsl, char jobvsr, char sort,
+                          LAPACK_C_SELECT2 selctg, lapack_int n,
+                          lapack_complex_float* a, lapack_int lda,
+                          lapack_complex_float* b, lapack_int ldb,
+                          lapack_int* sdim, lapack_complex_float* alpha,
+                          lapack_complex_float* beta, lapack_complex_float* vsl,
+                          lapack_int ldvsl, lapack_complex_float* vsr,
+                          lapack_int ldvsr );
+lapack_int LAPACKE_zgges( int matrix_order, char jobvsl, char jobvsr, char sort,
+                          LAPACK_Z_SELECT2 selctg, lapack_int n,
+                          lapack_complex_double* a, lapack_int lda,
+                          lapack_complex_double* b, lapack_int ldb,
+                          lapack_int* sdim, lapack_complex_double* alpha,
+                          lapack_complex_double* beta,
+                          lapack_complex_double* vsl, lapack_int ldvsl,
+                          lapack_complex_double* vsr, lapack_int ldvsr );
+
+lapack_int LAPACKE_sggesx( int matrix_order, char jobvsl, char jobvsr,
+                           char sort, LAPACK_S_SELECT3 selctg, char sense,
+                           lapack_int n, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, lapack_int* sdim, float* alphar,
+                           float* alphai, float* beta, float* vsl,
+                           lapack_int ldvsl, float* vsr, lapack_int ldvsr,
+                           float* rconde, float* rcondv );
+lapack_int LAPACKE_dggesx( int matrix_order, char jobvsl, char jobvsr,
+                           char sort, LAPACK_D_SELECT3 selctg, char sense,
+                           lapack_int n, double* a, lapack_int lda, double* b,
+                           lapack_int ldb, lapack_int* sdim, double* alphar,
+                           double* alphai, double* beta, double* vsl,
+                           lapack_int ldvsl, double* vsr, lapack_int ldvsr,
+                           double* rconde, double* rcondv );
+lapack_int LAPACKE_cggesx( int matrix_order, char jobvsl, char jobvsr,
+                           char sort, LAPACK_C_SELECT2 selctg, char sense,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, lapack_int* sdim,
+                           lapack_complex_float* alpha,
+                           lapack_complex_float* beta,
+                           lapack_complex_float* vsl, lapack_int ldvsl,
+                           lapack_complex_float* vsr, lapack_int ldvsr,
+                           float* rconde, float* rcondv );
+lapack_int LAPACKE_zggesx( int matrix_order, char jobvsl, char jobvsr,
+                           char sort, LAPACK_Z_SELECT2 selctg, char sense,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, lapack_int* sdim,
+                           lapack_complex_double* alpha,
+                           lapack_complex_double* beta,
+                           lapack_complex_double* vsl, lapack_int ldvsl,
+                           lapack_complex_double* vsr, lapack_int ldvsr,
+                           double* rconde, double* rcondv );
+
+lapack_int LAPACKE_sggev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, float* a, lapack_int lda, float* b,
+                          lapack_int ldb, float* alphar, float* alphai,
+                          float* beta, float* vl, lapack_int ldvl, float* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_dggev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, double* a, lapack_int lda, double* b,
+                          lapack_int ldb, double* alphar, double* alphai,
+                          double* beta, double* vl, lapack_int ldvl, double* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_cggev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, lapack_complex_float* a, lapack_int lda,
+                          lapack_complex_float* b, lapack_int ldb,
+                          lapack_complex_float* alpha,
+                          lapack_complex_float* beta, lapack_complex_float* vl,
+                          lapack_int ldvl, lapack_complex_float* vr,
+                          lapack_int ldvr );
+lapack_int LAPACKE_zggev( int matrix_order, char jobvl, char jobvr,
+                          lapack_int n, lapack_complex_double* a,
+                          lapack_int lda, lapack_complex_double* b,
+                          lapack_int ldb, lapack_complex_double* alpha,
+                          lapack_complex_double* beta,
+                          lapack_complex_double* vl, lapack_int ldvl,
+                          lapack_complex_double* vr, lapack_int ldvr );
+
+lapack_int LAPACKE_sggevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n, float* a,
+                           lapack_int lda, float* b, lapack_int ldb,
+                           float* alphar, float* alphai, float* beta, float* vl,
+                           lapack_int ldvl, float* vr, lapack_int ldvr,
+                           lapack_int* ilo, lapack_int* ihi, float* lscale,
+                           float* rscale, float* abnrm, float* bbnrm,
+                           float* rconde, float* rcondv );
+lapack_int LAPACKE_dggevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n, double* a,
+                           lapack_int lda, double* b, lapack_int ldb,
+                           double* alphar, double* alphai, double* beta,
+                           double* vl, lapack_int ldvl, double* vr,
+                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
+                           double* lscale, double* rscale, double* abnrm,
+                           double* bbnrm, double* rconde, double* rcondv );
+lapack_int LAPACKE_cggevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* alpha,
+                           lapack_complex_float* beta, lapack_complex_float* vl,
+                           lapack_int ldvl, lapack_complex_float* vr,
+                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
+                           float* lscale, float* rscale, float* abnrm,
+                           float* bbnrm, float* rconde, float* rcondv );
+lapack_int LAPACKE_zggevx( int matrix_order, char balanc, char jobvl,
+                           char jobvr, char sense, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* alpha,
+                           lapack_complex_double* beta,
+                           lapack_complex_double* vl, lapack_int ldvl,
+                           lapack_complex_double* vr, lapack_int ldvr,
+                           lapack_int* ilo, lapack_int* ihi, double* lscale,
+                           double* rscale, double* abnrm, double* bbnrm,
+                           double* rconde, double* rcondv );
+
+lapack_int LAPACKE_sggglm( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, float* d, float* x, float* y );
+lapack_int LAPACKE_dggglm( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, double* a, lapack_int lda, double* b,
+                           lapack_int ldb, double* d, double* x, double* y );
+lapack_int LAPACKE_cggglm( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, lapack_complex_float* d,
+                           lapack_complex_float* x, lapack_complex_float* y );
+lapack_int LAPACKE_zggglm( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* d,
+                           lapack_complex_double* x, lapack_complex_double* y );
+
+lapack_int LAPACKE_sgghrd( int matrix_order, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           float* a, lapack_int lda, float* b, lapack_int ldb,
+                           float* q, lapack_int ldq, float* z, lapack_int ldz );
+lapack_int LAPACKE_dgghrd( int matrix_order, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           double* a, lapack_int lda, double* b, lapack_int ldb,
+                           double* q, lapack_int ldq, double* z,
+                           lapack_int ldz );
+lapack_int LAPACKE_cgghrd( int matrix_order, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* q, lapack_int ldq,
+                           lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zgghrd( int matrix_order, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* q, lapack_int ldq,
+                           lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_sgglse( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int p, float* a, lapack_int lda, float* b,
+                           lapack_int ldb, float* c, float* d, float* x );
+lapack_int LAPACKE_dgglse( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int p, double* a, lapack_int lda, double* b,
+                           lapack_int ldb, double* c, double* d, double* x );
+lapack_int LAPACKE_cgglse( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int p, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, lapack_complex_float* c,
+                           lapack_complex_float* d, lapack_complex_float* x );
+lapack_int LAPACKE_zgglse( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int p, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* c,
+                           lapack_complex_double* d, lapack_complex_double* x );
+
+lapack_int LAPACKE_sggqrf( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, float* a, lapack_int lda, float* taua,
+                           float* b, lapack_int ldb, float* taub );
+lapack_int LAPACKE_dggqrf( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, double* a, lapack_int lda,
+                           double* taua, double* b, lapack_int ldb,
+                           double* taub );
+lapack_int LAPACKE_cggqrf( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* taua,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* taub );
+lapack_int LAPACKE_zggqrf( int matrix_order, lapack_int n, lapack_int m,
+                           lapack_int p, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* taua,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* taub );
+
+lapack_int LAPACKE_sggrqf( int matrix_order, lapack_int m, lapack_int p,
+                           lapack_int n, float* a, lapack_int lda, float* taua,
+                           float* b, lapack_int ldb, float* taub );
+lapack_int LAPACKE_dggrqf( int matrix_order, lapack_int m, lapack_int p,
+                           lapack_int n, double* a, lapack_int lda,
+                           double* taua, double* b, lapack_int ldb,
+                           double* taub );
+lapack_int LAPACKE_cggrqf( int matrix_order, lapack_int m, lapack_int p,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* taua,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* taub );
+lapack_int LAPACKE_zggrqf( int matrix_order, lapack_int m, lapack_int p,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* taua,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* taub );
+
+lapack_int LAPACKE_sggsvd( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int n, lapack_int p,
+                           lapack_int* k, lapack_int* l, float* a,
+                           lapack_int lda, float* b, lapack_int ldb,
+                           float* alpha, float* beta, float* u, lapack_int ldu,
+                           float* v, lapack_int ldv, float* q, lapack_int ldq,
+                           lapack_int* iwork );
+lapack_int LAPACKE_dggsvd( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int n, lapack_int p,
+                           lapack_int* k, lapack_int* l, double* a,
+                           lapack_int lda, double* b, lapack_int ldb,
+                           double* alpha, double* beta, double* u,
+                           lapack_int ldu, double* v, lapack_int ldv, double* q,
+                           lapack_int ldq, lapack_int* iwork );
+lapack_int LAPACKE_cggsvd( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int n, lapack_int p,
+                           lapack_int* k, lapack_int* l,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb,
+                           float* alpha, float* beta, lapack_complex_float* u,
+                           lapack_int ldu, lapack_complex_float* v,
+                           lapack_int ldv, lapack_complex_float* q,
+                           lapack_int ldq, lapack_int* iwork );
+lapack_int LAPACKE_zggsvd( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int n, lapack_int p,
+                           lapack_int* k, lapack_int* l,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb,
+                           double* alpha, double* beta,
+                           lapack_complex_double* u, lapack_int ldu,
+                           lapack_complex_double* v, lapack_int ldv,
+                           lapack_complex_double* q, lapack_int ldq,
+                           lapack_int* iwork );
+
+lapack_int LAPACKE_sggsvp( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int p, lapack_int n, float* a,
+                           lapack_int lda, float* b, lapack_int ldb, float tola,
+                           float tolb, lapack_int* k, lapack_int* l, float* u,
+                           lapack_int ldu, float* v, lapack_int ldv, float* q,
+                           lapack_int ldq );
+lapack_int LAPACKE_dggsvp( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int p, lapack_int n, double* a,
+                           lapack_int lda, double* b, lapack_int ldb,
+                           double tola, double tolb, lapack_int* k,
+                           lapack_int* l, double* u, lapack_int ldu, double* v,
+                           lapack_int ldv, double* q, lapack_int ldq );
+lapack_int LAPACKE_cggsvp( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int p, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb, float tola,
+                           float tolb, lapack_int* k, lapack_int* l,
+                           lapack_complex_float* u, lapack_int ldu,
+                           lapack_complex_float* v, lapack_int ldv,
+                           lapack_complex_float* q, lapack_int ldq );
+lapack_int LAPACKE_zggsvp( int matrix_order, char jobu, char jobv, char jobq,
+                           lapack_int m, lapack_int p, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb,
+                           double tola, double tolb, lapack_int* k,
+                           lapack_int* l, lapack_complex_double* u,
+                           lapack_int ldu, lapack_complex_double* v,
+                           lapack_int ldv, lapack_complex_double* q,
+                           lapack_int ldq );
+
+lapack_int LAPACKE_sgtcon( char norm, lapack_int n, const float* dl,
+                           const float* d, const float* du, const float* du2,
+                           const lapack_int* ipiv, float anorm, float* rcond );
+lapack_int LAPACKE_dgtcon( char norm, lapack_int n, const double* dl,
+                           const double* d, const double* du, const double* du2,
+                           const lapack_int* ipiv, double anorm,
+                           double* rcond );
+lapack_int LAPACKE_cgtcon( char norm, lapack_int n,
+                           const lapack_complex_float* dl,
+                           const lapack_complex_float* d,
+                           const lapack_complex_float* du,
+                           const lapack_complex_float* du2,
+                           const lapack_int* ipiv, float anorm, float* rcond );
+lapack_int LAPACKE_zgtcon( char norm, lapack_int n,
+                           const lapack_complex_double* dl,
+                           const lapack_complex_double* d,
+                           const lapack_complex_double* du,
+                           const lapack_complex_double* du2,
+                           const lapack_int* ipiv, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_sgtrfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const float* dl, const float* d,
+                           const float* du, const float* dlf, const float* df,
+                           const float* duf, const float* du2,
+                           const lapack_int* ipiv, const float* b,
+                           lapack_int ldb, float* x, lapack_int ldx,
+                           float* ferr, float* berr );
+lapack_int LAPACKE_dgtrfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const double* dl, const double* d,
+                           const double* du, const double* dlf,
+                           const double* df, const double* duf,
+                           const double* du2, const lapack_int* ipiv,
+                           const double* b, lapack_int ldb, double* x,
+                           lapack_int ldx, double* ferr, double* berr );
+lapack_int LAPACKE_cgtrfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* dl,
+                           const lapack_complex_float* d,
+                           const lapack_complex_float* du,
+                           const lapack_complex_float* dlf,
+                           const lapack_complex_float* df,
+                           const lapack_complex_float* duf,
+                           const lapack_complex_float* du2,
+                           const lapack_int* ipiv,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zgtrfs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* dl,
+                           const lapack_complex_double* d,
+                           const lapack_complex_double* du,
+                           const lapack_complex_double* dlf,
+                           const lapack_complex_double* df,
+                           const lapack_complex_double* duf,
+                           const lapack_complex_double* du2,
+                           const lapack_int* ipiv,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_sgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          float* dl, float* d, float* du, float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          double* dl, double* d, double* du, double* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_cgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          lapack_complex_float* dl, lapack_complex_float* d,
+                          lapack_complex_float* du, lapack_complex_float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_zgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          lapack_complex_double* dl, lapack_complex_double* d,
+                          lapack_complex_double* du, lapack_complex_double* b,
+                          lapack_int ldb );
+
+lapack_int LAPACKE_sgtsvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs, const float* dl,
+                           const float* d, const float* du, float* dlf,
+                           float* df, float* duf, float* du2, lapack_int* ipiv,
+                           const float* b, lapack_int ldb, float* x,
+                           lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_dgtsvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs, const double* dl,
+                           const double* d, const double* du, double* dlf,
+                           double* df, double* duf, double* du2,
+                           lapack_int* ipiv, const double* b, lapack_int ldb,
+                           double* x, lapack_int ldx, double* rcond,
+                           double* ferr, double* berr );
+lapack_int LAPACKE_cgtsvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_float* dl,
+                           const lapack_complex_float* d,
+                           const lapack_complex_float* du,
+                           lapack_complex_float* dlf, lapack_complex_float* df,
+                           lapack_complex_float* duf, lapack_complex_float* du2,
+                           lapack_int* ipiv, const lapack_complex_float* b,
+                           lapack_int ldb, lapack_complex_float* x,
+                           lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zgtsvx( int matrix_order, char fact, char trans,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_double* dl,
+                           const lapack_complex_double* d,
+                           const lapack_complex_double* du,
+                           lapack_complex_double* dlf,
+                           lapack_complex_double* df,
+                           lapack_complex_double* duf,
+                           lapack_complex_double* du2, lapack_int* ipiv,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+
+lapack_int LAPACKE_sgttrf( lapack_int n, float* dl, float* d, float* du,
+                           float* du2, lapack_int* ipiv );
+lapack_int LAPACKE_dgttrf( lapack_int n, double* dl, double* d, double* du,
+                           double* du2, lapack_int* ipiv );
+lapack_int LAPACKE_cgttrf( lapack_int n, lapack_complex_float* dl,
+                           lapack_complex_float* d, lapack_complex_float* du,
+                           lapack_complex_float* du2, lapack_int* ipiv );
+lapack_int LAPACKE_zgttrf( lapack_int n, lapack_complex_double* dl,
+                           lapack_complex_double* d, lapack_complex_double* du,
+                           lapack_complex_double* du2, lapack_int* ipiv );
+
+lapack_int LAPACKE_sgttrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const float* dl, const float* d,
+                           const float* du, const float* du2,
+                           const lapack_int* ipiv, float* b, lapack_int ldb );
+lapack_int LAPACKE_dgttrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const double* dl, const double* d,
+                           const double* du, const double* du2,
+                           const lapack_int* ipiv, double* b, lapack_int ldb );
+lapack_int LAPACKE_cgttrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* dl,
+                           const lapack_complex_float* d,
+                           const lapack_complex_float* du,
+                           const lapack_complex_float* du2,
+                           const lapack_int* ipiv, lapack_complex_float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_zgttrs( int matrix_order, char trans, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* dl,
+                           const lapack_complex_double* d,
+                           const lapack_complex_double* du,
+                           const lapack_complex_double* du2,
+                           const lapack_int* ipiv, lapack_complex_double* b,
+                           lapack_int ldb );
+
+lapack_int LAPACKE_chbev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int kd, lapack_complex_float* ab,
+                          lapack_int ldab, float* w, lapack_complex_float* z,
+                          lapack_int ldz );
+lapack_int LAPACKE_zhbev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int kd, lapack_complex_double* ab,
+                          lapack_int ldab, double* w, lapack_complex_double* z,
+                          lapack_int ldz );
+
+lapack_int LAPACKE_chbevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_float* ab,
+                           lapack_int ldab, float* w, lapack_complex_float* z,
+                           lapack_int ldz );
+lapack_int LAPACKE_zhbevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_double* ab,
+                           lapack_int ldab, double* w, lapack_complex_double* z,
+                           lapack_int ldz );
+
+lapack_int LAPACKE_chbevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int kd,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           lapack_complex_float* q, lapack_int ldq, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, lapack_complex_float* z,
+                           lapack_int ldz, lapack_int* ifail );
+lapack_int LAPACKE_zhbevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int kd,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           lapack_complex_double* q, lapack_int ldq, double vl,
+                           double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w,
+                           lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_chbgst( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           const lapack_complex_float* bb, lapack_int ldbb,
+                           lapack_complex_float* x, lapack_int ldx );
+lapack_int LAPACKE_zhbgst( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           const lapack_complex_double* bb, lapack_int ldbb,
+                           lapack_complex_double* x, lapack_int ldx );
+
+lapack_int LAPACKE_chbgv( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int ka, lapack_int kb,
+                          lapack_complex_float* ab, lapack_int ldab,
+                          lapack_complex_float* bb, lapack_int ldbb, float* w,
+                          lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhbgv( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int ka, lapack_int kb,
+                          lapack_complex_double* ab, lapack_int ldab,
+                          lapack_complex_double* bb, lapack_int ldbb, double* w,
+                          lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_chbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           lapack_complex_float* bb, lapack_int ldbb, float* w,
+                           lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           lapack_complex_double* bb, lapack_int ldbb,
+                           double* w, lapack_complex_double* z,
+                           lapack_int ldz );
+
+lapack_int LAPACKE_chbgvx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int ka, lapack_int kb,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           lapack_complex_float* bb, lapack_int ldbb,
+                           lapack_complex_float* q, lapack_int ldq, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, lapack_complex_float* z,
+                           lapack_int ldz, lapack_int* ifail );
+lapack_int LAPACKE_zhbgvx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int ka, lapack_int kb,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           lapack_complex_double* bb, lapack_int ldbb,
+                           lapack_complex_double* q, lapack_int ldq, double vl,
+                           double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w,
+                           lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_chbtrd( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_float* ab,
+                           lapack_int ldab, float* d, float* e,
+                           lapack_complex_float* q, lapack_int ldq );
+lapack_int LAPACKE_zhbtrd( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_double* ab,
+                           lapack_int ldab, double* d, double* e,
+                           lapack_complex_double* q, lapack_int ldq );
+
+lapack_int LAPACKE_checon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_int* ipiv, float anorm, float* rcond );
+lapack_int LAPACKE_zhecon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_int* ipiv, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_cheequb( int matrix_order, char uplo, lapack_int n,
+                            const lapack_complex_float* a, lapack_int lda,
+                            float* s, float* scond, float* amax );
+lapack_int LAPACKE_zheequb( int matrix_order, char uplo, lapack_int n,
+                            const lapack_complex_double* a, lapack_int lda,
+                            double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_cheev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_complex_float* a, lapack_int lda, float* w );
+lapack_int LAPACKE_zheev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_complex_double* a, lapack_int lda, double* w );
+
+lapack_int LAPACKE_cheevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda, float* w );
+lapack_int LAPACKE_zheevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           double* w );
+
+lapack_int LAPACKE_cheevr( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, float vl, float vu, lapack_int il,
+                           lapack_int iu, float abstol, lapack_int* m, float* w,
+                           lapack_complex_float* z, lapack_int ldz,
+                           lapack_int* isuppz );
+lapack_int LAPACKE_zheevr( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, double vl, double vu, lapack_int il,
+                           lapack_int iu, double abstol, lapack_int* m,
+                           double* w, lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* isuppz );
+
+lapack_int LAPACKE_cheevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, float vl, float vu, lapack_int il,
+                           lapack_int iu, float abstol, lapack_int* m, float* w,
+                           lapack_complex_float* z, lapack_int ldz,
+                           lapack_int* ifail );
+lapack_int LAPACKE_zheevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, double vl, double vu, lapack_int il,
+                           lapack_int iu, double abstol, lapack_int* m,
+                           double* w, lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_chegst( int matrix_order, lapack_int itype, char uplo,
+                           lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_zhegst( int matrix_order, lapack_int itype, char uplo,
+                           lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* b,
+                           lapack_int ldb );
+
+lapack_int LAPACKE_chegv( int matrix_order, lapack_int itype, char jobz,
+                          char uplo, lapack_int n, lapack_complex_float* a,
+                          lapack_int lda, lapack_complex_float* b,
+                          lapack_int ldb, float* w );
+lapack_int LAPACKE_zhegv( int matrix_order, lapack_int itype, char jobz,
+                          char uplo, lapack_int n, lapack_complex_double* a,
+                          lapack_int lda, lapack_complex_double* b,
+                          lapack_int ldb, double* w );
+
+lapack_int LAPACKE_chegvd( int matrix_order, lapack_int itype, char jobz,
+                           char uplo, lapack_int n, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb, float* w );
+lapack_int LAPACKE_zhegvd( int matrix_order, lapack_int itype, char jobz,
+                           char uplo, lapack_int n, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, double* w );
+
+lapack_int LAPACKE_chegvx( int matrix_order, lapack_int itype, char jobz,
+                           char range, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, lapack_complex_float* z,
+                           lapack_int ldz, lapack_int* ifail );
+lapack_int LAPACKE_zhegvx( int matrix_order, lapack_int itype, char jobz,
+                           char range, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb, double vl,
+                           double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w,
+                           lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_cherfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* af,
+                           lapack_int ldaf, const lapack_int* ipiv,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zherfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* af,
+                           lapack_int ldaf, const lapack_int* ipiv,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_cherfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_float* a, lapack_int lda,
+                            const lapack_complex_float* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const float* s,
+                            const lapack_complex_float* b, lapack_int ldb,
+                            lapack_complex_float* x, lapack_int ldx,
+                            float* rcond, float* berr, lapack_int n_err_bnds,
+                            float* err_bnds_norm, float* err_bnds_comp,
+                            lapack_int nparams, float* params );
+lapack_int LAPACKE_zherfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_double* a, lapack_int lda,
+                            const lapack_complex_double* af, lapack_int ldaf,
+                            const lapack_int* ipiv, const double* s,
+                            const lapack_complex_double* b, lapack_int ldb,
+                            lapack_complex_double* x, lapack_int ldx,
+                            double* rcond, double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+
+lapack_int LAPACKE_chesv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_float* a,
+                          lapack_int lda, lapack_int* ipiv,
+                          lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zhesv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_double* a,
+                          lapack_int lda, lapack_int* ipiv,
+                          lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_chesvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* af,
+                           lapack_int ldaf, lapack_int* ipiv,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr );
+lapack_int LAPACKE_zhesvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* af,
+                           lapack_int ldaf, lapack_int* ipiv,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+
+lapack_int LAPACKE_chesvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_float* a, lapack_int lda,
+                            lapack_complex_float* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, float* s,
+                            lapack_complex_float* b, lapack_int ldb,
+                            lapack_complex_float* x, lapack_int ldx,
+                            float* rcond, float* rpvgrw, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_zhesvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_double* a, lapack_int lda,
+                            lapack_complex_double* af, lapack_int ldaf,
+                            lapack_int* ipiv, char* equed, double* s,
+                            lapack_complex_double* b, lapack_int ldb,
+                            lapack_complex_double* x, lapack_int ldx,
+                            double* rcond, double* rpvgrw, double* berr,
+                            lapack_int n_err_bnds, double* err_bnds_norm,
+                            double* err_bnds_comp, lapack_int nparams,
+                            double* params );
+
+lapack_int LAPACKE_chetrd( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda, float* d,
+                           float* e, lapack_complex_float* tau );
+lapack_int LAPACKE_zhetrd( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda, double* d,
+                           double* e, lapack_complex_double* tau );
+
+lapack_int LAPACKE_chetrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* ipiv );
+lapack_int LAPACKE_zhetrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* ipiv );
+
+lapack_int LAPACKE_chetri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           const lapack_int* ipiv );
+lapack_int LAPACKE_zhetri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           const lapack_int* ipiv );
+
+lapack_int LAPACKE_chetrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_int* ipiv,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zhetrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_int* ipiv,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_chfrk( int matrix_order, char transr, char uplo, char trans,
+                          lapack_int n, lapack_int k, float alpha,
+                          const lapack_complex_float* a, lapack_int lda,
+                          float beta, lapack_complex_float* c );
+lapack_int LAPACKE_zhfrk( int matrix_order, char transr, char uplo, char trans,
+                          lapack_int n, lapack_int k, double alpha,
+                          const lapack_complex_double* a, lapack_int lda,
+                          double beta, lapack_complex_double* c );
+
+lapack_int LAPACKE_shgeqz( int matrix_order, char job, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           float* h, lapack_int ldh, float* t, lapack_int ldt,
+                           float* alphar, float* alphai, float* beta, float* q,
+                           lapack_int ldq, float* z, lapack_int ldz );
+lapack_int LAPACKE_dhgeqz( int matrix_order, char job, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           double* h, lapack_int ldh, double* t, lapack_int ldt,
+                           double* alphar, double* alphai, double* beta,
+                           double* q, lapack_int ldq, double* z,
+                           lapack_int ldz );
+lapack_int LAPACKE_chgeqz( int matrix_order, char job, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           lapack_complex_float* h, lapack_int ldh,
+                           lapack_complex_float* t, lapack_int ldt,
+                           lapack_complex_float* alpha,
+                           lapack_complex_float* beta, lapack_complex_float* q,
+                           lapack_int ldq, lapack_complex_float* z,
+                           lapack_int ldz );
+lapack_int LAPACKE_zhgeqz( int matrix_order, char job, char compq, char compz,
+                           lapack_int n, lapack_int ilo, lapack_int ihi,
+                           lapack_complex_double* h, lapack_int ldh,
+                           lapack_complex_double* t, lapack_int ldt,
+                           lapack_complex_double* alpha,
+                           lapack_complex_double* beta,
+                           lapack_complex_double* q, lapack_int ldq,
+                           lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_chpcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* ap,
+                           const lapack_int* ipiv, float anorm, float* rcond );
+lapack_int LAPACKE_zhpcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* ap,
+                           const lapack_int* ipiv, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_chpev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_complex_float* ap, float* w,
+                          lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhpev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_complex_double* ap, double* w,
+                          lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_chpevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_complex_float* ap, float* w,
+                           lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhpevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_complex_double* ap, double* w,
+                           lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_chpevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_float* ap, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, lapack_complex_float* z,
+                           lapack_int ldz, lapack_int* ifail );
+lapack_int LAPACKE_zhpevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_complex_double* ap, double vl,
+                           double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w,
+                           lapack_complex_double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_chpgst( int matrix_order, lapack_int itype, char uplo,
+                           lapack_int n, lapack_complex_float* ap,
+                           const lapack_complex_float* bp );
+lapack_int LAPACKE_zhpgst( int matrix_order, lapack_int itype, char uplo,
+                           lapack_int n, lapack_complex_double* ap,
+                           const lapack_complex_double* bp );
+
+lapack_int LAPACKE_chpgv( int matrix_order, lapack_int itype, char jobz,
+                          char uplo, lapack_int n, lapack_complex_float* ap,
+                          lapack_complex_float* bp, float* w,
+                          lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhpgv( int matrix_order, lapack_int itype, char jobz,
+                          char uplo, lapack_int n, lapack_complex_double* ap,
+                          lapack_complex_double* bp, double* w,
+                          lapack_complex_double* z, lapack_int ldz );
+
+lapack_int LAPACKE_chpgvd( int matrix_order, lapack_int itype, char jobz,
+                           char uplo, lapack_int n, lapack_complex_float* ap,
+                           lapack_complex_float* bp, float* w,
+                           lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zhpgvd( int matrix_order, lapack_int itype, char jobz,
+                           char uplo, lapack_int n, lapack_complex_double* ap,
+
lapack_complex_double* bp, double* w, + lapack_complex_double* z, lapack_int ldz ); + +lapack_int LAPACKE_chpgvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, + lapack_complex_float* ap, lapack_complex_float* bp, + float vl, float vu, lapack_int il, lapack_int iu, + float abstol, lapack_int* m, float* w, + lapack_complex_float* z, lapack_int ldz, + lapack_int* ifail ); +lapack_int LAPACKE_zhpgvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, + lapack_complex_double* ap, lapack_complex_double* bp, + double vl, double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, + lapack_complex_double* z, lapack_int ldz, + lapack_int* ifail ); + +lapack_int LAPACKE_chprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + const lapack_complex_float* afp, + const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, float* ferr, + float* berr ); +lapack_int LAPACKE_zhprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + const lapack_complex_double* afp, + const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_chpsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_float* ap, + lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zhpsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_double* ap, + lapack_int* ipiv, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_chpsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + lapack_complex_float* afp, lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr ); +lapack_int LAPACKE_zhpsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + lapack_complex_double* afp, lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr ); + +lapack_int LAPACKE_chptrd( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* ap, float* d, float* e, + lapack_complex_float* tau ); +lapack_int LAPACKE_zhptrd( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* ap, double* d, double* e, + lapack_complex_double* tau ); + +lapack_int LAPACKE_chptrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* ap, lapack_int* ipiv ); +lapack_int LAPACKE_zhptrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* ap, lapack_int* ipiv ); + +lapack_int LAPACKE_chptri( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* ap, const lapack_int* ipiv ); +lapack_int LAPACKE_zhptri( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* ap, const lapack_int* ipiv ); + +lapack_int LAPACKE_chptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + const lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zhptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + const lapack_int* ipiv, lapack_complex_double* b, + lapack_int 
ldb ); + +lapack_int LAPACKE_shsein( int matrix_order, char job, char eigsrc, char initv, + lapack_logical* select, lapack_int n, const float* h, + lapack_int ldh, float* wr, const float* wi, + float* vl, lapack_int ldvl, float* vr, + lapack_int ldvr, lapack_int mm, lapack_int* m, + lapack_int* ifaill, lapack_int* ifailr ); +lapack_int LAPACKE_dhsein( int matrix_order, char job, char eigsrc, char initv, + lapack_logical* select, lapack_int n, + const double* h, lapack_int ldh, double* wr, + const double* wi, double* vl, lapack_int ldvl, + double* vr, lapack_int ldvr, lapack_int mm, + lapack_int* m, lapack_int* ifaill, + lapack_int* ifailr ); +lapack_int LAPACKE_chsein( int matrix_order, char job, char eigsrc, char initv, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* h, lapack_int ldh, + lapack_complex_float* w, lapack_complex_float* vl, + lapack_int ldvl, lapack_complex_float* vr, + lapack_int ldvr, lapack_int mm, lapack_int* m, + lapack_int* ifaill, lapack_int* ifailr ); +lapack_int LAPACKE_zhsein( int matrix_order, char job, char eigsrc, char initv, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* h, lapack_int ldh, + lapack_complex_double* w, lapack_complex_double* vl, + lapack_int ldvl, lapack_complex_double* vr, + lapack_int ldvr, lapack_int mm, lapack_int* m, + lapack_int* ifaill, lapack_int* ifailr ); + +lapack_int LAPACKE_shseqr( int matrix_order, char job, char compz, lapack_int n, + lapack_int ilo, lapack_int ihi, float* h, + lapack_int ldh, float* wr, float* wi, float* z, + lapack_int ldz ); +lapack_int LAPACKE_dhseqr( int matrix_order, char job, char compz, lapack_int n, + lapack_int ilo, lapack_int ihi, double* h, + lapack_int ldh, double* wr, double* wi, double* z, + lapack_int ldz ); +lapack_int LAPACKE_chseqr( int matrix_order, char job, char compz, lapack_int n, + lapack_int ilo, lapack_int ihi, + lapack_complex_float* h, lapack_int ldh, + lapack_complex_float* w, lapack_complex_float* z, + lapack_int ldz ); +lapack_int LAPACKE_zhseqr( int matrix_order, char job, char compz, lapack_int n, + lapack_int ilo, lapack_int ihi, + lapack_complex_double* h, lapack_int ldh, + lapack_complex_double* w, lapack_complex_double* z, + lapack_int ldz ); + +lapack_int LAPACKE_clacgv( lapack_int n, lapack_complex_float* x, + lapack_int incx ); +lapack_int LAPACKE_zlacgv( lapack_int n, lapack_complex_double* x, + lapack_int incx ); + +lapack_int LAPACKE_slacpy( int matrix_order, char uplo, lapack_int m, + lapack_int n, const float* a, lapack_int lda, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dlacpy( int matrix_order, char uplo, lapack_int m, + lapack_int n, const double* a, lapack_int lda, double* b, + lapack_int ldb ); +lapack_int LAPACKE_clacpy( int matrix_order, char uplo, lapack_int m, + lapack_int n, const lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zlacpy( int matrix_order, char uplo, lapack_int m, + lapack_int n, const lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_zlag2c( int matrix_order, lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_float* sa, lapack_int ldsa ); + +lapack_int LAPACKE_slag2d( int matrix_order, lapack_int m, lapack_int n, + const float* sa, lapack_int ldsa, double* a, + lapack_int lda ); + +lapack_int LAPACKE_dlag2s( int matrix_order, lapack_int m, lapack_int n, + const double* a, lapack_int lda, float* sa, + lapack_int ldsa ); 
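Editorial aside, not part of the patch: the prototypes above are the LAPACKE "high-level" interface, which allocates workspace internally, so a standard eigensolve needs only the call itself. A minimal sketch with LAPACKE_zheev (declared earlier in this hunk), assuming the default mapping of lapack_complex_double to C99 double _Complex:

/* Eigenvalues and eigenvectors of a 2x2 Hermitian matrix via LAPACKE_zheev.
 * Editor's sketch: assumes lapack_complex_double is C99 double _Complex
 * (the default when neither LAPACK_COMPLEX_STRUCTURE nor
 * LAPACK_COMPLEX_CPP is defined). */
#include <complex.h>
#include <stdio.h>
#include <lapacke.h>

int main(void)
{
    lapack_int n = 2;
    /* Row-major upper triangle of A = [[2, i], [-i, 3]]; the strictly
     * lower part is not referenced when uplo == 'U'. */
    lapack_complex_double a[4] = { 2.0, 1.0*I,
                                   0.0, 3.0 };
    double w[2]; /* eigenvalues, ascending on exit */
    lapack_int info = LAPACKE_zheev( LAPACK_ROW_MAJOR, 'V', 'U', n, a, n, w );
    if( info != 0 )          /* info > 0: no convergence; info < 0: bad arg */
        return (int)info;
    printf( "lambda = %f, %f\n", w[0], w[1] ); /* a now holds eigenvectors */
    return 0;
}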
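The hegv/hegvd/hegvx family above solves the generalized Hermitian-definite problem A*x = lambda*B*x. A hedged sketch of LAPACKE_zhegv with itype = 1, under the same complex-type assumption as the previous example:

/* Generalized eigenproblem A*x = lambda*B*x via LAPACKE_zhegv (itype = 1).
 * Editor's sketch: on exit B is overwritten by its Cholesky factor and,
 * with jobz = 'V', A by the B-orthonormal eigenvectors. */
#include <complex.h>
#include <stdio.h>
#include <lapacke.h>

int main(void)
{
    lapack_int n = 2;
    lapack_complex_double a[4] = { 2.0, 1.0*I,
                                   0.0, 3.0 };   /* Hermitian A, upper part */
    lapack_complex_double b[4] = { 1.0, 0.0,
                                   0.0, 2.0 };   /* Hermitian positive definite B */
    double w[2];
    lapack_int info = LAPACKE_zhegv( LAPACK_ROW_MAJOR, 1, 'V', 'U', n,
                                     a, n, b, n, w );
    if( info != 0 )
        return (int)info;
    printf( "lambda = %f, %f\n", w[0], w[1] );
    return 0;
}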
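Similarly, the hesv/hetrf/hetrs group is the Hermitian-indefinite linear solver (Bunch-Kaufman factorization). A sketch of the one-call driver; solve_hermitian and its n-sized ipiv allocation are illustrative, not from the header:

/* Solve A*x = b for Hermitian indefinite A with LAPACKE_zhesv; b is
 * overwritten by x and ipiv receives the pivot indices. Editor's sketch. */
#include <stdlib.h>
#include <complex.h>
#include <lapacke.h>

/* Returns the LAPACKE info code (0 on success); -1 here also flags a
 * failed ipiv allocation, which the caller must disambiguate. */
lapack_int solve_hermitian( lapack_int n, lapack_complex_double* a,
                            lapack_complex_double* b )
{
    lapack_int* ipiv = malloc( (size_t)n * sizeof *ipiv );
    if( !ipiv )
        return -1;
    /* one right-hand side, column-major storage: lda = ldb = n */
    lapack_int info = LAPACKE_zhesv( LAPACK_COL_MAJOR, 'L', n, 1,
                                     a, n, ipiv, b, n );
    free( ipiv );
    return info;
}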
+
+lapack_int LAPACKE_clag2z( int matrix_order, lapack_int m, lapack_int n,
+                           const lapack_complex_float* sa, lapack_int ldsa,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_slagge( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku, const float* d,
+                           float* a, lapack_int lda, lapack_int* iseed );
+lapack_int LAPACKE_dlagge( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku, const double* d,
+                           double* a, lapack_int lda, lapack_int* iseed );
+lapack_int LAPACKE_clagge( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku, const float* d,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* iseed );
+lapack_int LAPACKE_zlagge( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int kl, lapack_int ku, const double* d,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* iseed );
+
+float LAPACKE_slamch( char cmach );
+double LAPACKE_dlamch( char cmach );
+
+float LAPACKE_slange( int matrix_order, char norm, lapack_int m,
+                      lapack_int n, const float* a, lapack_int lda );
+double LAPACKE_dlange( int matrix_order, char norm, lapack_int m,
+                       lapack_int n, const double* a, lapack_int lda );
+float LAPACKE_clange( int matrix_order, char norm, lapack_int m,
+                      lapack_int n, const lapack_complex_float* a,
+                      lapack_int lda );
+double LAPACKE_zlange( int matrix_order, char norm, lapack_int m,
+                       lapack_int n, const lapack_complex_double* a,
+                       lapack_int lda );
+
+float LAPACKE_clanhe( int matrix_order, char norm, char uplo, lapack_int n,
+                      const lapack_complex_float* a, lapack_int lda );
+double LAPACKE_zlanhe( int matrix_order, char norm, char uplo, lapack_int n,
+                       const lapack_complex_double* a, lapack_int lda );
+
+float LAPACKE_slansy( int matrix_order, char norm, char uplo, lapack_int n,
+                      const float* a, lapack_int lda );
+double LAPACKE_dlansy( int matrix_order, char norm, char uplo, lapack_int n,
+                       const double* a, lapack_int lda );
+float LAPACKE_clansy( int matrix_order, char norm, char uplo, lapack_int n,
+                      const lapack_complex_float* a, lapack_int lda );
+double LAPACKE_zlansy( int matrix_order, char norm, char uplo, lapack_int n,
+                       const lapack_complex_double* a, lapack_int lda );
+
+float LAPACKE_slantr( int matrix_order, char norm, char uplo, char diag,
+                      lapack_int m, lapack_int n, const float* a,
+                      lapack_int lda );
+double LAPACKE_dlantr( int matrix_order, char norm, char uplo, char diag,
+                       lapack_int m, lapack_int n, const double* a,
+                       lapack_int lda );
+float LAPACKE_clantr( int matrix_order, char norm, char uplo, char diag,
+                      lapack_int m, lapack_int n, const lapack_complex_float* a,
+                      lapack_int lda );
+double LAPACKE_zlantr( int matrix_order, char norm, char uplo, char diag,
+                       lapack_int m, lapack_int n, const lapack_complex_double* a,
+                       lapack_int lda );
+
+
+lapack_int LAPACKE_slarfb( int matrix_order, char side, char trans, char direct,
+                           char storev, lapack_int m, lapack_int n,
+                           lapack_int k, const float* v, lapack_int ldv,
+                           const float* t, lapack_int ldt, float* c,
+                           lapack_int ldc );
+lapack_int LAPACKE_dlarfb( int matrix_order, char side, char trans, char direct,
+                           char storev, lapack_int m, lapack_int n,
+                           lapack_int k, const double* v, lapack_int ldv,
+                           const double* t, lapack_int ldt, double* c,
+                           lapack_int ldc );
+lapack_int LAPACKE_clarfb( int matrix_order, char side, char trans, char direct,
+                           char storev, lapack_int m, lapack_int n,
+                           lapack_int k, const lapack_complex_float* v,
+                           lapack_int ldv, const lapack_complex_float* t,
+                           lapack_int ldt, lapack_complex_float* c,
+                           lapack_int ldc );
+lapack_int LAPACKE_zlarfb( int matrix_order, char side, char trans, char direct,
+                           char storev, lapack_int m, lapack_int n,
+                           lapack_int k, const lapack_complex_double* v,
+                           lapack_int ldv, const lapack_complex_double* t,
+                           lapack_int ldt, lapack_complex_double* c,
+                           lapack_int ldc );
+
+lapack_int LAPACKE_slarfg( lapack_int n, float* alpha, float* x,
+                           lapack_int incx, float* tau );
+lapack_int LAPACKE_dlarfg( lapack_int n, double* alpha, double* x,
+                           lapack_int incx, double* tau );
+lapack_int LAPACKE_clarfg( lapack_int n, lapack_complex_float* alpha,
+                           lapack_complex_float* x, lapack_int incx,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_zlarfg( lapack_int n, lapack_complex_double* alpha,
+                           lapack_complex_double* x, lapack_int incx,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_slarft( int matrix_order, char direct, char storev,
+                           lapack_int n, lapack_int k, const float* v,
+                           lapack_int ldv, const float* tau, float* t,
+                           lapack_int ldt );
+lapack_int LAPACKE_dlarft( int matrix_order, char direct, char storev,
+                           lapack_int n, lapack_int k, const double* v,
+                           lapack_int ldv, const double* tau, double* t,
+                           lapack_int ldt );
+lapack_int LAPACKE_clarft( int matrix_order, char direct, char storev,
+                           lapack_int n, lapack_int k,
+                           const lapack_complex_float* v, lapack_int ldv,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* t, lapack_int ldt );
+lapack_int LAPACKE_zlarft( int matrix_order, char direct, char storev,
+                           lapack_int n, lapack_int k,
+                           const lapack_complex_double* v, lapack_int ldv,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* t, lapack_int ldt );
+
+lapack_int LAPACKE_slarfx( int matrix_order, char side, lapack_int m,
+                           lapack_int n, const float* v, float tau, float* c,
+                           lapack_int ldc, float* work );
+lapack_int LAPACKE_dlarfx( int matrix_order, char side, lapack_int m,
+                           lapack_int n, const double* v, double tau, double* c,
+                           lapack_int ldc, double* work );
+lapack_int LAPACKE_clarfx( int matrix_order, char side, lapack_int m,
+                           lapack_int n, const lapack_complex_float* v,
+                           lapack_complex_float tau, lapack_complex_float* c,
+                           lapack_int ldc, lapack_complex_float* work );
+lapack_int LAPACKE_zlarfx( int matrix_order, char side, lapack_int m,
+                           lapack_int n, const lapack_complex_double* v,
+                           lapack_complex_double tau, lapack_complex_double* c,
+                           lapack_int ldc, lapack_complex_double* work );
+
+lapack_int LAPACKE_slarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
+                           float* x );
+lapack_int LAPACKE_dlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
+                           double* x );
+lapack_int LAPACKE_clarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
+                           lapack_complex_float* x );
+lapack_int LAPACKE_zlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
+                           lapack_complex_double* x );
+
+lapack_int LAPACKE_slaset( int matrix_order, char uplo, lapack_int m,
+                           lapack_int n, float alpha, float beta, float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_dlaset( int matrix_order, char uplo, lapack_int m,
+                           lapack_int n, double alpha, double beta, double* a,
+                           lapack_int lda );
+lapack_int LAPACKE_claset( int matrix_order, char uplo, lapack_int m,
+                           lapack_int n, lapack_complex_float alpha,
+                           lapack_complex_float beta, lapack_complex_float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_zlaset( int matrix_order, char uplo, lapack_int m,
+                           lapack_int n, lapack_complex_double alpha,
+                           lapack_complex_double beta, lapack_complex_double* a,
+                           lapack_int lda );
+
+lapack_int LAPACKE_slasrt( char id, lapack_int n, float* d );
+lapack_int LAPACKE_dlasrt( char id, lapack_int n, double* d );
+
+lapack_int LAPACKE_slaswp( int matrix_order, lapack_int n, float* a,
+                           lapack_int lda, lapack_int k1, lapack_int k2,
+                           const lapack_int* ipiv, lapack_int incx );
+lapack_int LAPACKE_dlaswp( int matrix_order, lapack_int n, double* a,
+                           lapack_int lda, lapack_int k1, lapack_int k2,
+                           const lapack_int* ipiv, lapack_int incx );
+lapack_int LAPACKE_claswp( int matrix_order, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,
+                           lapack_int incx );
+lapack_int LAPACKE_zlaswp( int matrix_order, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,
+                           lapack_int incx );
+
+lapack_int LAPACKE_slatms( int matrix_order, lapack_int m, lapack_int n,
+                           char dist, lapack_int* iseed, char sym, float* d,
+                           lapack_int mode, float cond, float dmax,
+                           lapack_int kl, lapack_int ku, char pack, float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_dlatms( int matrix_order, lapack_int m, lapack_int n,
+                           char dist, lapack_int* iseed, char sym, double* d,
+                           lapack_int mode, double cond, double dmax,
+                           lapack_int kl, lapack_int ku, char pack, double* a,
+                           lapack_int lda );
+lapack_int LAPACKE_clatms( int matrix_order, lapack_int m, lapack_int n,
+                           char dist, lapack_int* iseed, char sym, float* d,
+                           lapack_int mode, float cond, float dmax,
+                           lapack_int kl, lapack_int ku, char pack,
+                           lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zlatms( int matrix_order, lapack_int m, lapack_int n,
+                           char dist, lapack_int* iseed, char sym, double* d,
+                           lapack_int mode, double cond, double dmax,
+                           lapack_int kl, lapack_int ku, char pack,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_slauum( int matrix_order, char uplo, lapack_int n, float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_dlauum( int matrix_order, char uplo, lapack_int n, double* a,
+                           lapack_int lda );
+lapack_int LAPACKE_clauum( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zlauum( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_sopgtr( int matrix_order, char uplo, lapack_int n,
+                           const float* ap, const float* tau, float* q,
+                           lapack_int ldq );
+lapack_int LAPACKE_dopgtr( int matrix_order, char uplo, lapack_int n,
+                           const double* ap, const double* tau, double* q,
+                           lapack_int ldq );
+
+lapack_int LAPACKE_sopmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n, const float* ap,
+                           const float* tau, float* c, lapack_int ldc );
+lapack_int LAPACKE_dopmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n, const double* ap,
+                           const double* tau, double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sorgbr( int matrix_order, char vect, lapack_int m,
+                           lapack_int n, lapack_int k, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorgbr( int matrix_order, char vect, lapack_int m,
+                           lapack_int n, lapack_int k, double* a,
+                           lapack_int lda, const double* tau );
+
+lapack_int LAPACKE_sorghr( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorghr( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, double* a, lapack_int lda,
+                           const double* tau );
+
+lapack_int LAPACKE_sorglq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorglq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, double* a, lapack_int lda,
+                           const double* tau );
+
+lapack_int LAPACKE_sorgql( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorgql( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, double* a, lapack_int lda,
+                           const double* tau );
+
+lapack_int LAPACKE_sorgqr( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorgqr( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, double* a, lapack_int lda,
+                           const double* tau );
+
+lapack_int LAPACKE_sorgrq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, float* a, lapack_int lda,
+                           const float* tau );
+lapack_int LAPACKE_dorgrq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, double* a, lapack_int lda,
+                           const double* tau );
+
+lapack_int LAPACKE_sorgtr( int matrix_order, char uplo, lapack_int n, float* a,
+                           lapack_int lda, const float* tau );
+lapack_int LAPACKE_dorgtr( int matrix_order, char uplo, lapack_int n, double* a,
+                           lapack_int lda, const double* tau );
+
+lapack_int LAPACKE_sormbr( int matrix_order, char vect, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const float* a, lapack_int lda, const float* tau,
+                           float* c, lapack_int ldc );
+lapack_int LAPACKE_dormbr( int matrix_order, char vect, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const double* a, lapack_int lda, const double* tau,
+                           double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormhr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, const float* a, lapack_int lda,
+                           const float* tau, float* c, lapack_int ldc );
+lapack_int LAPACKE_dormhr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, const double* a, lapack_int lda,
+                           const double* tau, double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormlq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const float* a, lapack_int lda, const float* tau,
+                           float* c, lapack_int ldc );
+lapack_int LAPACKE_dormlq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const double* a, lapack_int lda, const double* tau,
+                           double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormql( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const float* a, lapack_int lda, const float* tau,
+                           float* c, lapack_int ldc );
+lapack_int LAPACKE_dormql( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const double* a, lapack_int lda, const double* tau,
+                           double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormqr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const float* a, lapack_int lda, const float* tau,
+                           float* c, lapack_int ldc );
+lapack_int LAPACKE_dormqr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const double* a, lapack_int lda, const double* tau,
+                           double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormrq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const float* a, lapack_int lda, const float* tau,
+                           float* c, lapack_int ldc );
+lapack_int LAPACKE_dormrq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const double* a, lapack_int lda, const double* tau,
+                           double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormrz( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           lapack_int l, const float* a, lapack_int lda,
+                           const float* tau, float* c, lapack_int ldc );
+lapack_int LAPACKE_dormrz( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           lapack_int l, const double* a, lapack_int lda,
+                           const double* tau, double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sormtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n, const float* a,
+                           lapack_int lda, const float* tau, float* c,
+                           lapack_int ldc );
+lapack_int LAPACKE_dormtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n, const double* a,
+                           lapack_int lda, const double* tau, double* c,
+                           lapack_int ldc );
+
+lapack_int LAPACKE_spbcon( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const float* ab, lapack_int ldab,
+                           float anorm, float* rcond );
+lapack_int LAPACKE_dpbcon( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const double* ab, lapack_int ldab,
+                           double anorm, double* rcond );
+lapack_int LAPACKE_cpbcon( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const lapack_complex_float* ab,
+                           lapack_int ldab, float anorm, float* rcond );
+lapack_int LAPACKE_zpbcon( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const lapack_complex_double* ab,
+                           lapack_int ldab, double anorm, double* rcond );
+
+lapack_int LAPACKE_spbequ( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const float* ab, lapack_int ldab,
+                           float* s, float* scond, float* amax );
+lapack_int LAPACKE_dpbequ( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const double* ab, lapack_int ldab,
+                           double* s, double* scond, double* amax );
+lapack_int LAPACKE_cpbequ( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const lapack_complex_float* ab,
+                           lapack_int ldab, float* s, float* scond,
+                           float* amax );
+lapack_int LAPACKE_zpbequ( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, const lapack_complex_double* ab,
+                           lapack_int ldab, double* s, double* scond,
+                           double* amax );
+
+lapack_int LAPACKE_spbrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, const float* ab,
+                           lapack_int ldab, const float* afb, lapack_int ldafb,
+                           const float* b, lapack_int ldb, float* x,
+                           lapack_int ldx, float* ferr, float* berr );
+lapack_int LAPACKE_dpbrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, const double* ab,
+                           lapack_int ldab, const double* afb, lapack_int ldafb,
+                           const double* b, lapack_int ldb, double* x,
+                           lapack_int ldx, double* ferr, double* berr );
+lapack_int LAPACKE_cpbrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           const lapack_complex_float* ab, lapack_int ldab,
+                           const lapack_complex_float* afb, lapack_int ldafb,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zpbrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           const lapack_complex_double* ab, lapack_int ldab,
+                           const lapack_complex_double* afb, lapack_int ldafb,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_spbstf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kb, float* bb, lapack_int ldbb );
+lapack_int LAPACKE_dpbstf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kb, double* bb, lapack_int ldbb );
+lapack_int LAPACKE_cpbstf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kb, lapack_complex_float* bb,
+                           lapack_int ldbb );
+lapack_int LAPACKE_zpbstf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kb, lapack_complex_double* bb,
+                           lapack_int ldbb );
+
+lapack_int LAPACKE_spbsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int kd, lapack_int nrhs, float* ab,
+                          lapack_int ldab, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpbsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int kd, lapack_int nrhs, double* ab,
+                          lapack_int ldab, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpbsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int kd, lapack_int nrhs,
+                          lapack_complex_float* ab, lapack_int ldab,
+                          lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpbsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int kd, lapack_int nrhs,
+                          lapack_complex_double* ab, lapack_int ldab,
+                          lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spbsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, float* ab,
+                           lapack_int ldab, float* afb, lapack_int ldafb,
+                           char* equed, float* s, float* b, lapack_int ldb,
+                           float* x, lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_dpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, double* ab,
+                           lapack_int ldab, double* afb, lapack_int ldafb,
+                           char* equed, double* s, double* b, lapack_int ldb,
+                           double* x, lapack_int ldx, double* rcond,
+                           double* ferr, double* berr );
+lapack_int LAPACKE_cpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           lapack_complex_float* ab, lapack_int ldab,
+                           lapack_complex_float* afb, lapack_int ldafb,
+                           char* equed, float* s, lapack_complex_float* b,
+                           lapack_int ldb, lapack_complex_float* x,
+                           lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           lapack_complex_double* ab, lapack_int ldab,
+                           lapack_complex_double* afb, lapack_int ldafb,
+                           char* equed, double* s, lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* x,
+                           lapack_int ldx, double* rcond, double* ferr,
+                           double* berr );
+
+lapack_int LAPACKE_spbtrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, float* ab, lapack_int ldab );
+lapack_int LAPACKE_dpbtrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, double* ab, lapack_int ldab );
+lapack_int LAPACKE_cpbtrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_float* ab,
+                           lapack_int ldab );
+lapack_int LAPACKE_zpbtrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_complex_double* ab,
+                           lapack_int ldab );
+
+lapack_int LAPACKE_spbtrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, const float* ab,
+                           lapack_int ldab, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpbtrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs, const double* ab,
+                           lapack_int ldab, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpbtrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           const lapack_complex_float* ab, lapack_int ldab,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpbtrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int kd, lapack_int nrhs,
+                           const lapack_complex_double* ab, lapack_int ldab,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spftrf( int matrix_order, char transr, char uplo,
+                           lapack_int n, float* a );
+lapack_int LAPACKE_dpftrf( int matrix_order, char transr, char uplo,
+                           lapack_int n, double* a );
+lapack_int LAPACKE_cpftrf( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_complex_float* a );
+lapack_int LAPACKE_zpftrf( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_complex_double* a );
+
+lapack_int LAPACKE_spftri( int matrix_order, char transr, char uplo,
+                           lapack_int n, float* a );
+lapack_int LAPACKE_dpftri( int matrix_order, char transr, char uplo,
+                           lapack_int n, double* a );
+lapack_int LAPACKE_cpftri( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_complex_float* a );
+lapack_int LAPACKE_zpftri( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_complex_double* a );
+
+lapack_int LAPACKE_spftrs( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_int nrhs, const float* a,
+                           float* b, lapack_int ldb );
+lapack_int LAPACKE_dpftrs( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_int nrhs, const double* a,
+                           double* b, lapack_int ldb );
+lapack_int LAPACKE_cpftrs( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_float* a,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpftrs( int matrix_order, char transr, char uplo,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_double* a,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spocon( int matrix_order, char uplo, lapack_int n,
+                           const float* a, lapack_int lda, float anorm,
+                           float* rcond );
+lapack_int LAPACKE_dpocon( int matrix_order, char uplo, lapack_int n,
+                           const double* a, lapack_int lda, double anorm,
+                           double* rcond );
+lapack_int LAPACKE_cpocon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           float anorm, float* rcond );
+lapack_int LAPACKE_zpocon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           double anorm, double* rcond );
+
+lapack_int LAPACKE_spoequ( int matrix_order, lapack_int n, const float* a,
+                           lapack_int lda, float* s, float* scond,
+                           float* amax );
+lapack_int LAPACKE_dpoequ( int matrix_order, lapack_int n, const double* a,
+                           lapack_int lda, double* s, double* scond,
+                           double* amax );
+lapack_int LAPACKE_cpoequ( int matrix_order, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           float* s, float* scond, float* amax );
+lapack_int LAPACKE_zpoequ( int matrix_order, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_spoequb( int matrix_order, lapack_int n, const float* a,
+                            lapack_int lda, float* s, float* scond,
+                            float* amax );
+lapack_int LAPACKE_dpoequb( int matrix_order, lapack_int n, const double* a,
+                            lapack_int lda, double* s, double* scond,
+                            double* amax );
+lapack_int LAPACKE_cpoequb( int matrix_order, lapack_int n,
+                            const lapack_complex_float* a, lapack_int lda,
+                            float* s, float* scond, float* amax );
+lapack_int LAPACKE_zpoequb( int matrix_order, lapack_int n,
+                            const lapack_complex_double* a, lapack_int lda,
+                            double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_sporfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* a, lapack_int lda,
+                           const float* af, lapack_int ldaf, const float* b,
+                           lapack_int ldb, float* x, lapack_int ldx,
+                           float* ferr, float* berr );
+lapack_int LAPACKE_dporfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* a, lapack_int lda,
+                           const double* af, lapack_int ldaf, const double* b,
+                           lapack_int ldb, double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+lapack_int LAPACKE_cporfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* af,
+                           lapack_int ldaf, const lapack_complex_float* b,
+                           lapack_int ldb, lapack_complex_float* x,
+                           lapack_int ldx, float* ferr, float* berr );
+lapack_int LAPACKE_zporfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* af,
+                           lapack_int ldaf, const lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* x,
+                           lapack_int ldx, double* ferr, double* berr );
+
+lapack_int LAPACKE_sporfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs, const float* a,
+                            lapack_int lda, const float* af, lapack_int ldaf,
+                            const float* s, const float* b, lapack_int ldb,
+                            float* x, lapack_int ldx, float* rcond, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_dporfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs, const double* a,
+                            lapack_int lda, const double* af, lapack_int ldaf,
+                            const double* s, const double* b, lapack_int ldb,
+                            double* x, lapack_int ldx, double* rcond,
+                            double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+lapack_int LAPACKE_cporfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_float* a, lapack_int lda,
+                            const lapack_complex_float* af, lapack_int ldaf,
+                            const float* s, const lapack_complex_float* b,
+                            lapack_int ldb, lapack_complex_float* x,
+                            lapack_int ldx, float* rcond, float* berr,
+                            lapack_int n_err_bnds, float* err_bnds_norm,
+                            float* err_bnds_comp, lapack_int nparams,
+                            float* params );
+lapack_int LAPACKE_zporfsx( int matrix_order, char uplo, char equed,
+                            lapack_int n, lapack_int nrhs,
+                            const lapack_complex_double* a, lapack_int lda,
+                            const lapack_complex_double* af, lapack_int ldaf,
+                            const double* s, const lapack_complex_double* b,
+                            lapack_int ldb, lapack_complex_double* x,
+                            lapack_int ldx, double* rcond, double* berr,
+                            lapack_int n_err_bnds, double* err_bnds_norm,
+                            double* err_bnds_comp, lapack_int nparams,
+                            double* params );
+
+lapack_int LAPACKE_sposv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, float* a, lapack_int lda, float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dposv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, double* a, lapack_int lda, double* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_cposv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_float* a,
+                          lapack_int lda, lapack_complex_float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_zposv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_double* a,
+                          lapack_int lda, lapack_complex_double* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dsposv( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, double* a, lapack_int lda,
+                           double* b, lapack_int ldb, double* x, lapack_int ldx,
+                           lapack_int* iter );
+lapack_int LAPACKE_zcposv( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb, lapack_complex_double* x,
+                           lapack_int ldx, lapack_int* iter );
+
+lapack_int LAPACKE_sposvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, float* a, lapack_int lda, float* af,
+                           lapack_int ldaf, char* equed, float* s, float* b,
+                           lapack_int ldb, float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr );
+lapack_int LAPACKE_dposvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, double* a, lapack_int lda,
+                           double* af, lapack_int ldaf, char* equed, double* s,
+                           double* b, lapack_int ldb, double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+lapack_int LAPACKE_cposvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* af,
+                           lapack_int ldaf, char* equed, float* s,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr );
+lapack_int LAPACKE_zposvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* af,
+                           lapack_int ldaf, char* equed, double* s,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+
+lapack_int LAPACKE_sposvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs, float* a,
+                            lapack_int lda, float* af, lapack_int ldaf,
+                            char* equed, float* s, float* b, lapack_int ldb,
+                            float* x, lapack_int ldx, float* rcond,
+                            float* rpvgrw, float* berr, lapack_int n_err_bnds,
+                            float* err_bnds_norm, float* err_bnds_comp,
+                            lapack_int nparams, float* params );
+lapack_int LAPACKE_dposvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs, double* a,
+                            lapack_int lda, double* af, lapack_int ldaf,
+                            char* equed, double* s, double* b, lapack_int ldb,
+                            double* x, lapack_int ldx, double* rcond,
+                            double* rpvgrw, double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+lapack_int LAPACKE_cposvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_float* a, lapack_int lda,
+                            lapack_complex_float* af, lapack_int ldaf,
+                            char* equed, float* s, lapack_complex_float* b,
+                            lapack_int ldb, lapack_complex_float* x,
+                            lapack_int ldx, float* rcond, float* rpvgrw,
+                            float* berr, lapack_int n_err_bnds,
+                            float* err_bnds_norm, float* err_bnds_comp,
+                            lapack_int nparams, float* params );
+lapack_int LAPACKE_zposvxx( int matrix_order, char fact, char uplo,
+                            lapack_int n, lapack_int nrhs,
+                            lapack_complex_double* a, lapack_int lda,
+                            lapack_complex_double* af, lapack_int ldaf,
+                            char* equed, double* s, lapack_complex_double* b,
+                            lapack_int ldb, lapack_complex_double* x,
+                            lapack_int ldx, double* rcond, double* rpvgrw,
+                            double* berr, lapack_int n_err_bnds,
+                            double* err_bnds_norm, double* err_bnds_comp,
+                            lapack_int nparams, double* params );
+
+lapack_int LAPACKE_spotrf( int matrix_order, char uplo, lapack_int n, float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_dpotrf( int matrix_order, char uplo, lapack_int n, double* a,
+                           lapack_int lda );
+lapack_int LAPACKE_cpotrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zpotrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_spotri( int matrix_order, char uplo, lapack_int n, float* a,
+                           lapack_int lda );
+lapack_int LAPACKE_dpotri( int matrix_order, char uplo, lapack_int n, double* a,
+                           lapack_int lda );
+lapack_int LAPACKE_cpotri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zpotri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_spotrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* a, lapack_int lda,
+                           float* b, lapack_int ldb );
+lapack_int LAPACKE_dpotrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* a, lapack_int lda,
+                           double* b, lapack_int ldb );
+lapack_int LAPACKE_cpotrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_zpotrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* b,
+                           lapack_int ldb );
+
+lapack_int LAPACKE_sppcon( int matrix_order, char uplo, lapack_int n,
+                           const float* ap, float anorm, float* rcond );
+lapack_int LAPACKE_dppcon( int matrix_order, char uplo, lapack_int n,
+                           const double* ap, double anorm, double* rcond );
+lapack_int LAPACKE_cppcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* ap, float anorm,
+                           float* rcond );
+lapack_int LAPACKE_zppcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* ap, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_sppequ( int matrix_order, char uplo, lapack_int n,
+                           const float* ap, float* s, float* scond,
+                           float* amax );
+lapack_int LAPACKE_dppequ( int matrix_order, char uplo, lapack_int n,
+                           const double* ap, double* s, double* scond,
+                           double* amax );
+lapack_int LAPACKE_cppequ( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* ap, float* s,
+                           float* scond, float* amax );
+lapack_int LAPACKE_zppequ( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* ap, double* s,
+                           double* scond, double* amax );
+
+lapack_int LAPACKE_spprfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* ap, const float* afp,
+                           const float* b, lapack_int ldb, float* x,
+                           lapack_int ldx, float* ferr, float* berr );
+lapack_int LAPACKE_dpprfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* ap, const double* afp,
+                           const double* b, lapack_int ldb, double* x,
+                           lapack_int ldx, double* ferr, double* berr );
+lapack_int LAPACKE_cpprfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* ap,
+                           const lapack_complex_float* afp,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zpprfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* ap,
+                           const lapack_complex_double* afp,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_sppsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, float* ap, float* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_dppsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, double* ap, double* b,
+                          lapack_int ldb );
+lapack_int LAPACKE_cppsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_float* ap,
+                          lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zppsv( int matrix_order, char uplo, lapack_int n,
+                          lapack_int nrhs, lapack_complex_double* ap,
+                          lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sppsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, float* ap, float* afp, char* equed,
+                           float* s, float* b, lapack_int ldb, float* x,
+                           lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_dppsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, double* ap, double* afp,
+                           char* equed, double* s, double* b, lapack_int ldb,
+                           double* x, lapack_int ldx, double* rcond,
+                           double* ferr, double* berr );
+lapack_int LAPACKE_cppsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, lapack_complex_float* ap,
+                           lapack_complex_float* afp, char* equed, float* s,
+                           lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr );
+lapack_int LAPACKE_zppsvx( int matrix_order, char fact, char uplo, lapack_int n,
+                           lapack_int nrhs, lapack_complex_double* ap,
+                           lapack_complex_double* afp, char* equed, double* s,
+                           lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+
+lapack_int LAPACKE_spptrf( int matrix_order, char uplo, lapack_int n,
+                           float* ap );
+lapack_int LAPACKE_dpptrf( int matrix_order, char uplo, lapack_int n,
+                           double* ap );
+lapack_int LAPACKE_cpptrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* ap );
+lapack_int LAPACKE_zpptrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* ap );
+
+lapack_int LAPACKE_spptri( int matrix_order, char uplo, lapack_int n,
+                           float* ap );
+lapack_int LAPACKE_dpptri( int matrix_order, char uplo, lapack_int n,
+                           double* ap );
+lapack_int LAPACKE_cpptri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* ap );
+lapack_int LAPACKE_zpptri( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* ap );
+
+lapack_int LAPACKE_spptrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* ap, float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_dpptrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* ap, double* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_cpptrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_float* ap,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpptrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const lapack_complex_double* ap,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spstrf( int matrix_order, char uplo, lapack_int n, float* a,
+                           lapack_int lda, lapack_int* piv, lapack_int* rank,
+                           float tol );
+lapack_int LAPACKE_dpstrf( int matrix_order, char uplo, lapack_int n, double* a,
+                           lapack_int lda, lapack_int* piv, lapack_int* rank,
+                           double tol );
+lapack_int LAPACKE_cpstrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_int* piv, lapack_int* rank, float tol );
+lapack_int LAPACKE_zpstrf( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_int* piv, lapack_int* rank, double tol );
+
+lapack_int LAPACKE_sptcon( lapack_int n, const float* d, const float* e,
+                           float anorm, float* rcond );
+lapack_int LAPACKE_dptcon( lapack_int n, const double* d, const double* e,
+                           double anorm, double* rcond );
+lapack_int LAPACKE_cptcon( lapack_int n, const float* d,
+                           const lapack_complex_float* e, float anorm,
+                           float* rcond );
+lapack_int LAPACKE_zptcon( lapack_int n, const double* d,
+                           const lapack_complex_double* e, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_spteqr( int matrix_order, char compz, lapack_int n, float* d,
+                           float* e, float* z, lapack_int ldz );
+lapack_int LAPACKE_dpteqr( int matrix_order, char compz, lapack_int n,
+                           double* d, double* e, double* z, lapack_int ldz );
+lapack_int LAPACKE_cpteqr( int matrix_order, char compz, lapack_int n, float* d,
+                           float* e, lapack_complex_float* z, lapack_int ldz );
+lapack_int LAPACKE_zpteqr( int matrix_order, char compz, lapack_int n,
+                           double* d, double* e, lapack_complex_double* z,
+                           lapack_int ldz );
+
+lapack_int LAPACKE_sptrfs( int matrix_order, lapack_int n, lapack_int nrhs,
+                           const float* d, const float* e, const float* df,
+                           const float* ef, const float* b, lapack_int ldb,
+                           float* x, lapack_int ldx, float* ferr, float* berr );
+lapack_int LAPACKE_dptrfs( int matrix_order, lapack_int n, lapack_int nrhs,
+                           const double* d, const double* e, const double* df,
+                           const double* ef, const double* b, lapack_int ldb,
+                           double* x, lapack_int ldx, double* ferr,
+                           double* berr );
+lapack_int LAPACKE_cptrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* d,
+                           const lapack_complex_float* e, const float* df,
+                           const lapack_complex_float* ef,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_zptrfs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* d,
+                           const lapack_complex_double* e, const double* df,
+                           const lapack_complex_double* ef,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* ferr, double* berr );
+
+lapack_int LAPACKE_sptsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          float* d, float* e, float* b, lapack_int ldb );
+lapack_int LAPACKE_dptsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          double* d, double* e, double* b, lapack_int ldb );
+lapack_int LAPACKE_cptsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          float* d, lapack_complex_float* e,
+                          lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zptsv( int matrix_order, lapack_int n, lapack_int nrhs,
+                          double* d, lapack_complex_double* e,
+                          lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sptsvx( int matrix_order, char fact, lapack_int n,
+                           lapack_int nrhs, const float* d, const float* e,
+                           float* df, float* ef, const float* b, lapack_int ldb,
+                           float* x, lapack_int ldx, float* rcond, float* ferr,
+                           float* berr );
+lapack_int LAPACKE_dptsvx( int matrix_order, char fact, lapack_int n,
+                           lapack_int nrhs, const double* d, const double* e,
+                           double* df, double* ef, const double* b,
+                           lapack_int ldb, double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+lapack_int LAPACKE_cptsvx( int matrix_order, char fact, lapack_int n,
+                           lapack_int nrhs, const float* d,
+                           const lapack_complex_float* e, float* df,
+                           lapack_complex_float* ef,
+                           const lapack_complex_float* b, lapack_int ldb,
+                           lapack_complex_float* x, lapack_int ldx,
+                           float* rcond, float* ferr, float* berr );
+lapack_int LAPACKE_zptsvx( int matrix_order, char fact, lapack_int n,
+                           lapack_int nrhs, const double* d,
+                           const lapack_complex_double* e, double* df,
+                           lapack_complex_double* ef,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* x, lapack_int ldx,
+                           double* rcond, double* ferr, double* berr );
+
+lapack_int LAPACKE_spttrf( lapack_int n, float* d, float* e );
+lapack_int LAPACKE_dpttrf( lapack_int n, double* d, double* e );
+lapack_int LAPACKE_cpttrf( lapack_int n, float* d, lapack_complex_float* e );
+lapack_int LAPACKE_zpttrf( lapack_int n, double* d, lapack_complex_double* e );
+
+lapack_int LAPACKE_spttrs( int matrix_order, lapack_int n, lapack_int nrhs,
+                           const float* d, const float* e, float* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_dpttrs( int matrix_order, lapack_int n, lapack_int nrhs,
+                           const double* d, const double* e, double* b,
+                           lapack_int ldb );
+lapack_int LAPACKE_cpttrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const float* d,
+                           const lapack_complex_float* e,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpttrs( int matrix_order, char uplo, lapack_int n,
+                           lapack_int nrhs, const double* d,
+                           const lapack_complex_double* e,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_ssbev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int kd, float* ab, lapack_int ldab, float* w,
+                          float* z, lapack_int ldz );
+lapack_int LAPACKE_dsbev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int kd, double* ab, lapack_int ldab, double* w,
+                          double* z, lapack_int ldz );
+
+lapack_int LAPACKE_ssbevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int kd, float* ab, lapack_int ldab, float* w,
+                           float* z, lapack_int ldz );
+lapack_int LAPACKE_dsbevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int kd, double* ab, lapack_int ldab,
+                           double* w, double* z, lapack_int ldz );
+
+lapack_int LAPACKE_ssbevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int kd, float* ab,
+                           lapack_int ldab, float* q, lapack_int ldq, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, float* z, lapack_int ldz,
+                           lapack_int* ifail );
+lapack_int LAPACKE_dsbevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int kd, double* ab,
+                           lapack_int ldab, double* q, lapack_int ldq,
+                           double vl, double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w, double* z,
+                           lapack_int ldz, lapack_int* ifail );
+
+lapack_int LAPACKE_ssbgst( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb, float* ab,
+                           lapack_int ldab, const float* bb, lapack_int ldbb,
+                           float* x, lapack_int ldx );
+lapack_int LAPACKE_dsbgst( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb, double* ab,
+                           lapack_int ldab, const double* bb, lapack_int ldbb,
+                           double* x, lapack_int ldx );
+
+lapack_int LAPACKE_ssbgv( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int ka, lapack_int kb, float* ab,
+                          lapack_int ldab, float* bb, lapack_int ldbb, float* w,
+                          float* z, lapack_int ldz );
+lapack_int LAPACKE_dsbgv( int matrix_order, char jobz, char uplo, lapack_int n,
+                          lapack_int ka, lapack_int kb, double* ab,
+                          lapack_int ldab, double* bb, lapack_int ldbb,
+                          double* w, double* z, lapack_int ldz );
+
+lapack_int LAPACKE_ssbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb, float* ab,
+                           lapack_int ldab, float* bb, lapack_int ldbb,
+                           float* w, float* z, lapack_int ldz );
+lapack_int LAPACKE_dsbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           lapack_int ka, lapack_int kb, double* ab,
+                           lapack_int ldab, double* bb, lapack_int ldbb,
+                           double* w, double* z, lapack_int ldz );
+
+lapack_int LAPACKE_ssbgvx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int ka, lapack_int kb,
+                           float* ab, lapack_int ldab, float* bb,
+                           lapack_int ldbb, float* q, lapack_int ldq, float vl,
+                           float vu, lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, float* z, lapack_int ldz,
+                           lapack_int* ifail );
+lapack_int LAPACKE_dsbgvx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, lapack_int ka, lapack_int kb,
+                           double* ab, lapack_int ldab, double* bb,
+                           lapack_int ldbb, double* q, lapack_int ldq,
+                           double vl, double vu, lapack_int il, lapack_int iu,
+                           double abstol, lapack_int* m, double* w, double* z,
+                           lapack_int ldz, lapack_int* ifail );
+
+lapack_int LAPACKE_ssbtrd( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int kd, float* ab, lapack_int ldab, float* d,
+                           float* e, float* q, lapack_int ldq );
+lapack_int LAPACKE_dsbtrd( int matrix_order, char vect, char uplo, lapack_int n,
+                           lapack_int kd, double* ab, lapack_int ldab,
+                           double* d, double* e, double* q, lapack_int ldq );
+
+lapack_int LAPACKE_ssfrk( int matrix_order, char transr, char uplo, char trans,
+                          lapack_int n, lapack_int k, float alpha,
+                          const float* a, lapack_int lda, float beta,
+                          float* c );
+lapack_int LAPACKE_dsfrk( int matrix_order, char transr, char uplo, char trans,
+                          lapack_int n, lapack_int k, double alpha,
+                          const double* a, lapack_int lda, double beta,
+                          double* c );
+
+lapack_int LAPACKE_sspcon( int matrix_order, char uplo, lapack_int n,
+                           const float* ap, const lapack_int* ipiv, float anorm,
+                           float* rcond );
+lapack_int LAPACKE_dspcon( int matrix_order, char uplo, lapack_int n,
+                           const double* ap, const lapack_int* ipiv,
+                           double anorm, double* rcond );
+lapack_int LAPACKE_cspcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* ap,
+                           const lapack_int* ipiv, float anorm, float* rcond );
+lapack_int LAPACKE_zspcon( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* ap,
+                           const lapack_int* ipiv, double anorm,
+                           double* rcond );
+
+lapack_int LAPACKE_sspev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          float* ap, float* w, float* z, lapack_int ldz );
+lapack_int LAPACKE_dspev( int matrix_order, char jobz, char uplo, lapack_int n,
+                          double* ap, double* w, double* z, lapack_int ldz );
+
+lapack_int LAPACKE_sspevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           float* ap, float* w, float* z, lapack_int ldz );
+lapack_int LAPACKE_dspevd( int matrix_order, char jobz, char uplo, lapack_int n,
+                           double* ap, double* w, double* z, lapack_int ldz );
+
+lapack_int LAPACKE_sspevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, float* ap, float vl, float vu,
+                           lapack_int il, lapack_int iu, float abstol,
+                           lapack_int* m, float* w, float* z, lapack_int ldz,
+                           lapack_int* ifail );
+lapack_int LAPACKE_dspevx( int matrix_order, char jobz, char range, char uplo,
+                           lapack_int n, double* ap, double vl, double vu,
+                           lapack_int il, lapack_int iu, double abstol,
+                           lapack_int* m, double* w, double* z, lapack_int ldz,
+                           lapack_int* ifail );
+
+lapack_int LAPACKE_sspgst( int matrix_order, lapack_int itype, char uplo,
+                           lapack_int n, float* ap, const float* bp );
+lapack_int LAPACKE_dspgst( int matrix_order, lapack_int itype, char
uplo, + lapack_int n, double* ap, const double* bp ); + +lapack_int LAPACKE_sspgv( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* ap, float* bp, + float* w, float* z, lapack_int ldz ); +lapack_int LAPACKE_dspgv( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* ap, double* bp, + double* w, double* z, lapack_int ldz ); + +lapack_int LAPACKE_sspgvd( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* ap, float* bp, + float* w, float* z, lapack_int ldz ); +lapack_int LAPACKE_dspgvd( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* ap, double* bp, + double* w, double* z, lapack_int ldz ); + +lapack_int LAPACKE_sspgvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, float* ap, + float* bp, float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, float* w, + float* z, lapack_int ldz, lapack_int* ifail ); +lapack_int LAPACKE_dspgvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, double* ap, + double* bp, double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + double* w, double* z, lapack_int ldz, + lapack_int* ifail ); + +lapack_int LAPACKE_ssprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* ap, const float* afp, + const lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_dsprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* ap, const double* afp, + const lapack_int* ipiv, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* ferr, double* berr ); +lapack_int LAPACKE_csprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + const lapack_complex_float* afp, + const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, float* ferr, + float* berr ); +lapack_int LAPACKE_zsprfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + const lapack_complex_double* afp, + const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_sspsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, float* ap, lapack_int* ipiv, + float* b, lapack_int ldb ); +lapack_int LAPACKE_dspsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, double* ap, lapack_int* ipiv, + double* b, lapack_int ldb ); +lapack_int LAPACKE_cspsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_float* ap, + lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zspsv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_double* ap, + lapack_int* ipiv, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_sspsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const float* ap, float* afp, + lapack_int* ipiv, const float* b, lapack_int ldb, + float* x, lapack_int ldx, float* rcond, float* ferr, + float* berr ); +lapack_int LAPACKE_dspsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const double* ap, double* afp, + lapack_int* ipiv, const double* b, lapack_int ldb, + double* x, lapack_int ldx, double* rcond, + double* 
ferr, double* berr ); +lapack_int LAPACKE_cspsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + lapack_complex_float* afp, lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr ); +lapack_int LAPACKE_zspsvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + lapack_complex_double* afp, lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr ); + +lapack_int LAPACKE_ssptrd( int matrix_order, char uplo, lapack_int n, float* ap, + float* d, float* e, float* tau ); +lapack_int LAPACKE_dsptrd( int matrix_order, char uplo, lapack_int n, + double* ap, double* d, double* e, double* tau ); + +lapack_int LAPACKE_ssptrf( int matrix_order, char uplo, lapack_int n, float* ap, + lapack_int* ipiv ); +lapack_int LAPACKE_dsptrf( int matrix_order, char uplo, lapack_int n, + double* ap, lapack_int* ipiv ); +lapack_int LAPACKE_csptrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* ap, lapack_int* ipiv ); +lapack_int LAPACKE_zsptrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* ap, lapack_int* ipiv ); + +lapack_int LAPACKE_ssptri( int matrix_order, char uplo, lapack_int n, float* ap, + const lapack_int* ipiv ); +lapack_int LAPACKE_dsptri( int matrix_order, char uplo, lapack_int n, + double* ap, const lapack_int* ipiv ); +lapack_int LAPACKE_csptri( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* ap, const lapack_int* ipiv ); +lapack_int LAPACKE_zsptri( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* ap, const lapack_int* ipiv ); + +lapack_int LAPACKE_ssptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* ap, + const lapack_int* ipiv, float* b, lapack_int ldb ); +lapack_int LAPACKE_dsptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* ap, + const lapack_int* ipiv, double* b, lapack_int ldb ); +lapack_int LAPACKE_csptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* ap, + const lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zsptrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* ap, + const lapack_int* ipiv, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_sstebz( char range, char order, lapack_int n, float vl, + float vu, lapack_int il, lapack_int iu, float abstol, + const float* d, const float* e, lapack_int* m, + lapack_int* nsplit, float* w, lapack_int* iblock, + lapack_int* isplit ); +lapack_int LAPACKE_dstebz( char range, char order, lapack_int n, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, const double* d, const double* e, + lapack_int* m, lapack_int* nsplit, double* w, + lapack_int* iblock, lapack_int* isplit ); + +lapack_int LAPACKE_sstedc( int matrix_order, char compz, lapack_int n, float* d, + float* e, float* z, lapack_int ldz ); +lapack_int LAPACKE_dstedc( int matrix_order, char compz, lapack_int n, + double* d, double* e, double* z, lapack_int ldz ); +lapack_int LAPACKE_cstedc( int matrix_order, char compz, lapack_int n, float* d, + float* e, lapack_complex_float* z, lapack_int ldz ); +lapack_int LAPACKE_zstedc( int matrix_order, char compz, lapack_int n, + double* d, double* e, 
lapack_complex_double* z, + lapack_int ldz ); + +lapack_int LAPACKE_sstegr( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* isuppz ); +lapack_int LAPACKE_dstegr( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* isuppz ); +lapack_int LAPACKE_cstegr( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, lapack_complex_float* z, + lapack_int ldz, lapack_int* isuppz ); +lapack_int LAPACKE_zstegr( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, + lapack_complex_double* z, lapack_int ldz, + lapack_int* isuppz ); + +lapack_int LAPACKE_sstein( int matrix_order, lapack_int n, const float* d, + const float* e, lapack_int m, const float* w, + const lapack_int* iblock, const lapack_int* isplit, + float* z, lapack_int ldz, lapack_int* ifailv ); +lapack_int LAPACKE_dstein( int matrix_order, lapack_int n, const double* d, + const double* e, lapack_int m, const double* w, + const lapack_int* iblock, const lapack_int* isplit, + double* z, lapack_int ldz, lapack_int* ifailv ); +lapack_int LAPACKE_cstein( int matrix_order, lapack_int n, const float* d, + const float* e, lapack_int m, const float* w, + const lapack_int* iblock, const lapack_int* isplit, + lapack_complex_float* z, lapack_int ldz, + lapack_int* ifailv ); +lapack_int LAPACKE_zstein( int matrix_order, lapack_int n, const double* d, + const double* e, lapack_int m, const double* w, + const lapack_int* iblock, const lapack_int* isplit, + lapack_complex_double* z, lapack_int ldz, + lapack_int* ifailv ); + +lapack_int LAPACKE_sstemr( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, lapack_int* m, + float* w, float* z, lapack_int ldz, lapack_int nzc, + lapack_int* isuppz, lapack_logical* tryrac ); +lapack_int LAPACKE_dstemr( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + lapack_int* m, double* w, double* z, lapack_int ldz, + lapack_int nzc, lapack_int* isuppz, + lapack_logical* tryrac ); +lapack_int LAPACKE_cstemr( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, lapack_int* m, + float* w, lapack_complex_float* z, lapack_int ldz, + lapack_int nzc, lapack_int* isuppz, + lapack_logical* tryrac ); +lapack_int LAPACKE_zstemr( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + lapack_int* m, double* w, lapack_complex_double* z, + lapack_int ldz, lapack_int nzc, lapack_int* isuppz, + lapack_logical* tryrac ); + +lapack_int LAPACKE_ssteqr( int matrix_order, char compz, lapack_int n, float* d, + float* e, float* z, lapack_int ldz ); +lapack_int LAPACKE_dsteqr( int matrix_order, char compz, lapack_int n, + double* d, double* e, double* z, lapack_int ldz ); +lapack_int LAPACKE_csteqr( int matrix_order, char compz, lapack_int n, float* d, + float* e, lapack_complex_float* 
z, lapack_int ldz ); +lapack_int LAPACKE_zsteqr( int matrix_order, char compz, lapack_int n, + double* d, double* e, lapack_complex_double* z, + lapack_int ldz ); + +lapack_int LAPACKE_ssterf( lapack_int n, float* d, float* e ); +lapack_int LAPACKE_dsterf( lapack_int n, double* d, double* e ); + +lapack_int LAPACKE_sstev( int matrix_order, char jobz, lapack_int n, float* d, + float* e, float* z, lapack_int ldz ); +lapack_int LAPACKE_dstev( int matrix_order, char jobz, lapack_int n, double* d, + double* e, double* z, lapack_int ldz ); + +lapack_int LAPACKE_sstevd( int matrix_order, char jobz, lapack_int n, float* d, + float* e, float* z, lapack_int ldz ); +lapack_int LAPACKE_dstevd( int matrix_order, char jobz, lapack_int n, double* d, + double* e, double* z, lapack_int ldz ); + +lapack_int LAPACKE_sstevr( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* isuppz ); +lapack_int LAPACKE_dstevr( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* isuppz ); + +lapack_int LAPACKE_sstevx( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* ifail ); +lapack_int LAPACKE_dstevx( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* ifail ); + +lapack_int LAPACKE_ssycon( int matrix_order, char uplo, lapack_int n, + const float* a, lapack_int lda, + const lapack_int* ipiv, float anorm, float* rcond ); +lapack_int LAPACKE_dsycon( int matrix_order, char uplo, lapack_int n, + const double* a, lapack_int lda, + const lapack_int* ipiv, double anorm, + double* rcond ); +lapack_int LAPACKE_csycon( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, float anorm, float* rcond ); +lapack_int LAPACKE_zsycon( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, double anorm, + double* rcond ); + +lapack_int LAPACKE_ssyequb( int matrix_order, char uplo, lapack_int n, + const float* a, lapack_int lda, float* s, + float* scond, float* amax ); +lapack_int LAPACKE_dsyequb( int matrix_order, char uplo, lapack_int n, + const double* a, lapack_int lda, double* s, + double* scond, double* amax ); +lapack_int LAPACKE_csyequb( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + float* s, float* scond, float* amax ); +lapack_int LAPACKE_zsyequb( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + double* s, double* scond, double* amax ); + +lapack_int LAPACKE_ssyev( int matrix_order, char jobz, char uplo, lapack_int n, + float* a, lapack_int lda, float* w ); +lapack_int LAPACKE_dsyev( int matrix_order, char jobz, char uplo, lapack_int n, + double* a, lapack_int lda, double* w ); + +lapack_int LAPACKE_ssyevd( int matrix_order, char jobz, char uplo, lapack_int n, + float* a, lapack_int lda, float* w ); +lapack_int LAPACKE_dsyevd( int matrix_order, char jobz, char uplo, lapack_int 
n, + double* a, lapack_int lda, double* w ); + +lapack_int LAPACKE_ssyevr( int matrix_order, char jobz, char range, char uplo, + lapack_int n, float* a, lapack_int lda, float vl, + float vu, lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* isuppz ); +lapack_int LAPACKE_dsyevr( int matrix_order, char jobz, char range, char uplo, + lapack_int n, double* a, lapack_int lda, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* isuppz ); + +lapack_int LAPACKE_ssyevx( int matrix_order, char jobz, char range, char uplo, + lapack_int n, float* a, lapack_int lda, float vl, + float vu, lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* ifail ); +lapack_int LAPACKE_dsyevx( int matrix_order, char jobz, char range, char uplo, + lapack_int n, double* a, lapack_int lda, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* ifail ); + +lapack_int LAPACKE_ssygst( int matrix_order, lapack_int itype, char uplo, + lapack_int n, float* a, lapack_int lda, + const float* b, lapack_int ldb ); +lapack_int LAPACKE_dsygst( int matrix_order, lapack_int itype, char uplo, + lapack_int n, double* a, lapack_int lda, + const double* b, lapack_int ldb ); + +lapack_int LAPACKE_ssygv( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* a, lapack_int lda, + float* b, lapack_int ldb, float* w ); +lapack_int LAPACKE_dsygv( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* a, lapack_int lda, + double* b, lapack_int ldb, double* w ); + +lapack_int LAPACKE_ssygvd( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* a, lapack_int lda, + float* b, lapack_int ldb, float* w ); +lapack_int LAPACKE_dsygvd( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* a, lapack_int lda, + double* b, lapack_int ldb, double* w ); + +lapack_int LAPACKE_ssygvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, float vl, + float vu, lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, lapack_int ldz, + lapack_int* ifail ); +lapack_int LAPACKE_dsygvx( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* ifail ); + +lapack_int LAPACKE_ssyrfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + const float* af, lapack_int ldaf, + const lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_dsyrfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, lapack_int lda, + const double* af, lapack_int ldaf, + const lapack_int* ipiv, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* ferr, double* berr ); +lapack_int LAPACKE_csyrfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int 
ldb, + lapack_complex_float* x, lapack_int ldx, float* ferr, + float* berr ); +lapack_int LAPACKE_zsyrfs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_complex_double* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_ssyrfsx( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, const float* a, + lapack_int lda, const float* af, lapack_int ldaf, + const lapack_int* ipiv, const float* s, + const float* b, lapack_int ldb, float* x, + lapack_int ldx, float* rcond, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params ); +lapack_int LAPACKE_dsyrfsx( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, const double* a, + lapack_int lda, const double* af, lapack_int ldaf, + const lapack_int* ipiv, const double* s, + const double* b, lapack_int ldb, double* x, + lapack_int ldx, double* rcond, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params ); +lapack_int LAPACKE_csyrfsx( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* af, lapack_int ldaf, + const lapack_int* ipiv, const float* s, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* berr, lapack_int n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int nparams, float* params ); +lapack_int LAPACKE_zsyrfsx( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* af, lapack_int ldaf, + const lapack_int* ipiv, const double* s, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* berr, lapack_int n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int nparams, double* params ); + +lapack_int LAPACKE_ssysv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, float* a, lapack_int lda, + lapack_int* ipiv, float* b, lapack_int ldb ); +lapack_int LAPACKE_dsysv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, double* a, lapack_int lda, + lapack_int* ipiv, double* b, lapack_int ldb ); +lapack_int LAPACKE_csysv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_float* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_zsysv( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_double* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_ssysvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + float* af, lapack_int ldaf, lapack_int* ipiv, + const float* b, lapack_int ldb, float* x, + lapack_int ldx, float* rcond, float* ferr, + float* berr ); +lapack_int LAPACKE_dsysvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const double* a, lapack_int lda, + double* af, lapack_int ldaf, lapack_int* ipiv, + const double* b, lapack_int ldb, double* x, + lapack_int ldx, double* rcond, double* ferr, + double* berr ); +lapack_int LAPACKE_csysvx( int 
matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, lapack_complex_float* af, + lapack_int ldaf, lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr ); +lapack_int LAPACKE_zsysvx( int matrix_order, char fact, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, lapack_complex_double* af, + lapack_int ldaf, lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr ); + +lapack_int LAPACKE_ssysvxx( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, float* a, + lapack_int lda, float* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, float* s, float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* rcond, float* rpvgrw, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params ); +lapack_int LAPACKE_dsysvxx( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, double* a, + lapack_int lda, double* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, double* s, double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* rpvgrw, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params ); +lapack_int LAPACKE_csysvxx( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, float* s, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* rpvgrw, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params ); +lapack_int LAPACKE_zsysvxx( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, double* s, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* rpvgrw, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params ); + +lapack_int LAPACKE_ssytrd( int matrix_order, char uplo, lapack_int n, float* a, + lapack_int lda, float* d, float* e, float* tau ); +lapack_int LAPACKE_dsytrd( int matrix_order, char uplo, lapack_int n, double* a, + lapack_int lda, double* d, double* e, double* tau ); + +lapack_int LAPACKE_ssytrf( int matrix_order, char uplo, lapack_int n, float* a, + lapack_int lda, lapack_int* ipiv ); +lapack_int LAPACKE_dsytrf( int matrix_order, char uplo, lapack_int n, double* a, + lapack_int lda, lapack_int* ipiv ); +lapack_int LAPACKE_csytrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_int* ipiv ); +lapack_int LAPACKE_zsytrf( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_int* ipiv ); + +lapack_int LAPACKE_ssytri( int matrix_order, char uplo, lapack_int n, float* a, + lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_dsytri( int matrix_order, char uplo, lapack_int n, double* a, + lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_csytri( int matrix_order, char 
uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_zsytri( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv ); + +lapack_int LAPACKE_ssytrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + const lapack_int* ipiv, float* b, lapack_int ldb ); +lapack_int LAPACKE_dsytrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, lapack_int lda, + const lapack_int* ipiv, double* b, lapack_int ldb ); +lapack_int LAPACKE_csytrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_zsytrs( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stbcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, lapack_int kd, const float* ab, + lapack_int ldab, float* rcond ); +lapack_int LAPACKE_dtbcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, lapack_int kd, const double* ab, + lapack_int ldab, double* rcond ); +lapack_int LAPACKE_ctbcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, lapack_int kd, + const lapack_complex_float* ab, lapack_int ldab, + float* rcond ); +lapack_int LAPACKE_ztbcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, lapack_int kd, + const lapack_complex_double* ab, lapack_int ldab, + double* rcond ); + +lapack_int LAPACKE_stbrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const float* ab, lapack_int ldab, const float* b, + lapack_int ldb, const float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_dtbrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const double* ab, lapack_int ldab, const double* b, + lapack_int ldb, const double* x, lapack_int ldx, + double* ferr, double* berr ); +lapack_int LAPACKE_ctbrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const lapack_complex_float* ab, lapack_int ldab, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_ztbrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const lapack_complex_double* ab, lapack_int ldab, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_stbtrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const float* ab, lapack_int ldab, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dtbtrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const double* ab, lapack_int ldab, double* b, + lapack_int ldb ); +lapack_int LAPACKE_ctbtrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int kd, lapack_int nrhs, + const lapack_complex_float* ab, lapack_int ldab, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztbtrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, 
lapack_int kd, lapack_int nrhs, + const lapack_complex_double* ab, lapack_int ldab, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stfsm( int matrix_order, char transr, char side, char uplo, + char trans, char diag, lapack_int m, lapack_int n, + float alpha, const float* a, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dtfsm( int matrix_order, char transr, char side, char uplo, + char trans, char diag, lapack_int m, lapack_int n, + double alpha, const double* a, double* b, + lapack_int ldb ); +lapack_int LAPACKE_ctfsm( int matrix_order, char transr, char side, char uplo, + char trans, char diag, lapack_int m, lapack_int n, + lapack_complex_float alpha, + const lapack_complex_float* a, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztfsm( int matrix_order, char transr, char side, char uplo, + char trans, char diag, lapack_int m, lapack_int n, + lapack_complex_double alpha, + const lapack_complex_double* a, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stftri( int matrix_order, char transr, char uplo, char diag, + lapack_int n, float* a ); +lapack_int LAPACKE_dtftri( int matrix_order, char transr, char uplo, char diag, + lapack_int n, double* a ); +lapack_int LAPACKE_ctftri( int matrix_order, char transr, char uplo, char diag, + lapack_int n, lapack_complex_float* a ); +lapack_int LAPACKE_ztftri( int matrix_order, char transr, char uplo, char diag, + lapack_int n, lapack_complex_double* a ); + +lapack_int LAPACKE_stfttp( int matrix_order, char transr, char uplo, + lapack_int n, const float* arf, float* ap ); +lapack_int LAPACKE_dtfttp( int matrix_order, char transr, char uplo, + lapack_int n, const double* arf, double* ap ); +lapack_int LAPACKE_ctfttp( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* arf, + lapack_complex_float* ap ); +lapack_int LAPACKE_ztfttp( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* arf, + lapack_complex_double* ap ); + +lapack_int LAPACKE_stfttr( int matrix_order, char transr, char uplo, + lapack_int n, const float* arf, float* a, + lapack_int lda ); +lapack_int LAPACKE_dtfttr( int matrix_order, char transr, char uplo, + lapack_int n, const double* arf, double* a, + lapack_int lda ); +lapack_int LAPACKE_ctfttr( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* arf, + lapack_complex_float* a, lapack_int lda ); +lapack_int LAPACKE_ztfttr( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* arf, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_stgevc( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const float* s, lapack_int lds, const float* p, + lapack_int ldp, float* vl, lapack_int ldvl, + float* vr, lapack_int ldvr, lapack_int mm, + lapack_int* m ); +lapack_int LAPACKE_dtgevc( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const double* s, lapack_int lds, const double* p, + lapack_int ldp, double* vl, lapack_int ldvl, + double* vr, lapack_int ldvr, lapack_int mm, + lapack_int* m ); +lapack_int LAPACKE_ctgevc( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* s, lapack_int lds, + const lapack_complex_float* p, lapack_int ldp, + lapack_complex_float* vl, lapack_int ldvl, + lapack_complex_float* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ztgevc( int 
matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* s, lapack_int lds, + const lapack_complex_double* p, lapack_int ldp, + lapack_complex_double* vl, lapack_int ldvl, + lapack_complex_double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m ); + +lapack_int LAPACKE_stgexc( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, float* q, + lapack_int ldq, float* z, lapack_int ldz, + lapack_int* ifst, lapack_int* ilst ); +lapack_int LAPACKE_dtgexc( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, double* q, + lapack_int ldq, double* z, lapack_int ldz, + lapack_int* ifst, lapack_int* ilst ); +lapack_int LAPACKE_ctgexc( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* z, lapack_int ldz, + lapack_int ifst, lapack_int ilst ); +lapack_int LAPACKE_ztgexc( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* z, lapack_int ldz, + lapack_int ifst, lapack_int ilst ); + +lapack_int LAPACKE_stgsen( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, + float* alphar, float* alphai, float* beta, float* q, + lapack_int ldq, float* z, lapack_int ldz, + lapack_int* m, float* pl, float* pr, float* dif ); +lapack_int LAPACKE_dtgsen( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + double* a, lapack_int lda, double* b, lapack_int ldb, + double* alphar, double* alphai, double* beta, + double* q, lapack_int ldq, double* z, lapack_int ldz, + lapack_int* m, double* pl, double* pr, double* dif ); +lapack_int LAPACKE_ctgsen( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* alpha, + lapack_complex_float* beta, lapack_complex_float* q, + lapack_int ldq, lapack_complex_float* z, + lapack_int ldz, lapack_int* m, float* pl, float* pr, + float* dif ); +lapack_int LAPACKE_ztgsen( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* z, lapack_int ldz, + lapack_int* m, double* pl, double* pr, double* dif ); + +lapack_int LAPACKE_stgsja( int matrix_order, char jobu, char jobv, char jobq, + lapack_int m, lapack_int p, lapack_int n, + lapack_int k, lapack_int l, float* a, lapack_int lda, + float* b, lapack_int ldb, float tola, float tolb, + float* alpha, float* beta, float* u, lapack_int ldu, + float* v, lapack_int ldv, float* q, lapack_int ldq, + lapack_int* ncycle ); +lapack_int LAPACKE_dtgsja( int matrix_order, char jobu, char jobv, char jobq, + lapack_int m, 
lapack_int p, lapack_int n, + lapack_int k, lapack_int l, double* a, + lapack_int lda, double* b, lapack_int ldb, + double tola, double tolb, double* alpha, + double* beta, double* u, lapack_int ldu, double* v, + lapack_int ldv, double* q, lapack_int ldq, + lapack_int* ncycle ); +lapack_int LAPACKE_ctgsja( int matrix_order, char jobu, char jobv, char jobq, + lapack_int m, lapack_int p, lapack_int n, + lapack_int k, lapack_int l, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb, float tola, float tolb, float* alpha, + float* beta, lapack_complex_float* u, lapack_int ldu, + lapack_complex_float* v, lapack_int ldv, + lapack_complex_float* q, lapack_int ldq, + lapack_int* ncycle ); +lapack_int LAPACKE_ztgsja( int matrix_order, char jobu, char jobv, char jobq, + lapack_int m, lapack_int p, lapack_int n, + lapack_int k, lapack_int l, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb, double tola, double tolb, + double* alpha, double* beta, + lapack_complex_double* u, lapack_int ldu, + lapack_complex_double* v, lapack_int ldv, + lapack_complex_double* q, lapack_int ldq, + lapack_int* ncycle ); + +lapack_int LAPACKE_stgsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const float* a, lapack_int lda, const float* b, + lapack_int ldb, const float* vl, lapack_int ldvl, + const float* vr, lapack_int ldvr, float* s, + float* dif, lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_dtgsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const double* a, lapack_int lda, const double* b, + lapack_int ldb, const double* vl, lapack_int ldvl, + const double* vr, lapack_int ldvr, double* s, + double* dif, lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ctgsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* vl, lapack_int ldvl, + const lapack_complex_float* vr, lapack_int ldvr, + float* s, float* dif, lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ztgsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* vl, lapack_int ldvl, + const lapack_complex_double* vr, lapack_int ldvr, + double* s, double* dif, lapack_int mm, + lapack_int* m ); + +lapack_int LAPACKE_stgsyl( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, const float* a, + lapack_int lda, const float* b, lapack_int ldb, + float* c, lapack_int ldc, const float* d, + lapack_int ldd, const float* e, lapack_int lde, + float* f, lapack_int ldf, float* scale, float* dif ); +lapack_int LAPACKE_dtgsyl( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, const double* a, + lapack_int lda, const double* b, lapack_int ldb, + double* c, lapack_int ldc, const double* d, + lapack_int ldd, const double* e, lapack_int lde, + double* f, lapack_int ldf, double* scale, + double* dif ); +lapack_int LAPACKE_ctgsyl( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* c, lapack_int ldc, + const lapack_complex_float* d, lapack_int ldd, + const lapack_complex_float* e, lapack_int lde, + 
lapack_complex_float* f, lapack_int ldf, + float* scale, float* dif ); +lapack_int LAPACKE_ztgsyl( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* c, lapack_int ldc, + const lapack_complex_double* d, lapack_int ldd, + const lapack_complex_double* e, lapack_int lde, + lapack_complex_double* f, lapack_int ldf, + double* scale, double* dif ); + +lapack_int LAPACKE_stpcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const float* ap, float* rcond ); +lapack_int LAPACKE_dtpcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const double* ap, double* rcond ); +lapack_int LAPACKE_ctpcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const lapack_complex_float* ap, + float* rcond ); +lapack_int LAPACKE_ztpcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const lapack_complex_double* ap, + double* rcond ); + +lapack_int LAPACKE_stprfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const float* ap, + const float* b, lapack_int ldb, const float* x, + lapack_int ldx, float* ferr, float* berr ); +lapack_int LAPACKE_dtprfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const double* ap, + const double* b, lapack_int ldb, const double* x, + lapack_int ldx, double* ferr, double* berr ); +lapack_int LAPACKE_ctprfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* ap, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_ztprfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* ap, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_stptri( int matrix_order, char uplo, char diag, lapack_int n, + float* ap ); +lapack_int LAPACKE_dtptri( int matrix_order, char uplo, char diag, lapack_int n, + double* ap ); +lapack_int LAPACKE_ctptri( int matrix_order, char uplo, char diag, lapack_int n, + lapack_complex_float* ap ); +lapack_int LAPACKE_ztptri( int matrix_order, char uplo, char diag, lapack_int n, + lapack_complex_double* ap ); + +lapack_int LAPACKE_stptrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const float* ap, + float* b, lapack_int ldb ); +lapack_int LAPACKE_dtptrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const double* ap, + double* b, lapack_int ldb ); +lapack_int LAPACKE_ctptrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* ap, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztptrs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* ap, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stpttf( int matrix_order, char transr, char uplo, + lapack_int n, const float* ap, float* arf ); +lapack_int LAPACKE_dtpttf( int matrix_order, char transr, char uplo, + lapack_int n, const double* ap, double* arf ); +lapack_int LAPACKE_ctpttf( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* ap, + 
lapack_complex_float* arf ); +lapack_int LAPACKE_ztpttf( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* ap, + lapack_complex_double* arf ); + +lapack_int LAPACKE_stpttr( int matrix_order, char uplo, lapack_int n, + const float* ap, float* a, lapack_int lda ); +lapack_int LAPACKE_dtpttr( int matrix_order, char uplo, lapack_int n, + const double* ap, double* a, lapack_int lda ); +lapack_int LAPACKE_ctpttr( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* ap, + lapack_complex_float* a, lapack_int lda ); +lapack_int LAPACKE_ztpttr( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* ap, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_strcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const float* a, lapack_int lda, + float* rcond ); +lapack_int LAPACKE_dtrcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const double* a, lapack_int lda, + double* rcond ); +lapack_int LAPACKE_ctrcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const lapack_complex_float* a, + lapack_int lda, float* rcond ); +lapack_int LAPACKE_ztrcon( int matrix_order, char norm, char uplo, char diag, + lapack_int n, const lapack_complex_double* a, + lapack_int lda, double* rcond ); + +lapack_int LAPACKE_strevc( int matrix_order, char side, char howmny, + lapack_logical* select, lapack_int n, const float* t, + lapack_int ldt, float* vl, lapack_int ldvl, + float* vr, lapack_int ldvr, lapack_int mm, + lapack_int* m ); +lapack_int LAPACKE_dtrevc( int matrix_order, char side, char howmny, + lapack_logical* select, lapack_int n, + const double* t, lapack_int ldt, double* vl, + lapack_int ldvl, double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ctrevc( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* vl, lapack_int ldvl, + lapack_complex_float* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ztrevc( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* vl, lapack_int ldvl, + lapack_complex_double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m ); + +lapack_int LAPACKE_strexc( int matrix_order, char compq, lapack_int n, float* t, + lapack_int ldt, float* q, lapack_int ldq, + lapack_int* ifst, lapack_int* ilst ); +lapack_int LAPACKE_dtrexc( int matrix_order, char compq, lapack_int n, + double* t, lapack_int ldt, double* q, lapack_int ldq, + lapack_int* ifst, lapack_int* ilst ); +lapack_int LAPACKE_ctrexc( int matrix_order, char compq, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* q, lapack_int ldq, + lapack_int ifst, lapack_int ilst ); +lapack_int LAPACKE_ztrexc( int matrix_order, char compq, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* q, lapack_int ldq, + lapack_int ifst, lapack_int ilst ); + +lapack_int LAPACKE_strrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const float* a, + lapack_int lda, const float* b, lapack_int ldb, + const float* x, lapack_int ldx, float* ferr, + float* berr ); +lapack_int LAPACKE_dtrrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, const double* a, + lapack_int lda, const double* b, lapack_int ldb, + const 
double* x, lapack_int ldx, double* ferr, + double* berr ); +lapack_int LAPACKE_ctrrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr ); +lapack_int LAPACKE_ztrrfs( int matrix_order, char uplo, char trans, char diag, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr ); + +lapack_int LAPACKE_strsen( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, float* t, + lapack_int ldt, float* q, lapack_int ldq, float* wr, + float* wi, lapack_int* m, float* s, float* sep ); +lapack_int LAPACKE_dtrsen( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + double* t, lapack_int ldt, double* q, lapack_int ldq, + double* wr, double* wi, lapack_int* m, double* s, + double* sep ); +lapack_int LAPACKE_ctrsen( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* w, lapack_int* m, float* s, + float* sep ); +lapack_int LAPACKE_ztrsen( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* w, lapack_int* m, double* s, + double* sep ); + +lapack_int LAPACKE_strsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const float* t, lapack_int ldt, const float* vl, + lapack_int ldvl, const float* vr, lapack_int ldvr, + float* s, float* sep, lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_dtrsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const double* t, lapack_int ldt, const double* vl, + lapack_int ldvl, const double* vr, lapack_int ldvr, + double* s, double* sep, lapack_int mm, + lapack_int* m ); +lapack_int LAPACKE_ctrsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* t, lapack_int ldt, + const lapack_complex_float* vl, lapack_int ldvl, + const lapack_complex_float* vr, lapack_int ldvr, + float* s, float* sep, lapack_int mm, lapack_int* m ); +lapack_int LAPACKE_ztrsna( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* t, lapack_int ldt, + const lapack_complex_double* vl, lapack_int ldvl, + const lapack_complex_double* vr, lapack_int ldvr, + double* s, double* sep, lapack_int mm, + lapack_int* m ); + +lapack_int LAPACKE_strsyl( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const float* a, lapack_int lda, const float* b, + lapack_int ldb, float* c, lapack_int ldc, + float* scale ); +lapack_int LAPACKE_dtrsyl( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const double* a, lapack_int lda, const double* b, + lapack_int ldb, double* c, lapack_int ldc, + double* scale ); +lapack_int LAPACKE_ctrsyl( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + 
lapack_complex_float* c, lapack_int ldc,
+                           float* scale );
+lapack_int LAPACKE_ztrsyl( int matrix_order, char trana, char tranb,
+                           lapack_int isgn, lapack_int m, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* b, lapack_int ldb,
+                           lapack_complex_double* c, lapack_int ldc,
+                           double* scale );
+
+lapack_int LAPACKE_strtri( int matrix_order, char uplo, char diag, lapack_int n,
+                           float* a, lapack_int lda );
+lapack_int LAPACKE_dtrtri( int matrix_order, char uplo, char diag, lapack_int n,
+                           double* a, lapack_int lda );
+lapack_int LAPACKE_ctrtri( int matrix_order, char uplo, char diag, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_ztrtri( int matrix_order, char uplo, char diag, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_strtrs( int matrix_order, char uplo, char trans, char diag,
+                           lapack_int n, lapack_int nrhs, const float* a,
+                           lapack_int lda, float* b, lapack_int ldb );
+lapack_int LAPACKE_dtrtrs( int matrix_order, char uplo, char trans, char diag,
+                           lapack_int n, lapack_int nrhs, const double* a,
+                           lapack_int lda, double* b, lapack_int ldb );
+lapack_int LAPACKE_ctrtrs( int matrix_order, char uplo, char trans, char diag,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_ztrtrs( int matrix_order, char uplo, char trans, char diag,
+                           lapack_int n, lapack_int nrhs,
+                           const lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_strttf( int matrix_order, char transr, char uplo,
+                           lapack_int n, const float* a, lapack_int lda,
+                           float* arf );
+lapack_int LAPACKE_dtrttf( int matrix_order, char transr, char uplo,
+                           lapack_int n, const double* a, lapack_int lda,
+                           double* arf );
+lapack_int LAPACKE_ctrttf( int matrix_order, char transr, char uplo,
+                           lapack_int n, const lapack_complex_float* a,
+                           lapack_int lda, lapack_complex_float* arf );
+lapack_int LAPACKE_ztrttf( int matrix_order, char transr, char uplo,
+                           lapack_int n, const lapack_complex_double* a,
+                           lapack_int lda, lapack_complex_double* arf );
+
+lapack_int LAPACKE_strttp( int matrix_order, char uplo, lapack_int n,
+                           const float* a, lapack_int lda, float* ap );
+lapack_int LAPACKE_dtrttp( int matrix_order, char uplo, lapack_int n,
+                           const double* a, lapack_int lda, double* ap );
+lapack_int LAPACKE_ctrttp( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* ap );
+lapack_int LAPACKE_ztrttp( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* ap );
+
+lapack_int LAPACKE_stzrzf( int matrix_order, lapack_int m, lapack_int n,
+                           float* a, lapack_int lda, float* tau );
+lapack_int LAPACKE_dtzrzf( int matrix_order, lapack_int m, lapack_int n,
+                           double* a, lapack_int lda, double* tau );
+lapack_int LAPACKE_ctzrzf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           lapack_complex_float* tau );
+lapack_int LAPACKE_ztzrzf( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           lapack_complex_double* tau );
+
+lapack_int LAPACKE_cungbr( int matrix_order, char vect, lapack_int m,
+                           lapack_int n, lapack_int k, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zungbr( int matrix_order, char vect, lapack_int m,
+                           lapack_int n, lapack_int k, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cunghr( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zunghr( int matrix_order, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cunglq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zunglq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cungql( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zungql( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cungqr( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zungqr( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cungrq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau );
+lapack_int LAPACKE_zungrq( int matrix_order, lapack_int m, lapack_int n,
+                           lapack_int k, lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cungtr( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau );
+lapack_int LAPACKE_zungtr( int matrix_order, char uplo, lapack_int n,
+                           lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau );
+
+lapack_int LAPACKE_cunmbr( int matrix_order, char vect, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmbr( int matrix_order, char vect, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmhr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmhr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int ilo,
+                           lapack_int ihi, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmlq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmlq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmql( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmql( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmqr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmqr( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmrq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmrq( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmrz( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           lapack_int l, const lapack_complex_float* a,
+                           lapack_int lda, const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmrz( int matrix_order, char side, char trans,
+                           lapack_int m, lapack_int n, lapack_int k,
+                           lapack_int l, const lapack_complex_double* a,
+                           lapack_int lda, const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cunmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n,
+                           const lapack_complex_float* a, lapack_int lda,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zunmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n,
+                           const lapack_complex_double* a, lapack_int lda,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_cupgtr( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_float* ap,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* q, lapack_int ldq );
+lapack_int LAPACKE_zupgtr( int matrix_order, char uplo, lapack_int n,
+                           const lapack_complex_double* ap,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* q, lapack_int ldq );
+
+lapack_int LAPACKE_cupmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n,
+                           const lapack_complex_float* ap,
+                           const lapack_complex_float* tau,
+                           lapack_complex_float* c, lapack_int ldc );
+lapack_int LAPACKE_zupmtr( int matrix_order, char side, char uplo, char trans,
+                           lapack_int m, lapack_int n,
+                           const lapack_complex_double* ap,
+                           const lapack_complex_double* tau,
+                           lapack_complex_double* c, lapack_int ldc );
+
+lapack_int LAPACKE_sbdsdc_work( int matrix_order, char uplo, char compq,
+                                lapack_int n, float* d, float* e, float* u,
+                                lapack_int ldu, float* vt, lapack_int ldvt,
+                                float* q, lapack_int* iq, float* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dbdsdc_work( int matrix_order, char uplo, char compq,
+                                lapack_int n, double* d, double* e, double* u,
+                                lapack_int ldu, double* vt, lapack_int ldvt,
+                                double* q, lapack_int* iq, double* work,
+                                lapack_int* iwork );
+
+lapack_int LAPACKE_sbdsqr_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
+                                float* d, float* e, float* vt, lapack_int ldvt,
+                                float* u, lapack_int ldu, float* c,
+                                lapack_int ldc, float* work );
+lapack_int LAPACKE_dbdsqr_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
+                                double* d, double* e, double* vt,
+                                lapack_int ldvt, double* u, lapack_int ldu,
+                                double* c, lapack_int ldc, double* work );
+lapack_int LAPACKE_cbdsqr_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
+                                float* d, float* e, lapack_complex_float* vt,
+                                lapack_int ldvt, lapack_complex_float* u,
+                                lapack_int ldu, lapack_complex_float* c,
+                                lapack_int ldc, float* work );
+lapack_int LAPACKE_zbdsqr_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
+                                double* d, double* e, lapack_complex_double* vt,
+                                lapack_int ldvt, lapack_complex_double* u,
+                                lapack_int ldu, lapack_complex_double* c,
+                                lapack_int ldc, double* work );
+
+lapack_int LAPACKE_sdisna_work( char job, lapack_int m, lapack_int n,
+                                const float* d, float* sep );
+lapack_int LAPACKE_ddisna_work( char job, lapack_int m, lapack_int n,
+                                const double* d, double* sep );
+
+lapack_int LAPACKE_sgbbrd_work( int matrix_order, char vect, lapack_int m,
+                                lapack_int n, lapack_int ncc, lapack_int kl,
+                                lapack_int ku, float* ab, lapack_int ldab,
+                                float* d, float* e, float* q, lapack_int ldq,
+                                float* pt, lapack_int ldpt, float* c,
+                                lapack_int ldc, float* work );
+lapack_int LAPACKE_dgbbrd_work( int matrix_order, char vect, lapack_int m,
+                                lapack_int n, lapack_int ncc, lapack_int kl,
+                                lapack_int ku, double* ab, lapack_int ldab,
+                                double* d, double* e, double* q, lapack_int ldq,
+                                double* pt, lapack_int ldpt, double* c,
+                                lapack_int ldc, double* work );
+lapack_int LAPACKE_cgbbrd_work( int matrix_order, char vect, lapack_int m,
+                                lapack_int n, lapack_int ncc, lapack_int kl,
+                                lapack_int ku, lapack_complex_float* ab,
+                                lapack_int ldab, float* d, float* e,
+                                lapack_complex_float* q, lapack_int ldq,
+                                lapack_complex_float* pt, lapack_int ldpt,
+                                lapack_complex_float* c, lapack_int ldc,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgbbrd_work( int matrix_order, char vect, lapack_int m,
+                                lapack_int n, lapack_int ncc, lapack_int kl,
+                                lapack_int ku, lapack_complex_double* ab,
+                                lapack_int ldab, double* d, double* e,
+                                lapack_complex_double* q, lapack_int ldq,
+                                lapack_complex_double* pt, lapack_int ldpt,
+                                lapack_complex_double* c, lapack_int ldc,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgbcon_work( int matrix_order, char norm, lapack_int n,
+                                lapack_int kl, lapack_int ku, const float* ab,
+                                lapack_int ldab, const lapack_int* ipiv,
+                                float anorm, float* rcond, float* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgbcon_work( int matrix_order, char norm, lapack_int n,
+                                lapack_int kl, lapack_int ku, const double* ab,
+                                lapack_int ldab, const lapack_int* ipiv,
+                                double anorm, double* rcond, double* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_cgbcon_work( int matrix_order, char norm, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                const lapack_complex_float* ab, lapack_int ldab,
+                                const lapack_int* ipiv, float anorm,
+                                float* rcond, lapack_complex_float* work,
+                                float* rwork );
+lapack_int LAPACKE_zgbcon_work( int matrix_order, char norm, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                const lapack_complex_double* ab,
+                                lapack_int ldab, const lapack_int* ipiv,
+                                double anorm, double* rcond,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgbequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku, const float* ab,
+                                lapack_int ldab, float* r, float* c,
+                                float* rowcnd, float* colcnd, float* amax );
+lapack_int LAPACKE_dgbequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku, const double* ab,
+                                lapack_int ldab, double* r, double* c,
+                                double* rowcnd, double* colcnd, double* amax );
+lapack_int LAPACKE_cgbequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                const lapack_complex_float* ab, lapack_int ldab,
+                                float* r, float* c, float* rowcnd,
+                                float* colcnd, float* amax );
+lapack_int LAPACKE_zgbequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                const lapack_complex_double* ab,
+                                lapack_int ldab, double* r, double* c,
+                                double* rowcnd, double* colcnd, double* amax );
+
+lapack_int LAPACKE_sgbequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_int kl, lapack_int ku, const float* ab,
+                                 lapack_int ldab, float* r, float* c,
+                                 float* rowcnd, float* colcnd, float* amax );
+lapack_int LAPACKE_dgbequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_int kl, lapack_int ku, const double* ab,
+                                 lapack_int ldab, double* r, double* c,
+                                 double* rowcnd, double* colcnd, double* amax );
+lapack_int LAPACKE_cgbequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_int kl, lapack_int ku,
+                                 const lapack_complex_float* ab,
+                                 lapack_int ldab, float* r, float* c,
+                                 float* rowcnd, float* colcnd, float* amax );
+lapack_int LAPACKE_zgbequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_int kl, lapack_int ku,
+                                 const lapack_complex_double* ab,
+                                 lapack_int ldab, double* r, double* c,
+                                 double* rowcnd, double* colcnd, double* amax );
+
+lapack_int LAPACKE_sgbrfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const float* ab, lapack_int ldab,
+                                const float* afb, lapack_int ldafb,
+                                const lapack_int* ipiv, const float* b,
+                                lapack_int ldb, float* x, lapack_int ldx,
+                                float* ferr, float* berr, float* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgbrfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const double* ab, lapack_int ldab,
+                                const double* afb, lapack_int ldafb,
+                                const lapack_int* ipiv, const double* b,
+                                lapack_int ldb, double* x, lapack_int ldx,
+                                double* ferr, double* berr, double* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_cgbrfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const lapack_complex_float* ab, lapack_int ldab,
+                                const lapack_complex_float* afb,
+                                lapack_int ldafb, const lapack_int* ipiv,
+                                const lapack_complex_float* b, lapack_int ldb,
+                                lapack_complex_float* x, lapack_int ldx,
+                                float* ferr, float* berr,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgbrfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const lapack_complex_double* ab,
+                                lapack_int ldab,
+                                const lapack_complex_double* afb,
+                                lapack_int ldafb, const lapack_int* ipiv,
+                                const lapack_complex_double* b, lapack_int ldb,
+                                lapack_complex_double* x, lapack_int ldx,
+                                double* ferr, double* berr,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgbrfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, const float* ab,
+                                 lapack_int ldab, const float* afb,
+                                 lapack_int ldafb, const lapack_int* ipiv,
+                                 const float* r, const float* c, const float* b,
+                                 lapack_int ldb, float* x, lapack_int ldx,
+                                 float* rcond, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, float* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_dgbrfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, const double* ab,
+                                 lapack_int ldab, const double* afb,
+                                 lapack_int ldafb, const lapack_int* ipiv,
+                                 const double* r, const double* c,
+                                 const double* b, lapack_int ldb, double* x,
+                                 lapack_int ldx, double* rcond, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, double* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_cgbrfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs,
+                                 const lapack_complex_float* ab,
+                                 lapack_int ldab,
+                                 const lapack_complex_float* afb,
+                                 lapack_int ldafb, const lapack_int* ipiv,
+                                 const float* r, const float* c,
+                                 const lapack_complex_float* b, lapack_int ldb,
+                                 lapack_complex_float* x, lapack_int ldx,
+                                 float* rcond, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, lapack_complex_float* work,
+                                 float* rwork );
+lapack_int LAPACKE_zgbrfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs,
+                                 const lapack_complex_double* ab,
+                                 lapack_int ldab,
+                                 const lapack_complex_double* afb,
+                                 lapack_int ldafb, const lapack_int* ipiv,
+                                 const double* r, const double* c,
+                                 const lapack_complex_double* b, lapack_int ldb,
+                                 lapack_complex_double* x, lapack_int ldx,
+                                 double* rcond, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, lapack_complex_double* work,
+                                 double* rwork );
+
+lapack_int LAPACKE_sgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
+                               lapack_int ku, lapack_int nrhs, float* ab,
+                               lapack_int ldab, lapack_int* ipiv, float* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_dgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
+                               lapack_int ku, lapack_int nrhs, double* ab,
+                               lapack_int ldab, lapack_int* ipiv, double* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_cgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
+                               lapack_int ku, lapack_int nrhs,
+                               lapack_complex_float* ab, lapack_int ldab,
+                               lapack_int* ipiv, lapack_complex_float* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_zgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
+                               lapack_int ku, lapack_int nrhs,
+                               lapack_complex_double* ab, lapack_int ldab,
+                               lapack_int* ipiv, lapack_complex_double* b,
+                               lapack_int ldb );
+
+lapack_int LAPACKE_sgbsvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int kl, lapack_int ku,
+                                lapack_int nrhs, float* ab, lapack_int ldab,
+                                float* afb, lapack_int ldafb, lapack_int* ipiv,
+                                char* equed, float* r, float* c, float* b,
+                                lapack_int ldb, float* x, lapack_int ldx,
+                                float* rcond, float* ferr, float* berr,
+                                float* work, lapack_int* iwork );
+lapack_int LAPACKE_dgbsvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int kl, lapack_int ku,
+                                lapack_int nrhs, double* ab, lapack_int ldab,
+                                double* afb, lapack_int ldafb, lapack_int* ipiv,
+                                char* equed, double* r, double* c, double* b,
+                                lapack_int ldb, double* x, lapack_int ldx,
+                                double* rcond, double* ferr, double* berr,
+                                double* work, lapack_int* iwork );
+lapack_int LAPACKE_cgbsvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int kl, lapack_int ku,
+                                lapack_int nrhs, lapack_complex_float* ab,
+                                lapack_int ldab, lapack_complex_float* afb,
+                                lapack_int ldafb, lapack_int* ipiv, char* equed,
+                                float* r, float* c, lapack_complex_float* b,
+                                lapack_int ldb, lapack_complex_float* x,
+                                lapack_int ldx, float* rcond, float* ferr,
+                                float* berr, lapack_complex_float* work,
+                                float* rwork );
+lapack_int LAPACKE_zgbsvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int kl, lapack_int ku,
+                                lapack_int nrhs, lapack_complex_double* ab,
+                                lapack_int ldab, lapack_complex_double* afb,
+                                lapack_int ldafb, lapack_int* ipiv, char* equed,
+                                double* r, double* c, lapack_complex_double* b,
+                                lapack_int ldb, lapack_complex_double* x,
+                                lapack_int ldx, double* rcond, double* ferr,
+                                double* berr, lapack_complex_double* work,
+                                double* rwork );
+
+lapack_int LAPACKE_sgbsvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, float* ab, lapack_int ldab,
+                                 float* afb, lapack_int ldafb, lapack_int* ipiv,
+                                 char* equed, float* r, float* c, float* b,
+                                 lapack_int ldb, float* x, lapack_int ldx,
+                                 float* rcond, float* rpvgrw, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, float* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_dgbsvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, double* ab, lapack_int ldab,
+                                 double* afb, lapack_int ldafb,
+                                 lapack_int* ipiv, char* equed, double* r,
+                                 double* c, double* b, lapack_int ldb,
+                                 double* x, lapack_int ldx, double* rcond,
+                                 double* rpvgrw, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, double* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_cgbsvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, lapack_complex_float* ab,
+                                 lapack_int ldab, lapack_complex_float* afb,
+                                 lapack_int ldafb, lapack_int* ipiv,
+                                 char* equed, float* r, float* c,
+                                 lapack_complex_float* b, lapack_int ldb,
+                                 lapack_complex_float* x, lapack_int ldx,
+                                 float* rcond, float* rpvgrw, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, lapack_complex_float* work,
+                                 float* rwork );
+lapack_int LAPACKE_zgbsvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int kl, lapack_int ku,
+                                 lapack_int nrhs, lapack_complex_double* ab,
+                                 lapack_int ldab, lapack_complex_double* afb,
+                                 lapack_int ldafb, lapack_int* ipiv,
+                                 char* equed, double* r, double* c,
+                                 lapack_complex_double* b, lapack_int ldb,
+                                 lapack_complex_double* x, lapack_int ldx,
+                                 double* rcond, double* rpvgrw, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, lapack_complex_double* work,
+                                 double* rwork );
+
+lapack_int LAPACKE_sgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku, float* ab,
+                                lapack_int ldab, lapack_int* ipiv );
+lapack_int LAPACKE_dgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku, double* ab,
+                                lapack_int ldab, lapack_int* ipiv );
+lapack_int LAPACKE_cgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                lapack_complex_float* ab, lapack_int ldab,
+                                lapack_int* ipiv );
+lapack_int LAPACKE_zgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int kl, lapack_int ku,
+                                lapack_complex_double* ab, lapack_int ldab,
+                                lapack_int* ipiv );
+
+lapack_int LAPACKE_sgbtrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const float* ab, lapack_int ldab,
+                                const lapack_int* ipiv, float* b,
+                                lapack_int ldb );
+lapack_int LAPACKE_dgbtrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const double* ab, lapack_int ldab,
+                                const lapack_int* ipiv, double* b,
+                                lapack_int ldb );
+lapack_int LAPACKE_cgbtrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const lapack_complex_float* ab, lapack_int ldab,
+                                const lapack_int* ipiv, lapack_complex_float* b,
+                                lapack_int ldb );
+lapack_int LAPACKE_zgbtrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int kl, lapack_int ku, lapack_int nrhs,
+                                const lapack_complex_double* ab,
+                                lapack_int ldab, const lapack_int* ipiv,
+                                lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sgebak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const float* scale, lapack_int m, float* v,
+                                lapack_int ldv );
+lapack_int LAPACKE_dgebak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const double* scale, lapack_int m, double* v,
+                                lapack_int ldv );
+lapack_int LAPACKE_cgebak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const float* scale, lapack_int m,
+                                lapack_complex_float* v, lapack_int ldv );
+lapack_int LAPACKE_zgebak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const double* scale, lapack_int m,
+                                lapack_complex_double* v, lapack_int ldv );
+
+lapack_int LAPACKE_sgebal_work( int matrix_order, char job, lapack_int n,
+                                float* a, lapack_int lda, lapack_int* ilo,
+                                lapack_int* ihi, float* scale );
+lapack_int LAPACKE_dgebal_work( int matrix_order, char job, lapack_int n,
+                                double* a, lapack_int lda, lapack_int* ilo,
+                                lapack_int* ihi, double* scale );
+lapack_int LAPACKE_cgebal_work( int matrix_order, char job, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* ilo, lapack_int* ihi,
+                                float* scale );
+lapack_int LAPACKE_zgebal_work( int matrix_order, char job, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* ilo, lapack_int* ihi,
+                                double* scale );
+
+lapack_int LAPACKE_sgebrd_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* d, float* e,
+                                float* tauq, float* taup, float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_dgebrd_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* d, double* e,
+                                double* tauq, double* taup, double* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_cgebrd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                float* d, float* e, lapack_complex_float* tauq,
+                                lapack_complex_float* taup,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgebrd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                double* d, double* e,
+                                lapack_complex_double* tauq,
+                                lapack_complex_double* taup,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgecon_work( int matrix_order, char norm, lapack_int n,
+                                const float* a, lapack_int lda, float anorm,
+                                float* rcond, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dgecon_work( int matrix_order, char norm, lapack_int n,
+                                const double* a, lapack_int lda, double anorm,
+                                double* rcond, double* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_cgecon_work( int matrix_order, char norm, lapack_int n,
+                                const lapack_complex_float* a, lapack_int lda,
+                                float anorm, float* rcond,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgecon_work( int matrix_order, char norm, lapack_int n,
+                                const lapack_complex_double* a, lapack_int lda,
+                                double anorm, double* rcond,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgeequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                const float* a, lapack_int lda, float* r,
+                                float* c, float* rowcnd, float* colcnd,
+                                float* amax );
+lapack_int LAPACKE_dgeequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                const double* a, lapack_int lda, double* r,
+                                double* c, double* rowcnd, double* colcnd,
+                                double* amax );
+lapack_int LAPACKE_cgeequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                const lapack_complex_float* a, lapack_int lda,
+                                float* r, float* c, float* rowcnd,
+                                float* colcnd, float* amax );
+lapack_int LAPACKE_zgeequ_work( int matrix_order, lapack_int m, lapack_int n,
+                                const lapack_complex_double* a, lapack_int lda,
+                                double* r, double* c, double* rowcnd,
+                                double* colcnd, double* amax );
+
+lapack_int LAPACKE_sgeequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 const float* a, lapack_int lda, float* r,
+                                 float* c, float* rowcnd, float* colcnd,
+                                 float* amax );
+lapack_int LAPACKE_dgeequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 const double* a, lapack_int lda, double* r,
+                                 double* c, double* rowcnd, double* colcnd,
+                                 double* amax );
+lapack_int LAPACKE_cgeequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 const lapack_complex_float* a, lapack_int lda,
+                                 float* r, float* c, float* rowcnd,
+                                 float* colcnd, float* amax );
+lapack_int LAPACKE_zgeequb_work( int matrix_order, lapack_int m, lapack_int n,
+                                 const lapack_complex_double* a, lapack_int lda,
+                                 double* r, double* c, double* rowcnd,
+                                 double* colcnd, double* amax );
+
+lapack_int LAPACKE_sgees_work( int matrix_order, char jobvs, char sort,
+                               LAPACK_S_SELECT2 select, lapack_int n, float* a,
+                               lapack_int lda, lapack_int* sdim, float* wr,
+                               float* wi, float* vs, lapack_int ldvs,
+                               float* work, lapack_int lwork,
+                               lapack_logical* bwork );
+lapack_int LAPACKE_dgees_work( int matrix_order, char jobvs, char sort,
+                               LAPACK_D_SELECT2 select, lapack_int n, double* a,
+                               lapack_int lda, lapack_int* sdim, double* wr,
+                               double* wi, double* vs, lapack_int ldvs,
+                               double* work, lapack_int lwork,
+                               lapack_logical* bwork );
+lapack_int LAPACKE_cgees_work( int matrix_order, char jobvs, char sort,
+                               LAPACK_C_SELECT1 select, lapack_int n,
+                               lapack_complex_float* a, lapack_int lda,
+                               lapack_int* sdim, lapack_complex_float* w,
+                               lapack_complex_float* vs, lapack_int ldvs,
+                               lapack_complex_float* work, lapack_int lwork,
+                               float* rwork, lapack_logical* bwork );
+lapack_int LAPACKE_zgees_work( int matrix_order, char jobvs, char sort,
+                               LAPACK_Z_SELECT1 select, lapack_int n,
+                               lapack_complex_double* a, lapack_int lda,
+                               lapack_int* sdim, lapack_complex_double* w,
+                               lapack_complex_double* vs, lapack_int ldvs,
+                               lapack_complex_double* work, lapack_int lwork,
+                               double* rwork, lapack_logical* bwork );
+
+lapack_int LAPACKE_sgeesx_work( int matrix_order, char jobvs, char sort,
+                                LAPACK_S_SELECT2 select, char sense,
+                                lapack_int n, float* a, lapack_int lda,
+                                lapack_int* sdim, float* wr, float* wi,
+                                float* vs, lapack_int ldvs, float* rconde,
+                                float* rcondv, float* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_int liwork,
+                                lapack_logical* bwork );
+lapack_int LAPACKE_dgeesx_work( int matrix_order, char jobvs, char sort,
+                                LAPACK_D_SELECT2 select, char sense,
+                                lapack_int n, double* a, lapack_int lda,
+                                lapack_int* sdim, double* wr, double* wi,
+                                double* vs, lapack_int ldvs, double* rconde,
+                                double* rcondv, double* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_int liwork,
+                                lapack_logical* bwork );
+lapack_int LAPACKE_cgeesx_work( int matrix_order, char jobvs, char sort,
+                                LAPACK_C_SELECT1 select, char sense,
+                                lapack_int n, lapack_complex_float* a,
+                                lapack_int lda, lapack_int* sdim,
+                                lapack_complex_float* w,
+                                lapack_complex_float* vs, lapack_int ldvs,
+                                float* rconde, float* rcondv,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork, lapack_logical* bwork );
+lapack_int LAPACKE_zgeesx_work( int matrix_order, char jobvs, char sort,
+                                LAPACK_Z_SELECT1 select, char sense,
+                                lapack_int n, lapack_complex_double* a,
+                                lapack_int lda, lapack_int* sdim,
+                                lapack_complex_double* w,
+                                lapack_complex_double* vs, lapack_int ldvs,
+                                double* rconde, double* rcondv,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork, lapack_logical* bwork );
+
+lapack_int LAPACKE_sgeev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, float* a, lapack_int lda,
+                               float* wr, float* wi, float* vl, lapack_int ldvl,
+                               float* vr, lapack_int ldvr, float* work,
+                               lapack_int lwork );
+lapack_int LAPACKE_dgeev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, double* a, lapack_int lda,
+                               double* wr, double* wi, double* vl,
+                               lapack_int ldvl, double* vr, lapack_int ldvr,
+                               double* work, lapack_int lwork );
+lapack_int LAPACKE_cgeev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, lapack_complex_float* a,
+                               lapack_int lda, lapack_complex_float* w,
+                               lapack_complex_float* vl, lapack_int ldvl,
+                               lapack_complex_float* vr, lapack_int ldvr,
+                               lapack_complex_float* work, lapack_int lwork,
+                               float* rwork );
+lapack_int LAPACKE_zgeev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, lapack_complex_double* a,
+                               lapack_int lda, lapack_complex_double* w,
+                               lapack_complex_double* vl, lapack_int ldvl,
+                               lapack_complex_double* vr, lapack_int ldvr,
+                               lapack_complex_double* work, lapack_int lwork,
+                               double* rwork );
+
+lapack_int LAPACKE_sgeevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n, float* a,
+                                lapack_int lda, float* wr, float* wi, float* vl,
+                                lapack_int ldvl, float* vr, lapack_int ldvr,
+                                lapack_int* ilo, lapack_int* ihi, float* scale,
+                                float* abnrm, float* rconde, float* rcondv,
+                                float* work, lapack_int lwork,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgeevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n, double* a,
+                                lapack_int lda, double* wr, double* wi,
+                                double* vl, lapack_int ldvl, double* vr,
+                                lapack_int ldvr, lapack_int* ilo,
+                                lapack_int* ihi, double* scale, double* abnrm,
+                                double* rconde, double* rcondv, double* work,
+                                lapack_int lwork, lapack_int* iwork );
+lapack_int LAPACKE_cgeevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* w,
+                                lapack_complex_float* vl, lapack_int ldvl,
+                                lapack_complex_float* vr, lapack_int ldvr,
+                                lapack_int* ilo, lapack_int* ihi, float* scale,
+                                float* abnrm, float* rconde, float* rcondv,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork );
+lapack_int LAPACKE_zgeevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* w,
+                                lapack_complex_double* vl, lapack_int ldvl,
+                                lapack_complex_double* vr, lapack_int ldvr,
+                                lapack_int* ilo, lapack_int* ihi, double* scale,
+                                double* abnrm, double* rconde, double* rcondv,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork );
+
+lapack_int LAPACKE_sgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, float* a, lapack_int lda,
+                                float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, double* a, lapack_int lda,
+                                double* tau, double* work, lapack_int lwork );
+lapack_int LAPACKE_cgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, lapack_complex_float* a,
+                                lapack_int lda, lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, lapack_complex_double* a,
+                                lapack_int lda, lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgejsv_work( int matrix_order, char joba, char jobu,
+                                char jobv, char jobr, char jobt, char jobp,
+                                lapack_int m, lapack_int n, float* a,
+                                lapack_int lda, float* sva, float* u,
+                                lapack_int ldu, float* v, lapack_int ldv,
+                                float* work, lapack_int lwork,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgejsv_work( int matrix_order, char joba, char jobu,
+                                char jobv, char jobr, char jobt, char jobp,
+                                lapack_int m, lapack_int n, double* a,
+                                lapack_int lda, double* sva, double* u,
+                                lapack_int ldu, double* v, lapack_int ldv,
+                                double* work, lapack_int lwork,
+                                lapack_int* iwork );
+
+lapack_int LAPACKE_sgelq2_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work );
+lapack_int LAPACKE_dgelq2_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work );
+lapack_int LAPACKE_cgelq2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work );
+lapack_int LAPACKE_zgelq2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work );
+
+lapack_int LAPACKE_sgelqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgelqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgelqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgelqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgels_work( int matrix_order, char trans, lapack_int m,
+                               lapack_int n, lapack_int nrhs, float* a,
+                               lapack_int lda, float* b, lapack_int ldb,
+                               float* work, lapack_int lwork );
+lapack_int LAPACKE_dgels_work( int matrix_order, char trans, lapack_int m,
+                               lapack_int n, lapack_int nrhs, double* a,
+                               lapack_int lda, double* b, lapack_int ldb,
+                               double* work, lapack_int lwork );
+lapack_int LAPACKE_cgels_work( int matrix_order, char trans, lapack_int m,
+                               lapack_int n, lapack_int nrhs,
+                               lapack_complex_float* a, lapack_int lda,
+                               lapack_complex_float* b, lapack_int ldb,
+                               lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgels_work( int matrix_order, char trans, lapack_int m,
+                               lapack_int n, lapack_int nrhs,
+                               lapack_complex_double* a, lapack_int lda,
+                               lapack_complex_double* b, lapack_int ldb,
+                               lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgelsd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, float* a, lapack_int lda,
+                                float* b, lapack_int ldb, float* s, float rcond,
+                                lapack_int* rank, float* work, lapack_int lwork,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgelsd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, double* a, lapack_int lda,
+                                double* b, lapack_int ldb, double* s,
+                                double rcond, lapack_int* rank, double* work,
+                                lapack_int lwork, lapack_int* iwork );
+lapack_int LAPACKE_cgelsd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_float* a,
+                                lapack_int lda, lapack_complex_float* b,
+                                lapack_int ldb, float* s, float rcond,
+                                lapack_int* rank, lapack_complex_float* work,
+                                lapack_int lwork, float* rwork,
+                                lapack_int* iwork );
+lapack_int LAPACKE_zgelsd_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_double* a,
+                                lapack_int lda, lapack_complex_double* b,
+                                lapack_int ldb, double* s, double rcond,
+                                lapack_int* rank, lapack_complex_double* work,
+                                lapack_int lwork, double* rwork,
+                                lapack_int* iwork );
+
+lapack_int LAPACKE_sgelss_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, float* a, lapack_int lda,
+                                float* b, lapack_int ldb, float* s, float rcond,
+                                lapack_int* rank, float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_dgelss_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, double* a, lapack_int lda,
+                                double* b, lapack_int ldb, double* s,
+                                double rcond, lapack_int* rank, double* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_cgelss_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_float* a,
+                                lapack_int lda, lapack_complex_float* b,
+                                lapack_int ldb, float* s, float rcond,
+                                lapack_int* rank, lapack_complex_float* work,
+                                lapack_int lwork, float* rwork );
+lapack_int LAPACKE_zgelss_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_double* a,
+                                lapack_int lda, lapack_complex_double* b,
+                                lapack_int ldb, double* s, double rcond,
+                                lapack_int* rank, lapack_complex_double* work,
+                                lapack_int lwork, double* rwork );
+
+lapack_int LAPACKE_sgelsy_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, float* a, lapack_int lda,
+                                float* b, lapack_int ldb, lapack_int* jpvt,
+                                float rcond, lapack_int* rank, float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_dgelsy_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, double* a, lapack_int lda,
+                                double* b, lapack_int ldb, lapack_int* jpvt,
+                                double rcond, lapack_int* rank, double* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_cgelsy_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_float* a,
+                                lapack_int lda, lapack_complex_float* b,
+                                lapack_int ldb, lapack_int* jpvt, float rcond,
+                                lapack_int* rank, lapack_complex_float* work,
+                                lapack_int lwork, float* rwork );
+lapack_int LAPACKE_zgelsy_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_int nrhs, lapack_complex_double* a,
+                                lapack_int lda, lapack_complex_double* b,
+                                lapack_int ldb, lapack_int* jpvt, double rcond,
+                                lapack_int* rank, lapack_complex_double* work,
+                                lapack_int lwork, double* rwork );
+
+lapack_int LAPACKE_sgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, lapack_int* jpvt,
+                                float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, lapack_int* jpvt,
+                                double* tau, double* work, lapack_int lwork );
+lapack_int LAPACKE_cgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* jpvt, lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork );
+lapack_int LAPACKE_zgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* jpvt, lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork );
+
+lapack_int LAPACKE_sgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, lapack_int* jpvt,
+                                float* tau, float* work );
+lapack_int LAPACKE_dgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, lapack_int* jpvt,
+                                double* tau, double* work );
+lapack_int LAPACKE_cgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* jpvt, lapack_complex_float* tau,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* jpvt, lapack_complex_double* tau,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work );
+lapack_int LAPACKE_dgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work );
+lapack_int LAPACKE_cgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work );
+lapack_int LAPACKE_zgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work );
+
+lapack_int LAPACKE_sgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
+                                 float* a, lapack_int lda, float* tau,
+                                 float* work, lapack_int lwork );
+lapack_int LAPACKE_dgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
+                                 double* a, lapack_int lda, double* tau,
+                                 double* work, lapack_int lwork );
+lapack_int LAPACKE_cgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_complex_float* a, lapack_int lda,
+                                 lapack_complex_float* tau,
+                                 lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
+                                 lapack_complex_double* a, lapack_int lda,
+                                 lapack_complex_double* tau,
+                                 lapack_complex_double* work,
+                                 lapack_int lwork );
+
+lapack_int LAPACKE_sgerfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const float* a, lapack_int lda,
+                                const float* af, lapack_int ldaf,
+                                const lapack_int* ipiv, const float* b,
+                                lapack_int ldb, float* x, lapack_int ldx,
+                                float* ferr, float* berr, float* work,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgerfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const double* a,
+                                lapack_int lda, const double* af,
+                                lapack_int ldaf, const lapack_int* ipiv,
+                                const double* b, lapack_int ldb, double* x,
+                                lapack_int ldx, double* ferr, double* berr,
+                                double* work, lapack_int* iwork );
+lapack_int LAPACKE_cgerfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_float* a,
+                                lapack_int lda, const lapack_complex_float* af,
+                                lapack_int ldaf, const lapack_int* ipiv,
+                                const lapack_complex_float* b, lapack_int ldb,
+                                lapack_complex_float* x, lapack_int ldx,
+                                float* ferr, float* berr,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgerfs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_double* a,
+                                lapack_int lda, const lapack_complex_double* af,
+                                lapack_int ldaf, const lapack_int* ipiv,
+                                const lapack_complex_double* b, lapack_int ldb,
+                                lapack_complex_double* x, lapack_int ldx,
+                                double* ferr, double* berr,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgerfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int nrhs, const float* a,
+                                 lapack_int lda, const float* af,
+                                 lapack_int ldaf, const lapack_int* ipiv,
+                                 const float* r, const float* c, const float* b,
+                                 lapack_int ldb, float* x, lapack_int ldx,
+                                 float* rcond, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, float* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_dgerfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int nrhs, const double* a,
+                                 lapack_int lda, const double* af,
+                                 lapack_int ldaf, const lapack_int* ipiv,
+                                 const double* r, const double* c,
+                                 const double* b, lapack_int ldb, double* x,
+                                 lapack_int ldx, double* rcond, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, double* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_cgerfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int nrhs,
+                                 const lapack_complex_float* a, lapack_int lda,
+                                 const lapack_complex_float* af,
+                                 lapack_int ldaf, const lapack_int* ipiv,
+                                 const float* r, const float* c,
+                                 const lapack_complex_float* b, lapack_int ldb,
+                                 lapack_complex_float* x, lapack_int ldx,
+                                 float* rcond, float* berr,
+                                 lapack_int n_err_bnds, float* err_bnds_norm,
+                                 float* err_bnds_comp, lapack_int nparams,
+                                 float* params, lapack_complex_float* work,
+                                 float* rwork );
+lapack_int LAPACKE_zgerfsx_work( int matrix_order, char trans, char equed,
+                                 lapack_int n, lapack_int nrhs,
+                                 const lapack_complex_double* a, lapack_int lda,
+                                 const lapack_complex_double* af,
+                                 lapack_int ldaf, const lapack_int* ipiv,
+                                 const double* r, const double* c,
+                                 const lapack_complex_double* b, lapack_int ldb,
+                                 lapack_complex_double* x, lapack_int ldx,
+                                 double* rcond, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, lapack_complex_double* work,
+                                 double* rwork );
+
+lapack_int LAPACKE_sgerqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, float* tau,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgerqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* tau,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgerqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* tau,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgerqf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgesdd_work( int matrix_order, char jobz, lapack_int m,
+                                lapack_int n, float* a, lapack_int lda,
+                                float* s, float* u, lapack_int ldu, float* vt,
+                                lapack_int ldvt, float* work, lapack_int lwork,
+                                lapack_int* iwork );
+lapack_int LAPACKE_dgesdd_work( int matrix_order, char jobz, lapack_int m,
+                                lapack_int n, double* a, lapack_int lda,
+                                double* s, double* u, lapack_int ldu,
+                                double* vt, lapack_int ldvt, double* work,
+                                lapack_int lwork, lapack_int* iwork );
+lapack_int LAPACKE_cgesdd_work( int matrix_order, char jobz, lapack_int m,
+                                lapack_int n, lapack_complex_float* a,
+                                lapack_int lda, float* s,
+                                lapack_complex_float* u, lapack_int ldu,
+                                lapack_complex_float* vt, lapack_int ldvt,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork, lapack_int* iwork );
+lapack_int LAPACKE_zgesdd_work( int matrix_order, char jobz, lapack_int m,
+                                lapack_int n, lapack_complex_double* a,
+                                lapack_int lda, double* s,
+                                lapack_complex_double* u, lapack_int ldu,
+                                lapack_complex_double* vt, lapack_int ldvt,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork, lapack_int* iwork );
+
+lapack_int LAPACKE_sgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                               float* a, lapack_int lda, lapack_int* ipiv,
+                               float* b, lapack_int ldb );
+lapack_int LAPACKE_dgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                               double* a, lapack_int lda, lapack_int* ipiv,
+                               double* b, lapack_int ldb );
+lapack_int LAPACKE_cgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                               lapack_complex_float* a, lapack_int lda,
+                               lapack_int* ipiv, lapack_complex_float* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_zgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                               lapack_complex_double* a, lapack_int lda,
+                               lapack_int* ipiv, lapack_complex_double* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_dsgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                                double* a, lapack_int lda, lapack_int* ipiv,
+                                double* b, lapack_int ldb, double* x,
+                                lapack_int ldx, double* work, float* swork,
+                                lapack_int* iter );
+lapack_int LAPACKE_zcgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* ipiv, lapack_complex_double* b,
+                                lapack_int ldb, lapack_complex_double* x,
+                                lapack_int ldx, lapack_complex_double* work,
+                                lapack_complex_float* swork, double* rwork,
+                                lapack_int* iter );
+
+lapack_int LAPACKE_sgesvd_work( int matrix_order, char jobu, char jobvt,
+                                lapack_int m, lapack_int n, float* a,
+                                lapack_int lda, float* s, float* u,
+                                lapack_int ldu, float* vt, lapack_int ldvt,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgesvd_work( int matrix_order, char jobu, char jobvt,
+                                lapack_int m, lapack_int n, double* a,
+                                lapack_int lda, double* s, double* u,
+                                lapack_int ldu, double* vt, lapack_int ldvt,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgesvd_work( int matrix_order, char jobu, char jobvt,
+                                lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                float* s, lapack_complex_float* u,
+                                lapack_int ldu, lapack_complex_float* vt,
+                                lapack_int ldvt, lapack_complex_float* work,
+                                lapack_int lwork, float* rwork );
+lapack_int LAPACKE_zgesvd_work( int matrix_order, char jobu, char jobvt,
+                                lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                double* s, lapack_complex_double* u,
+                                lapack_int ldu, lapack_complex_double* vt,
+                                lapack_int ldvt, lapack_complex_double* work,
+                                lapack_int lwork, double* rwork );
+
+lapack_int LAPACKE_sgesvj_work( int matrix_order, char joba, char jobu,
+                                char jobv, lapack_int m, lapack_int n, float* a,
+                                lapack_int lda, float* sva, lapack_int mv,
+                                float* v, lapack_int ldv, float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_dgesvj_work( int matrix_order, char joba, char jobu,
+                                char jobv, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, double* sva,
+                                lapack_int mv, double* v, lapack_int ldv,
+                                double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgesvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int nrhs, float* a,
+                                lapack_int lda, float* af, lapack_int ldaf,
+                                lapack_int* ipiv, char* equed, float* r,
+                                float* c, float* b, lapack_int ldb, float* x,
+                                lapack_int ldx, float* rcond, float* ferr,
+                                float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dgesvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int nrhs, double* a,
+                                lapack_int lda, double* af, lapack_int ldaf,
+                                lapack_int* ipiv, char* equed, double* r,
+                                double* c, double* b, lapack_int ldb, double* x,
+                                lapack_int ldx, double* rcond, double* ferr,
+                                double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cgesvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int nrhs,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* af, lapack_int ldaf,
+                                lapack_int* ipiv, char* equed, float* r,
+                                float* c, lapack_complex_float* b,
+                                lapack_int ldb, lapack_complex_float* x,
+                                lapack_int ldx, float* rcond, float* ferr,
+                                float* berr, lapack_complex_float* work,
+                                float* rwork );
+lapack_int LAPACKE_zgesvx_work( int matrix_order, char fact, char trans,
+                                lapack_int n, lapack_int nrhs,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* af, lapack_int ldaf,
+                                lapack_int* ipiv, char* equed, double* r,
+                                double* c, lapack_complex_double* b,
+                                lapack_int ldb, lapack_complex_double* x,
+                                lapack_int ldx, double* rcond, double* ferr,
+                                double* berr, lapack_complex_double* work,
+                                double* rwork );
+
+lapack_int LAPACKE_sgesvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int nrhs, float* a,
+                                 lapack_int lda, float* af, lapack_int ldaf,
+                                 lapack_int* ipiv, char* equed, float* r,
+                                 float* c, float* b, lapack_int ldb, float* x,
+                                 lapack_int ldx, float* rcond, float* rpvgrw,
+                                 float* berr, lapack_int n_err_bnds,
+                                 float* err_bnds_norm, float* err_bnds_comp,
+                                 lapack_int nparams, float* params, float* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_dgesvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int nrhs, double* a,
+                                 lapack_int lda, double* af, lapack_int ldaf,
+                                 lapack_int* ipiv, char* equed, double* r,
+                                 double* c, double* b, lapack_int ldb,
+                                 double* x, lapack_int ldx, double* rcond,
+                                 double* rpvgrw, double* berr,
+                                 lapack_int n_err_bnds, double* err_bnds_norm,
+                                 double* err_bnds_comp, lapack_int nparams,
+                                 double* params, double* work,
+                                 lapack_int* iwork );
+lapack_int LAPACKE_cgesvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int nrhs,
+                                 lapack_complex_float* a, lapack_int lda,
+                                 lapack_complex_float* af, lapack_int ldaf,
+                                 lapack_int* ipiv, char* equed, float* r,
+                                 float* c, lapack_complex_float* b,
+                                 lapack_int ldb, lapack_complex_float* x,
+                                 lapack_int ldx, float* rcond, float* rpvgrw,
+                                 float* berr, lapack_int n_err_bnds,
+                                 float* err_bnds_norm, float* err_bnds_comp,
+                                 lapack_int nparams, float* params,
+                                 lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zgesvxx_work( int matrix_order, char fact, char trans,
+                                 lapack_int n, lapack_int nrhs,
+                                 lapack_complex_double* a, lapack_int lda,
+                                 lapack_complex_double* af, lapack_int ldaf,
+                                 lapack_int* ipiv, char* equed, double* r,
+                                 double* c, lapack_complex_double* b,
+                                 lapack_int ldb, lapack_complex_double* x,
+                                 lapack_int ldx, double* rcond, double* rpvgrw,
+                                 double* berr, lapack_int n_err_bnds,
+                                 double* err_bnds_norm, double* err_bnds_comp,
+                                 lapack_int nparams, double* params,
+                                 lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sgetf2_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_dgetf2_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_cgetf2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* ipiv );
+lapack_int LAPACKE_zgetf2_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* ipiv );
+
+lapack_int LAPACKE_sgetrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                float* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_dgetrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                double* a, lapack_int lda, lapack_int* ipiv );
+lapack_int LAPACKE_cgetrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* ipiv );
+lapack_int LAPACKE_zgetrf_work( int matrix_order, lapack_int m, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* ipiv );
+
+lapack_int LAPACKE_sgetri_work( int matrix_order, lapack_int n, float* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dgetri_work( int matrix_order, lapack_int n, double* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_cgetri_work( int matrix_order, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                const lapack_int* ipiv,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zgetri_work( int matrix_order, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                const lapack_int* ipiv,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sgetrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const float* a, lapack_int lda,
+                                const lapack_int* ipiv, float* b,
+                                lapack_int ldb );
+lapack_int LAPACKE_dgetrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const double* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                double* b, lapack_int ldb );
+lapack_int LAPACKE_cgetrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_float* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zgetrs_work( int matrix_order, char trans, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_double* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sggbak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const float* lscale, const float* rscale,
+                                lapack_int m, float* v, lapack_int ldv );
+lapack_int LAPACKE_dggbak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const double* lscale, const double* rscale,
+                                lapack_int m, double* v, lapack_int ldv );
+lapack_int LAPACKE_cggbak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const float* lscale, const float* rscale,
+                                lapack_int m, lapack_complex_float* v,
+                                lapack_int ldv );
+lapack_int LAPACKE_zggbak_work( int matrix_order, char job, char side,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                const double* lscale, const double* rscale,
+                                lapack_int m, lapack_complex_double* v,
+                                lapack_int ldv );
+
+lapack_int LAPACKE_sggbal_work( int matrix_order, char job, lapack_int n,
+                                float* a, lapack_int lda, float* b,
+                                lapack_int ldb, lapack_int* ilo,
+                                lapack_int* ihi, float* lscale, float* rscale,
+                                float* work );
+lapack_int LAPACKE_dggbal_work( int matrix_order, char job, lapack_int n,
+                                double* a, lapack_int lda, double* b,
+                                lapack_int ldb, lapack_int* ilo,
+                                lapack_int* ihi, double* lscale, double* rscale,
+                                double* work );
+lapack_int LAPACKE_cggbal_work( int matrix_order, char job, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* b, lapack_int ldb,
+                                lapack_int* ilo, lapack_int* ihi, float* lscale,
+                                float* rscale, float* work );
+lapack_int LAPACKE_zggbal_work( int matrix_order, char job, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* b, lapack_int ldb,
+                                lapack_int* ilo, lapack_int* ihi,
+                                double* lscale, double* rscale, double* work );
+
+lapack_int LAPACKE_sgges_work( int matrix_order, char jobvsl, char jobvsr,
+                               char sort, LAPACK_S_SELECT3 selctg, lapack_int n,
+                               float* a, lapack_int lda, float* b,
+                               lapack_int ldb, lapack_int* sdim, float* alphar,
+                               float* alphai, float* beta, float* vsl,
+                               lapack_int ldvsl, float* vsr, lapack_int ldvsr,
+                               float* work, lapack_int lwork,
+                               lapack_logical* bwork );
+lapack_int LAPACKE_dgges_work( int matrix_order, char jobvsl, char jobvsr,
+                               char sort, LAPACK_D_SELECT3 selctg, lapack_int n,
+                               double* a, lapack_int lda, double* b,
+                               lapack_int ldb, lapack_int* sdim, double* alphar,
+                               double* alphai, double* beta, double* vsl,
+                               lapack_int ldvsl, double* vsr, lapack_int ldvsr,
+                               double* work, lapack_int lwork,
+                               lapack_logical* bwork );
+lapack_int LAPACKE_cgges_work( int matrix_order, char jobvsl, char jobvsr,
+                               char sort, LAPACK_C_SELECT2 selctg, lapack_int n,
+                               lapack_complex_float* a, lapack_int lda,
+                               lapack_complex_float* b, lapack_int ldb,
+                               lapack_int* sdim, lapack_complex_float* alpha,
+                               lapack_complex_float* beta,
+                               lapack_complex_float* vsl, lapack_int ldvsl,
+                               lapack_complex_float* vsr, lapack_int ldvsr,
+                               lapack_complex_float* work, lapack_int lwork,
+                               float* rwork, lapack_logical* bwork );
+lapack_int LAPACKE_zgges_work( int matrix_order, char jobvsl, char jobvsr,
+                               char sort, LAPACK_Z_SELECT2 selctg, lapack_int n,
+                               lapack_complex_double* a, lapack_int lda,
+                               lapack_complex_double* b, lapack_int ldb,
+                               lapack_int* sdim, lapack_complex_double* alpha,
+                               lapack_complex_double* beta,
+                               lapack_complex_double* vsl, lapack_int ldvsl,
+                               lapack_complex_double* vsr, lapack_int ldvsr,
+                               lapack_complex_double* work, lapack_int lwork,
+                               double* rwork, lapack_logical* bwork );
+
+lapack_int LAPACKE_sggesx_work( int matrix_order, char jobvsl, char jobvsr,
+                                char sort, LAPACK_S_SELECT3 selctg, char sense,
+                                lapack_int n, float* a, lapack_int lda,
+                                float* b, lapack_int ldb, lapack_int* sdim,
+                                float* alphar, float* alphai, float* beta,
+                                float* vsl, lapack_int ldvsl, float* vsr,
+                                lapack_int ldvsr, float* rconde, float* rcondv,
+                                float* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_int liwork,
+                                lapack_logical* bwork );
+lapack_int LAPACKE_dggesx_work( int matrix_order, char jobvsl, char jobvsr,
+                                char sort, LAPACK_D_SELECT3 selctg, char sense,
+                                lapack_int n, double* a, lapack_int lda,
+                                double* b, lapack_int ldb, lapack_int* sdim,
+                                double* alphar, double* alphai, double* beta,
+                                double* vsl, lapack_int ldvsl, double* vsr,
+                                lapack_int ldvsr, double* rconde,
+                                double* rcondv, double* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_int liwork,
+                                lapack_logical* bwork );
+lapack_int LAPACKE_cggesx_work( int matrix_order, char jobvsl, char jobvsr,
+                                char sort, LAPACK_C_SELECT2 selctg, char sense,
+                                lapack_int n, lapack_complex_float* a,
+                                lapack_int lda, lapack_complex_float* b,
+                                lapack_int ldb, lapack_int* sdim,
+                                lapack_complex_float* alpha,
+                                lapack_complex_float* beta,
+                                lapack_complex_float* vsl, lapack_int ldvsl,
+                                lapack_complex_float* vsr, lapack_int ldvsr,
+                                float* rconde, float* rcondv,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork, lapack_int* iwork,
+                                lapack_int liwork, lapack_logical* bwork );
+lapack_int LAPACKE_zggesx_work( int matrix_order, char jobvsl, char jobvsr,
+                                char sort, LAPACK_Z_SELECT2 selctg, char sense,
+                                lapack_int n, lapack_complex_double* a,
+                                lapack_int lda, lapack_complex_double* b,
+                                lapack_int ldb, lapack_int* sdim,
+                                lapack_complex_double* alpha,
+                                lapack_complex_double* beta,
+                                lapack_complex_double* vsl, lapack_int ldvsl,
+                                lapack_complex_double* vsr, lapack_int ldvsr,
+                                double* rconde, double* rcondv,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork, lapack_int* iwork,
+                                lapack_int liwork, lapack_logical* bwork );
+
+lapack_int LAPACKE_sggev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, float* a, lapack_int lda, float* b,
+                               lapack_int ldb, float* alphar, float* alphai,
+                               float* beta, float* vl, lapack_int ldvl,
+                               float* vr, lapack_int ldvr, float* work,
+                               lapack_int lwork );
+lapack_int LAPACKE_dggev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, double* a, lapack_int lda,
+                               double* b, lapack_int ldb, double* alphar,
+                               double* alphai, double* beta, double* vl,
+                               lapack_int ldvl, double* vr, lapack_int ldvr,
+                               double* work, lapack_int lwork );
+lapack_int LAPACKE_cggev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, lapack_complex_float* a,
+                               lapack_int lda, lapack_complex_float* b,
+                               lapack_int ldb, lapack_complex_float* alpha,
+                               lapack_complex_float* beta,
+                               lapack_complex_float* vl, lapack_int ldvl,
+                               lapack_complex_float* vr, lapack_int ldvr,
+                               lapack_complex_float* work, lapack_int lwork,
+                               float* rwork );
+lapack_int LAPACKE_zggev_work( int matrix_order, char jobvl, char jobvr,
+                               lapack_int n, lapack_complex_double* a,
+                               lapack_int lda, lapack_complex_double* b,
+                               lapack_int ldb, lapack_complex_double* alpha,
+                               lapack_complex_double* beta,
+                               lapack_complex_double* vl, lapack_int ldvl,
+                               lapack_complex_double* vr, lapack_int ldvr,
+                               lapack_complex_double* work, lapack_int lwork,
+                               double* rwork );
+
+lapack_int LAPACKE_sggevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n, float* a,
+                                lapack_int lda, float* b, lapack_int ldb,
+                                float* alphar, float* alphai, float* beta,
+                                float* vl, lapack_int ldvl, float* vr,
+                                lapack_int ldvr, lapack_int* ilo,
+                                lapack_int* ihi, float* lscale, float* rscale,
+                                float* abnrm, float* bbnrm, float* rconde,
+                                float* rcondv, float* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_logical* bwork );
+lapack_int LAPACKE_dggevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n, double* a,
+                                lapack_int lda, double* b, lapack_int ldb,
+                                double* alphar, double* alphai, double* beta,
+                                double* vl, lapack_int ldvl, double* vr,
+                                lapack_int ldvr, lapack_int* ilo,
+                                lapack_int* ihi, double* lscale, double* rscale,
+                                double* abnrm, double* bbnrm, double* rconde,
+                                double* rcondv, double* work, lapack_int lwork,
+                                lapack_int* iwork, lapack_logical* bwork );
+lapack_int LAPACKE_cggevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_complex_float* b, lapack_int ldb,
+                                lapack_complex_float* alpha,
+                                lapack_complex_float* beta,
+                                lapack_complex_float* vl, lapack_int ldvl,
+                                lapack_complex_float* vr, lapack_int ldvr,
+                                lapack_int* ilo, lapack_int* ihi, float* lscale,
+                                float* rscale, float* abnrm, float* bbnrm,
+                                float* rconde, float* rcondv,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork, lapack_int* iwork,
+                                lapack_logical* bwork );
+lapack_int LAPACKE_zggevx_work( int matrix_order, char balanc, char jobvl,
+                                char jobvr, char sense, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_complex_double* b, lapack_int ldb,
+                                lapack_complex_double* alpha,
+
lapack_complex_double* beta, + lapack_complex_double* vl, lapack_int ldvl, + lapack_complex_double* vr, lapack_int ldvr, + lapack_int* ilo, lapack_int* ihi, + double* lscale, double* rscale, double* abnrm, + double* bbnrm, double* rconde, double* rcondv, + lapack_complex_double* work, lapack_int lwork, + double* rwork, lapack_int* iwork, + lapack_logical* bwork ); + +lapack_int LAPACKE_sggglm_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, float* a, lapack_int lda, + float* b, lapack_int ldb, float* d, float* x, + float* y, float* work, lapack_int lwork ); +lapack_int LAPACKE_dggglm_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, double* a, lapack_int lda, + double* b, lapack_int ldb, double* d, double* x, + double* y, double* work, lapack_int lwork ); +lapack_int LAPACKE_cggglm_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* d, + lapack_complex_float* x, + lapack_complex_float* y, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zggglm_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb, lapack_complex_double* d, + lapack_complex_double* x, + lapack_complex_double* y, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_sgghrd_work( int matrix_order, char compq, char compz, + lapack_int n, lapack_int ilo, lapack_int ihi, + float* a, lapack_int lda, float* b, + lapack_int ldb, float* q, lapack_int ldq, + float* z, lapack_int ldz ); +lapack_int LAPACKE_dgghrd_work( int matrix_order, char compq, char compz, + lapack_int n, lapack_int ilo, lapack_int ihi, + double* a, lapack_int lda, double* b, + lapack_int ldb, double* q, lapack_int ldq, + double* z, lapack_int ldz ); +lapack_int LAPACKE_cgghrd_work( int matrix_order, char compq, char compz, + lapack_int n, lapack_int ilo, lapack_int ihi, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* z, lapack_int ldz ); +lapack_int LAPACKE_zgghrd_work( int matrix_order, char compq, char compz, + lapack_int n, lapack_int ilo, lapack_int ihi, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* z, lapack_int ldz ); + +lapack_int LAPACKE_sgglse_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int p, float* a, lapack_int lda, + float* b, lapack_int ldb, float* c, float* d, + float* x, float* work, lapack_int lwork ); +lapack_int LAPACKE_dgglse_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int p, double* a, lapack_int lda, + double* b, lapack_int ldb, double* c, double* d, + double* x, double* work, lapack_int lwork ); +lapack_int LAPACKE_cgglse_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int p, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* c, + lapack_complex_float* d, + lapack_complex_float* x, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zgglse_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int p, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb, lapack_complex_double* c, + lapack_complex_double* d, + lapack_complex_double* x, + lapack_complex_double* work, 
lapack_int lwork ); + +lapack_int LAPACKE_sggqrf_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, float* a, lapack_int lda, + float* taua, float* b, lapack_int ldb, + float* taub, float* work, lapack_int lwork ); +lapack_int LAPACKE_dggqrf_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, double* a, lapack_int lda, + double* taua, double* b, lapack_int ldb, + double* taub, double* work, lapack_int lwork ); +lapack_int LAPACKE_cggqrf_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* taua, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* taub, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zggqrf_work( int matrix_order, lapack_int n, lapack_int m, + lapack_int p, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* taua, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* taub, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_sggrqf_work( int matrix_order, lapack_int m, lapack_int p, + lapack_int n, float* a, lapack_int lda, + float* taua, float* b, lapack_int ldb, + float* taub, float* work, lapack_int lwork ); +lapack_int LAPACKE_dggrqf_work( int matrix_order, lapack_int m, lapack_int p, + lapack_int n, double* a, lapack_int lda, + double* taua, double* b, lapack_int ldb, + double* taub, double* work, lapack_int lwork ); +lapack_int LAPACKE_cggrqf_work( int matrix_order, lapack_int m, lapack_int p, + lapack_int n, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* taua, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* taub, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zggrqf_work( int matrix_order, lapack_int m, lapack_int p, + lapack_int n, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* taua, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* taub, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_sggsvd_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int n, + lapack_int p, lapack_int* k, lapack_int* l, + float* a, lapack_int lda, float* b, + lapack_int ldb, float* alpha, float* beta, + float* u, lapack_int ldu, float* v, + lapack_int ldv, float* q, lapack_int ldq, + float* work, lapack_int* iwork ); +lapack_int LAPACKE_dggsvd_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int n, + lapack_int p, lapack_int* k, lapack_int* l, + double* a, lapack_int lda, double* b, + lapack_int ldb, double* alpha, double* beta, + double* u, lapack_int ldu, double* v, + lapack_int ldv, double* q, lapack_int ldq, + double* work, lapack_int* iwork ); +lapack_int LAPACKE_cggsvd_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int n, + lapack_int p, lapack_int* k, lapack_int* l, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + float* alpha, float* beta, + lapack_complex_float* u, lapack_int ldu, + lapack_complex_float* v, lapack_int ldv, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* work, float* rwork, + lapack_int* iwork ); +lapack_int LAPACKE_zggsvd_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int n, + lapack_int p, lapack_int* k, lapack_int* l, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + double* alpha, double* beta, + lapack_complex_double* u, 
lapack_int ldu, + lapack_complex_double* v, lapack_int ldv, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* work, double* rwork, + lapack_int* iwork ); + +lapack_int LAPACKE_sggsvp_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, float* a, lapack_int lda, + float* b, lapack_int ldb, float tola, + float tolb, lapack_int* k, lapack_int* l, + float* u, lapack_int ldu, float* v, + lapack_int ldv, float* q, lapack_int ldq, + lapack_int* iwork, float* tau, float* work ); +lapack_int LAPACKE_dggsvp_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, double* a, lapack_int lda, + double* b, lapack_int ldb, double tola, + double tolb, lapack_int* k, lapack_int* l, + double* u, lapack_int ldu, double* v, + lapack_int ldv, double* q, lapack_int ldq, + lapack_int* iwork, double* tau, double* work ); +lapack_int LAPACKE_cggsvp_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb, float tola, float tolb, + lapack_int* k, lapack_int* l, + lapack_complex_float* u, lapack_int ldu, + lapack_complex_float* v, lapack_int ldv, + lapack_complex_float* q, lapack_int ldq, + lapack_int* iwork, float* rwork, + lapack_complex_float* tau, + lapack_complex_float* work ); +lapack_int LAPACKE_zggsvp_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb, double tola, double tolb, + lapack_int* k, lapack_int* l, + lapack_complex_double* u, lapack_int ldu, + lapack_complex_double* v, lapack_int ldv, + lapack_complex_double* q, lapack_int ldq, + lapack_int* iwork, double* rwork, + lapack_complex_double* tau, + lapack_complex_double* work ); + +lapack_int LAPACKE_sgtcon_work( char norm, lapack_int n, const float* dl, + const float* d, const float* du, + const float* du2, const lapack_int* ipiv, + float anorm, float* rcond, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dgtcon_work( char norm, lapack_int n, const double* dl, + const double* d, const double* du, + const double* du2, const lapack_int* ipiv, + double anorm, double* rcond, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_cgtcon_work( char norm, lapack_int n, + const lapack_complex_float* dl, + const lapack_complex_float* d, + const lapack_complex_float* du, + const lapack_complex_float* du2, + const lapack_int* ipiv, float anorm, + float* rcond, lapack_complex_float* work ); +lapack_int LAPACKE_zgtcon_work( char norm, lapack_int n, + const lapack_complex_double* dl, + const lapack_complex_double* d, + const lapack_complex_double* du, + const lapack_complex_double* du2, + const lapack_int* ipiv, double anorm, + double* rcond, lapack_complex_double* work ); + +lapack_int LAPACKE_sgtrfs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const float* dl, + const float* d, const float* du, + const float* dlf, const float* df, + const float* duf, const float* du2, + const lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* ferr, float* berr, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dgtrfs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const double* dl, + const double* d, const double* du, + const double* dlf, const double* df, + const double* duf, const double* du2, + const 
lapack_int* ipiv, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* ferr, double* berr, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_cgtrfs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const lapack_complex_float* dl, + const lapack_complex_float* d, + const lapack_complex_float* du, + const lapack_complex_float* dlf, + const lapack_complex_float* df, + const lapack_complex_float* duf, + const lapack_complex_float* du2, + const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_zgtrfs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, + const lapack_complex_double* dl, + const lapack_complex_double* d, + const lapack_complex_double* du, + const lapack_complex_double* dlf, + const lapack_complex_double* df, + const lapack_complex_double* duf, + const lapack_complex_double* du2, + const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_sgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs, + float* dl, float* d, float* du, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs, + double* dl, double* d, double* du, double* b, + lapack_int ldb ); +lapack_int LAPACKE_cgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs, + lapack_complex_float* dl, + lapack_complex_float* d, + lapack_complex_float* du, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_zgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs, + lapack_complex_double* dl, + lapack_complex_double* d, + lapack_complex_double* du, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_sgtsvx_work( int matrix_order, char fact, char trans, + lapack_int n, lapack_int nrhs, const float* dl, + const float* d, const float* du, float* dlf, + float* df, float* duf, float* du2, + lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr, + float* work, lapack_int* iwork ); +lapack_int LAPACKE_dgtsvx_work( int matrix_order, char fact, char trans, + lapack_int n, lapack_int nrhs, const double* dl, + const double* d, const double* du, double* dlf, + double* df, double* duf, double* du2, + lapack_int* ipiv, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + double* work, lapack_int* iwork ); +lapack_int LAPACKE_cgtsvx_work( int matrix_order, char fact, char trans, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* dl, + const lapack_complex_float* d, + const lapack_complex_float* du, + lapack_complex_float* dlf, + lapack_complex_float* df, + lapack_complex_float* duf, + lapack_complex_float* du2, lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_zgtsvx_work( int matrix_order, char fact, char trans, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* dl, + const lapack_complex_double* d, + const lapack_complex_double* du, + lapack_complex_double* dlf, + lapack_complex_double* df, + lapack_complex_double* duf, + lapack_complex_double* du2, lapack_int* ipiv, + 
const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_sgttrf_work( lapack_int n, float* dl, float* d, float* du, + float* du2, lapack_int* ipiv ); +lapack_int LAPACKE_dgttrf_work( lapack_int n, double* dl, double* d, double* du, + double* du2, lapack_int* ipiv ); +lapack_int LAPACKE_cgttrf_work( lapack_int n, lapack_complex_float* dl, + lapack_complex_float* d, + lapack_complex_float* du, + lapack_complex_float* du2, lapack_int* ipiv ); +lapack_int LAPACKE_zgttrf_work( lapack_int n, lapack_complex_double* dl, + lapack_complex_double* d, + lapack_complex_double* du, + lapack_complex_double* du2, lapack_int* ipiv ); + +lapack_int LAPACKE_sgttrs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const float* dl, + const float* d, const float* du, + const float* du2, const lapack_int* ipiv, + float* b, lapack_int ldb ); +lapack_int LAPACKE_dgttrs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const double* dl, + const double* d, const double* du, + const double* du2, const lapack_int* ipiv, + double* b, lapack_int ldb ); +lapack_int LAPACKE_cgttrs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, const lapack_complex_float* dl, + const lapack_complex_float* d, + const lapack_complex_float* du, + const lapack_complex_float* du2, + const lapack_int* ipiv, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zgttrs_work( int matrix_order, char trans, lapack_int n, + lapack_int nrhs, + const lapack_complex_double* dl, + const lapack_complex_double* d, + const lapack_complex_double* du, + const lapack_complex_double* du2, + const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_chbev_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_float* ab, lapack_int ldab, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zhbev_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_double* ab, lapack_int ldab, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_chbevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_float* ab, lapack_int ldab, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + lapack_int lwork, float* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_zhbevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_double* ab, lapack_int ldab, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + lapack_int lwork, double* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); + +lapack_int LAPACKE_chbevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, lapack_int kd, + lapack_complex_float* ab, lapack_int ldab, + lapack_complex_float* q, lapack_int ldq, + float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + float* rwork, lapack_int* iwork, + lapack_int* ifail ); +lapack_int LAPACKE_zhbevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, lapack_int kd, + 
lapack_complex_double* ab, lapack_int ldab, + lapack_complex_double* q, lapack_int ldq, + double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + double* rwork, lapack_int* iwork, + lapack_int* ifail ); + +lapack_int LAPACKE_chbgst_work( int matrix_order, char vect, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_float* ab, lapack_int ldab, + const lapack_complex_float* bb, lapack_int ldbb, + lapack_complex_float* x, lapack_int ldx, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_zhbgst_work( int matrix_order, char vect, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_double* ab, lapack_int ldab, + const lapack_complex_double* bb, + lapack_int ldbb, lapack_complex_double* x, + lapack_int ldx, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_chbgv_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_float* ab, lapack_int ldab, + lapack_complex_float* bb, lapack_int ldbb, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zhbgv_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_double* ab, lapack_int ldab, + lapack_complex_double* bb, lapack_int ldbb, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_chbgvd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_float* ab, lapack_int ldab, + lapack_complex_float* bb, lapack_int ldbb, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + lapack_int lwork, float* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_zhbgvd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_int ka, lapack_int kb, + lapack_complex_double* ab, lapack_int ldab, + lapack_complex_double* bb, lapack_int ldbb, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + lapack_int lwork, double* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); + +lapack_int LAPACKE_chbgvx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, lapack_int ka, + lapack_int kb, lapack_complex_float* ab, + lapack_int ldab, lapack_complex_float* bb, + lapack_int ldbb, lapack_complex_float* q, + lapack_int ldq, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, + lapack_complex_float* z, lapack_int ldz, + lapack_complex_float* work, float* rwork, + lapack_int* iwork, lapack_int* ifail ); +lapack_int LAPACKE_zhbgvx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, lapack_int ka, + lapack_int kb, lapack_complex_double* ab, + lapack_int ldab, lapack_complex_double* bb, + lapack_int ldbb, lapack_complex_double* q, + lapack_int ldq, double vl, double vu, + lapack_int il, lapack_int iu, double abstol, + lapack_int* m, double* w, + lapack_complex_double* z, lapack_int ldz, + lapack_complex_double* work, double* rwork, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_chbtrd_work( int matrix_order, char vect, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_float* ab, lapack_int ldab, + float* d, float* e, lapack_complex_float* q, + lapack_int ldq, 
lapack_complex_float* work ); +lapack_int LAPACKE_zhbtrd_work( int matrix_order, char vect, char uplo, + lapack_int n, lapack_int kd, + lapack_complex_double* ab, lapack_int ldab, + double* d, double* e, lapack_complex_double* q, + lapack_int ldq, lapack_complex_double* work ); + +lapack_int LAPACKE_checon_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, float anorm, + float* rcond, lapack_complex_float* work ); +lapack_int LAPACKE_zhecon_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, double anorm, + double* rcond, lapack_complex_double* work ); + +lapack_int LAPACKE_cheequb_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + float* s, float* scond, float* amax, + lapack_complex_float* work ); +lapack_int LAPACKE_zheequb_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + double* s, double* scond, double* amax, + lapack_complex_double* work ); + +lapack_int LAPACKE_cheev_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_complex_float* a, + lapack_int lda, float* w, + lapack_complex_float* work, lapack_int lwork, + float* rwork ); +lapack_int LAPACKE_zheev_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_complex_double* a, + lapack_int lda, double* w, + lapack_complex_double* work, lapack_int lwork, + double* rwork ); + +lapack_int LAPACKE_cheevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_complex_float* a, + lapack_int lda, float* w, + lapack_complex_float* work, lapack_int lwork, + float* rwork, lapack_int lrwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_zheevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, lapack_complex_double* a, + lapack_int lda, double* w, + lapack_complex_double* work, lapack_int lwork, + double* rwork, lapack_int lrwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_cheevr_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_int* isuppz, + lapack_complex_float* work, lapack_int lwork, + float* rwork, lapack_int lrwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_zheevr_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_int* isuppz, + lapack_complex_double* work, lapack_int lwork, + double* rwork, lapack_int lrwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_cheevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + lapack_int lwork, float* rwork, + lapack_int* iwork, lapack_int* ifail ); +lapack_int LAPACKE_zheevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + 
double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + lapack_int lwork, double* rwork, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_chegst_work( int matrix_order, lapack_int itype, char uplo, + lapack_int n, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zhegst_work( int matrix_order, lapack_int itype, char uplo, + lapack_int n, lapack_complex_double* a, + lapack_int lda, const lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_chegv_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb, float* w, + lapack_complex_float* work, lapack_int lwork, + float* rwork ); +lapack_int LAPACKE_zhegv_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + double* w, lapack_complex_double* work, + lapack_int lwork, double* rwork ); + +lapack_int LAPACKE_chegvd_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + float* w, lapack_complex_float* work, + lapack_int lwork, float* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_zhegvd_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + double* w, lapack_complex_double* work, + lapack_int lwork, double* rwork, + lapack_int lrwork, lapack_int* iwork, + lapack_int liwork ); + +lapack_int LAPACKE_chegvx_work( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, + float* w, lapack_complex_float* z, + lapack_int ldz, lapack_complex_float* work, + lapack_int lwork, float* rwork, + lapack_int* iwork, lapack_int* ifail ); +lapack_int LAPACKE_zhegvx_work( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + double* w, lapack_complex_double* z, + lapack_int ldz, lapack_complex_double* work, + lapack_int lwork, double* rwork, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_cherfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_zherfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_complex_double* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_cherfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + 
const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* af, + lapack_int ldaf, const lapack_int* ipiv, + const float* s, const lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zherfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* af, + lapack_int ldaf, const lapack_int* ipiv, + const double* s, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_chesv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_float* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zhesv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_double* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_chesvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + lapack_complex_float* af, lapack_int ldaf, + lapack_int* ipiv, const lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* ferr, + float* berr, lapack_complex_float* work, + lapack_int lwork, float* rwork ); +lapack_int LAPACKE_zhesvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_double* af, lapack_int ldaf, + lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + lapack_complex_double* work, lapack_int lwork, + double* rwork ); + +lapack_int LAPACKE_chesvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, float* s, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* rpvgrw, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zhesvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, double* s, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* rpvgrw, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_chetrd_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + float* d, float* e, lapack_complex_float* tau, + 
                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zhetrd_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                double* d, double* e,
+                                lapack_complex_double* tau,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_chetrf_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                lapack_int* ipiv, lapack_complex_float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_zhetrf_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                lapack_int* ipiv, lapack_complex_double* work,
+                                lapack_int lwork );
+
+lapack_int LAPACKE_chetri_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_float* a, lapack_int lda,
+                                const lapack_int* ipiv,
+                                lapack_complex_float* work );
+lapack_int LAPACKE_zhetri_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* a, lapack_int lda,
+                                const lapack_int* ipiv,
+                                lapack_complex_double* work );
+
+lapack_int LAPACKE_chetrs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_float* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zhetrs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_double* a,
+                                lapack_int lda, const lapack_int* ipiv,
+                                lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_chfrk_work( int matrix_order, char transr, char uplo,
+                               char trans, lapack_int n, lapack_int k,
+                               float alpha, const lapack_complex_float* a,
+                               lapack_int lda, float beta,
+                               lapack_complex_float* c );
+lapack_int LAPACKE_zhfrk_work( int matrix_order, char transr, char uplo,
+                               char trans, lapack_int n, lapack_int k,
+                               double alpha, const lapack_complex_double* a,
+                               lapack_int lda, double beta,
+                               lapack_complex_double* c );
+
+lapack_int LAPACKE_shgeqz_work( int matrix_order, char job, char compq,
+                                char compz, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, float* h, lapack_int ldh,
+                                float* t, lapack_int ldt, float* alphar,
+                                float* alphai, float* beta, float* q,
+                                lapack_int ldq, float* z, lapack_int ldz,
+                                float* work, lapack_int lwork );
+lapack_int LAPACKE_dhgeqz_work( int matrix_order, char job, char compq,
+                                char compz, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, double* h, lapack_int ldh,
+                                double* t, lapack_int ldt, double* alphar,
+                                double* alphai, double* beta, double* q,
+                                lapack_int ldq, double* z, lapack_int ldz,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_chgeqz_work( int matrix_order, char job, char compq,
+                                char compz, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, lapack_complex_float* h,
+                                lapack_int ldh, lapack_complex_float* t,
+                                lapack_int ldt, lapack_complex_float* alpha,
+                                lapack_complex_float* beta,
+                                lapack_complex_float* q, lapack_int ldq,
+                                lapack_complex_float* z, lapack_int ldz,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork );
+lapack_int LAPACKE_zhgeqz_work( int matrix_order, char job, char compq,
+                                char compz, lapack_int n, lapack_int ilo,
+                                lapack_int ihi, lapack_complex_double* h,
+                                lapack_int ldh, lapack_complex_double* t,
+                                lapack_int ldt, lapack_complex_double* alpha,
+                                lapack_complex_double* beta,
+                                lapack_complex_double* q, lapack_int ldq,
+                                lapack_complex_double* z, lapack_int ldz,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork );
+
+lapack_int LAPACKE_chpcon_work( int matrix_order, char uplo, lapack_int n,
+                                const lapack_complex_float* ap,
+                                const lapack_int* ipiv, float anorm,
+                                float* rcond, lapack_complex_float* work );
+lapack_int LAPACKE_zhpcon_work( int matrix_order, char uplo, lapack_int n,
+                                const lapack_complex_double* ap,
+                                const lapack_int* ipiv, double anorm,
+                                double* rcond, lapack_complex_double* work );
+
+lapack_int LAPACKE_chpev_work( int matrix_order, char jobz, char uplo,
+                               lapack_int n, lapack_complex_float* ap, float* w,
+                               lapack_complex_float* z, lapack_int ldz,
+                               lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zhpev_work( int matrix_order, char jobz, char uplo,
+                               lapack_int n, lapack_complex_double* ap,
+                               double* w, lapack_complex_double* z,
+                               lapack_int ldz, lapack_complex_double* work,
+                               double* rwork );
+
+lapack_int LAPACKE_chpevd_work( int matrix_order, char jobz, char uplo,
+                                lapack_int n, lapack_complex_float* ap,
+                                float* w, lapack_complex_float* z,
+                                lapack_int ldz, lapack_complex_float* work,
+                                lapack_int lwork, float* rwork,
+                                lapack_int lrwork, lapack_int* iwork,
+                                lapack_int liwork );
+lapack_int LAPACKE_zhpevd_work( int matrix_order, char jobz, char uplo,
+                                lapack_int n, lapack_complex_double* ap,
+                                double* w, lapack_complex_double* z,
+                                lapack_int ldz, lapack_complex_double* work,
+                                lapack_int lwork, double* rwork,
+                                lapack_int lrwork, lapack_int* iwork,
+                                lapack_int liwork );
+
+lapack_int LAPACKE_chpevx_work( int matrix_order, char jobz, char range,
+                                char uplo, lapack_int n,
+                                lapack_complex_float* ap, float vl, float vu,
+                                lapack_int il, lapack_int iu, float abstol,
+                                lapack_int* m, float* w,
+                                lapack_complex_float* z, lapack_int ldz,
+                                lapack_complex_float* work, float* rwork,
+                                lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_zhpevx_work( int matrix_order, char jobz, char range,
+                                char uplo, lapack_int n,
+                                lapack_complex_double* ap, double vl, double vu,
+                                lapack_int il, lapack_int iu, double abstol,
+                                lapack_int* m, double* w,
+                                lapack_complex_double* z, lapack_int ldz,
+                                lapack_complex_double* work, double* rwork,
+                                lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_chpgst_work( int matrix_order, lapack_int itype, char uplo,
+                                lapack_int n, lapack_complex_float* ap,
+                                const lapack_complex_float* bp );
+lapack_int LAPACKE_zhpgst_work( int matrix_order, lapack_int itype, char uplo,
+                                lapack_int n, lapack_complex_double* ap,
+                                const lapack_complex_double* bp );
+
+lapack_int LAPACKE_chpgv_work( int matrix_order, lapack_int itype, char jobz,
+                               char uplo, lapack_int n,
+                               lapack_complex_float* ap,
+                               lapack_complex_float* bp, float* w,
+                               lapack_complex_float* z, lapack_int ldz,
+                               lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zhpgv_work( int matrix_order, lapack_int itype, char jobz,
+                               char uplo, lapack_int n,
+                               lapack_complex_double* ap,
+                               lapack_complex_double* bp, double* w,
+                               lapack_complex_double* z, lapack_int ldz,
+                               lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_chpgvd_work( int matrix_order, lapack_int itype, char jobz,
+                                char uplo, lapack_int n,
+                                lapack_complex_float* ap,
+                                lapack_complex_float* bp, float* w,
+                                lapack_complex_float* z, lapack_int ldz,
+                                lapack_complex_float* work, lapack_int lwork,
+                                float* rwork, lapack_int lrwork,
+                                lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_zhpgvd_work( int matrix_order, lapack_int itype, char jobz,
+                                char uplo, lapack_int n,
+                                lapack_complex_double* ap,
+                                lapack_complex_double* bp, double* w,
+                                lapack_complex_double* z, lapack_int ldz,
+                                lapack_complex_double* work, lapack_int lwork,
+                                double* rwork, lapack_int lrwork,
+                                lapack_int* iwork, lapack_int liwork );
+
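All of these *_work variants follow LAPACK's caller-managed workspace convention: the caller owns the work / rwork / iwork buffers, and passing lwork = -1 performs a workspace query that stores the optimal size in work[0] without doing any computation. A minimal usage sketch of that pattern, as an aside to the patch itself (zheev_alloc is a hypothetical wrapper name; the default C99 complex representation of lapack_complex_double is assumed):

/* Eigen-decomposition of an n-by-n Hermitian matrix a (column-major,
 * lda = n, n >= 1). On success, w receives the eigenvalues and a is
 * overwritten with the eigenvectors. Returns the LAPACK info code. */
#include <stdlib.h>
#include <complex.h>
#include "lapacke.h"

lapack_int zheev_alloc(lapack_int n, lapack_complex_double* a, double* w)
{
    /* ZHEEV's real workspace has a fixed documented size: max(1, 3n-2). */
    double* rwork = malloc((size_t)(3 * n - 2) * sizeof *rwork);
    if (!rwork) return LAPACK_WORK_MEMORY_ERROR;

    /* Workspace query: lwork = -1, the optimal lwork comes back in query. */
    lapack_complex_double query;
    lapack_int info = LAPACKE_zheev_work(LAPACK_COL_MAJOR, 'V', 'U', n,
                                         a, n, w, &query, -1, rwork);
    if (info == 0) {
        lapack_int lwork = (lapack_int)creal(query);
        lapack_complex_double* work = malloc((size_t)lwork * sizeof *work);
        if (work) {
            /* Real call with the caller-allocated workspace. */
            info = LAPACKE_zheev_work(LAPACK_COL_MAJOR, 'V', 'U', n,
                                      a, n, w, work, lwork, rwork);
            free(work);
        } else {
            info = LAPACK_WORK_MEMORY_ERROR;
        }
    }
    free(rwork);
    return info;
}

The same query-allocate-call shape applies to every prototype here that takes a work/lwork pair; fixed-size buffers such as rwork or iwork are simply allocated at their documented minimum sizes up front.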
+lapack_int LAPACKE_chpgvx_work( int matrix_order, lapack_int itype, char jobz,
+                                char range, char uplo, lapack_int n,
+                                lapack_complex_float* ap,
+                                lapack_complex_float* bp, float vl, float vu,
+                                lapack_int il, lapack_int iu, float abstol,
+                                lapack_int* m, float* w,
+                                lapack_complex_float* z, lapack_int ldz,
+                                lapack_complex_float* work, float* rwork,
+                                lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_zhpgvx_work( int matrix_order, lapack_int itype, char jobz,
+                                char range, char uplo, lapack_int n,
+                                lapack_complex_double* ap,
+                                lapack_complex_double* bp, double vl, double vu,
+                                lapack_int il, lapack_int iu, double abstol,
+                                lapack_int* m, double* w,
+                                lapack_complex_double* z, lapack_int ldz,
+                                lapack_complex_double* work, double* rwork,
+                                lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_chprfs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_float* ap,
+                                const lapack_complex_float* afp,
+                                const lapack_int* ipiv,
+                                const lapack_complex_float* b, lapack_int ldb,
+                                lapack_complex_float* x, lapack_int ldx,
+                                float* ferr, float* berr,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zhprfs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs,
+                                const lapack_complex_double* ap,
+                                const lapack_complex_double* afp,
+                                const lapack_int* ipiv,
+                                const lapack_complex_double* b, lapack_int ldb,
+                                lapack_complex_double* x, lapack_int ldx,
+                                double* ferr, double* berr,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_chpsv_work( int matrix_order, char uplo, lapack_int n,
+                               lapack_int nrhs, lapack_complex_float* ap,
+                               lapack_int* ipiv, lapack_complex_float* b,
+                               lapack_int ldb );
+lapack_int LAPACKE_zhpsv_work( int matrix_order, char uplo, lapack_int n,
+                               lapack_int nrhs, lapack_complex_double* ap,
+                               lapack_int* ipiv, lapack_complex_double* b,
+                               lapack_int ldb );
+
+lapack_int LAPACKE_chpsvx_work( int matrix_order, char fact, char uplo,
+                                lapack_int n, lapack_int nrhs,
+                                const lapack_complex_float* ap,
+                                lapack_complex_float* afp, lapack_int* ipiv,
+                                const lapack_complex_float* b, lapack_int ldb,
+                                lapack_complex_float* x, lapack_int ldx,
+                                float* rcond, float* ferr, float* berr,
+                                lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zhpsvx_work( int matrix_order, char fact, char uplo,
+                                lapack_int n, lapack_int nrhs,
+                                const lapack_complex_double* ap,
+                                lapack_complex_double* afp, lapack_int* ipiv,
+                                const lapack_complex_double* b, lapack_int ldb,
+                                lapack_complex_double* x, lapack_int ldx,
+                                double* rcond, double* ferr, double* berr,
+                                lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_chptrd_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_float* ap, float* d, float* e,
+                                lapack_complex_float* tau );
+lapack_int LAPACKE_zhptrd_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* ap, double* d, double* e,
+                                lapack_complex_double* tau );
+
+lapack_int LAPACKE_chptrf_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_float* ap, lapack_int* ipiv );
+lapack_int LAPACKE_zhptrf_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* ap, lapack_int* ipiv );
+
+lapack_int LAPACKE_chptri_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_float* ap,
+                                const lapack_int* ipiv,
+                                lapack_complex_float* work );
+lapack_int LAPACKE_zhptri_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_complex_double* ap,
+                                const lapack_int* ipiv,
+                                lapack_complex_double* work );
+
+lapack_int LAPACKE_chptrs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs, const lapack_complex_float* ap,
+                                const lapack_int* ipiv, lapack_complex_float* b,
+                                lapack_int ldb );
+lapack_int LAPACKE_zhptrs_work( int matrix_order, char uplo, lapack_int n,
+                                lapack_int nrhs,
+                                const lapack_complex_double* ap,
+                                const lapack_int* ipiv,
+                                lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_shsein_work( int matrix_order, char job, char eigsrc,
+                                char initv, lapack_logical* select,
+                                lapack_int n, const float* h, lapack_int ldh,
+                                float* wr, const float* wi, float* vl,
+                                lapack_int ldvl, float* vr, lapack_int ldvr,
+                                lapack_int mm, lapack_int* m, float* work,
+                                lapack_int* ifaill, lapack_int* ifailr );
+lapack_int LAPACKE_dhsein_work( int matrix_order, char job, char eigsrc,
+                                char initv, lapack_logical* select,
+                                lapack_int n, const double* h, lapack_int ldh,
+                                double* wr, const double* wi, double* vl,
+                                lapack_int ldvl, double* vr, lapack_int ldvr,
+                                lapack_int mm, lapack_int* m, double* work,
+                                lapack_int* ifaill, lapack_int* ifailr );
+lapack_int LAPACKE_chsein_work( int matrix_order, char job, char eigsrc,
+                                char initv, const lapack_logical* select,
+                                lapack_int n, const lapack_complex_float* h,
+                                lapack_int ldh, lapack_complex_float* w,
+                                lapack_complex_float* vl, lapack_int ldvl,
+                                lapack_complex_float* vr, lapack_int ldvr,
+                                lapack_int mm, lapack_int* m,
+                                lapack_complex_float* work, float* rwork,
+                                lapack_int* ifaill, lapack_int* ifailr );
+lapack_int LAPACKE_zhsein_work( int matrix_order, char job, char eigsrc,
+                                char initv, const lapack_logical* select,
+                                lapack_int n, const lapack_complex_double* h,
+                                lapack_int ldh, lapack_complex_double* w,
+                                lapack_complex_double* vl, lapack_int ldvl,
+                                lapack_complex_double* vr, lapack_int ldvr,
+                                lapack_int mm, lapack_int* m,
+                                lapack_complex_double* work, double* rwork,
+                                lapack_int* ifaill, lapack_int* ifailr );
+
+lapack_int LAPACKE_shseqr_work( int matrix_order, char job, char compz,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                float* h, lapack_int ldh, float* wr, float* wi,
+                                float* z, lapack_int ldz, float* work,
+                                lapack_int lwork );
+lapack_int LAPACKE_dhseqr_work( int matrix_order, char job, char compz,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                double* h, lapack_int ldh, double* wr,
+                                double* wi, double* z, lapack_int ldz,
+                                double* work, lapack_int lwork );
+lapack_int LAPACKE_chseqr_work( int matrix_order, char job, char compz,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                lapack_complex_float* h, lapack_int ldh,
+                                lapack_complex_float* w,
+                                lapack_complex_float* z, lapack_int ldz,
+                                lapack_complex_float* work, lapack_int lwork );
+lapack_int LAPACKE_zhseqr_work( int matrix_order, char job, char compz,
+                                lapack_int n, lapack_int ilo, lapack_int ihi,
+                                lapack_complex_double* h, lapack_int ldh,
+                                lapack_complex_double* w,
+                                lapack_complex_double* z, lapack_int ldz,
+                                lapack_complex_double* work, lapack_int lwork );
+
+lapack_int LAPACKE_clacgv_work( lapack_int n, lapack_complex_float* x,
+                                lapack_int incx );
+lapack_int LAPACKE_zlacgv_work( lapack_int n, lapack_complex_double* x,
+                                lapack_int incx );
+
+lapack_int LAPACKE_slacpy_work( int matrix_order, char uplo, lapack_int m,
+                                lapack_int n, const float* a, lapack_int lda,
+                                float* b, lapack_int ldb );
+lapack_int LAPACKE_dlacpy_work( int matrix_order, char uplo, lapack_int m,
+                                lapack_int n, const double* a, lapack_int lda,
+                                double* b, lapack_int ldb );
+lapack_int LAPACKE_clacpy_work( int matrix_order, char uplo, lapack_int m,
+                                lapack_int n,
const lapack_complex_float* a, + lapack_int lda, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_zlacpy_work( int matrix_order, char uplo, lapack_int m, + lapack_int n, const lapack_complex_double* a, + lapack_int lda, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_zlag2c_work( int matrix_order, lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_float* sa, lapack_int ldsa ); + +lapack_int LAPACKE_slag2d_work( int matrix_order, lapack_int m, lapack_int n, + const float* sa, lapack_int ldsa, double* a, + lapack_int lda ); + +lapack_int LAPACKE_dlag2s_work( int matrix_order, lapack_int m, lapack_int n, + const double* a, lapack_int lda, float* sa, + lapack_int ldsa ); + +lapack_int LAPACKE_clag2z_work( int matrix_order, lapack_int m, lapack_int n, + const lapack_complex_float* sa, lapack_int ldsa, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_slagge_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const float* d, + float* a, lapack_int lda, lapack_int* iseed, + float* work ); +lapack_int LAPACKE_dlagge_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const double* d, + double* a, lapack_int lda, lapack_int* iseed, + double* work ); +lapack_int LAPACKE_clagge_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const float* d, + lapack_complex_float* a, lapack_int lda, + lapack_int* iseed, lapack_complex_float* work ); +lapack_int LAPACKE_zlagge_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int kl, lapack_int ku, const double* d, + lapack_complex_double* a, lapack_int lda, + lapack_int* iseed, + lapack_complex_double* work ); + +lapack_int LAPACKE_claghe_work( int matrix_order, lapack_int n, lapack_int k, + const float* d, lapack_complex_float* a, + lapack_int lda, lapack_int* iseed, + lapack_complex_float* work ); +lapack_int LAPACKE_zlaghe_work( int matrix_order, lapack_int n, lapack_int k, + const double* d, lapack_complex_double* a, + lapack_int lda, lapack_int* iseed, + lapack_complex_double* work ); + +lapack_int LAPACKE_slagsy_work( int matrix_order, lapack_int n, lapack_int k, + const float* d, float* a, lapack_int lda, + lapack_int* iseed, float* work ); +lapack_int LAPACKE_dlagsy_work( int matrix_order, lapack_int n, lapack_int k, + const double* d, double* a, lapack_int lda, + lapack_int* iseed, double* work ); +lapack_int LAPACKE_clagsy_work( int matrix_order, lapack_int n, lapack_int k, + const float* d, lapack_complex_float* a, + lapack_int lda, lapack_int* iseed, + lapack_complex_float* work ); +lapack_int LAPACKE_zlagsy_work( int matrix_order, lapack_int n, lapack_int k, + const double* d, lapack_complex_double* a, + lapack_int lda, lapack_int* iseed, + lapack_complex_double* work ); + +lapack_int LAPACKE_slapmr_work( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, float* x, + lapack_int ldx, lapack_int* k ); +lapack_int LAPACKE_dlapmr_work( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, double* x, + lapack_int ldx, lapack_int* k ); +lapack_int LAPACKE_clapmr_work( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, + lapack_complex_float* x, lapack_int ldx, + lapack_int* k ); +lapack_int LAPACKE_zlapmr_work( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, + lapack_complex_double* x, lapack_int ldx, + lapack_int* k ); + +lapack_int LAPACKE_slartgp_work( float f, float g, 
float* cs, float* sn,
+                                float* r );
+lapack_int LAPACKE_dlartgp_work( double f, double g, double* cs, double* sn, double* r );
+
+lapack_int LAPACKE_slartgs_work( float x, float y, float sigma, float* cs, float* sn );
+lapack_int LAPACKE_dlartgs_work( double x, double y, double sigma, double* cs, double* sn );
+
+float LAPACKE_slapy2_work( float x, float y );
+double LAPACKE_dlapy2_work( double x, double y );
+
+float LAPACKE_slapy3_work( float x, float y, float z );
+double LAPACKE_dlapy3_work( double x, double y, double z );
+
+float LAPACKE_slamch_work( char cmach );
+double LAPACKE_dlamch_work( char cmach );
+
+float LAPACKE_slange_work( int matrix_order, char norm, lapack_int m, lapack_int n, const float* a, lapack_int lda, float* work );
+double LAPACKE_dlange_work( int matrix_order, char norm, lapack_int m, lapack_int n, const double* a, lapack_int lda, double* work );
+float LAPACKE_clange_work( int matrix_order, char norm, lapack_int m, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* work );
+double LAPACKE_zlange_work( int matrix_order, char norm, lapack_int m, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* work );
+
+float LAPACKE_clanhe_work( int matrix_order, char norm, char uplo, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* work );
+double LAPACKE_zlanhe_work( int matrix_order, char norm, char uplo, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* work );
+
+float LAPACKE_slansy_work( int matrix_order, char norm, char uplo, lapack_int n, const float* a, lapack_int lda, float* work );
+double LAPACKE_dlansy_work( int matrix_order, char norm, char uplo, lapack_int n, const double* a, lapack_int lda, double* work );
+float LAPACKE_clansy_work( int matrix_order, char norm, char uplo, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* work );
+double LAPACKE_zlansy_work( int matrix_order, char norm, char uplo, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* work );
+
+float LAPACKE_slantr_work( int matrix_order, char norm, char uplo, char diag, lapack_int m, lapack_int n, const float* a, lapack_int lda, float* work );
+double LAPACKE_dlantr_work( int matrix_order, char norm, char uplo, char diag, lapack_int m, lapack_int n, const double* a, lapack_int lda, double* work );
+float LAPACKE_clantr_work( int matrix_order, char norm, char uplo, char diag, lapack_int m, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* work );
+double LAPACKE_zlantr_work( int matrix_order, char norm, char uplo, char diag, lapack_int m, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* work );
+
+lapack_int LAPACKE_slarfb_work( int matrix_order, char side, char trans, char direct, char storev, lapack_int m, lapack_int n, lapack_int k, const float* v, lapack_int ldv, const float* t, lapack_int ldt, float* c, lapack_int ldc, float* work, lapack_int ldwork );
+lapack_int LAPACKE_dlarfb_work( int matrix_order, char side, char trans, char direct, char storev, lapack_int m, lapack_int n, lapack_int k, const double* v, lapack_int ldv, const double* t, lapack_int ldt, double* c, lapack_int ldc, double* work, lapack_int ldwork );
+lapack_int LAPACKE_clarfb_work( int matrix_order, char side, char trans, char direct, char storev, lapack_int m, lapack_int n, lapack_int k, const lapack_complex_float* v, lapack_int ldv, const lapack_complex_float* t, lapack_int ldt, lapack_complex_float* c, lapack_int ldc, lapack_complex_float* work, lapack_int ldwork );
+lapack_int LAPACKE_zlarfb_work( int matrix_order, char side, char trans, char direct, char storev, lapack_int m, lapack_int n, lapack_int k, const lapack_complex_double* v, lapack_int ldv, const lapack_complex_double* t, lapack_int ldt, lapack_complex_double* c, lapack_int ldc, lapack_complex_double* work, lapack_int ldwork );
+
+lapack_int LAPACKE_slarfg_work( lapack_int n, float* alpha, float* x, lapack_int incx, float* tau );
+lapack_int LAPACKE_dlarfg_work( lapack_int n, double* alpha, double* x, lapack_int incx, double* tau );
+lapack_int LAPACKE_clarfg_work( lapack_int n, lapack_complex_float* alpha, lapack_complex_float* x, lapack_int incx, lapack_complex_float* tau );
+lapack_int LAPACKE_zlarfg_work( lapack_int n, lapack_complex_double* alpha, lapack_complex_double* x, lapack_int incx, lapack_complex_double* tau );
+
+lapack_int LAPACKE_slarft_work( int matrix_order, char direct, char storev, lapack_int n, lapack_int k, const float* v, lapack_int ldv, const float* tau, float* t, lapack_int ldt );
+lapack_int LAPACKE_dlarft_work( int matrix_order, char direct, char storev, lapack_int n, lapack_int k, const double* v, lapack_int ldv, const double* tau, double* t, lapack_int ldt );
+lapack_int LAPACKE_clarft_work( int matrix_order, char direct, char storev, lapack_int n, lapack_int k, const lapack_complex_float* v, lapack_int ldv, const lapack_complex_float* tau, lapack_complex_float* t, lapack_int ldt );
+lapack_int LAPACKE_zlarft_work( int matrix_order, char direct, char storev, lapack_int n, lapack_int k, const lapack_complex_double* v, lapack_int ldv, const lapack_complex_double* tau, lapack_complex_double* t, lapack_int ldt );
+
+lapack_int LAPACKE_slarfx_work( int matrix_order, char side, lapack_int m, lapack_int n, const float* v, float tau, float* c, lapack_int ldc, float* work );
+lapack_int LAPACKE_dlarfx_work( int matrix_order, char side, lapack_int m, lapack_int n, const double* v, double tau, double* c, lapack_int ldc, double* work );
+lapack_int LAPACKE_clarfx_work( int matrix_order, char side, lapack_int m, lapack_int n, const lapack_complex_float* v, lapack_complex_float tau, lapack_complex_float* c, lapack_int ldc, lapack_complex_float* work );
+lapack_int LAPACKE_zlarfx_work( int matrix_order, char side, lapack_int m, lapack_int n, const lapack_complex_double* v, lapack_complex_double tau, lapack_complex_double* c, lapack_int ldc, lapack_complex_double* work );
+
+lapack_int LAPACKE_slarnv_work( lapack_int idist, lapack_int* iseed, lapack_int n, float* x );
+lapack_int LAPACKE_dlarnv_work( lapack_int idist, lapack_int* iseed, lapack_int n, double* x );
+lapack_int LAPACKE_clarnv_work( lapack_int idist, lapack_int* iseed, lapack_int n, lapack_complex_float* x );
+lapack_int LAPACKE_zlarnv_work( lapack_int idist, lapack_int* iseed, lapack_int n, lapack_complex_double* x );
+
+lapack_int LAPACKE_slaset_work( int matrix_order, char uplo, lapack_int m, lapack_int n, float alpha, float beta, float* a, lapack_int lda );
+lapack_int LAPACKE_dlaset_work( int matrix_order, char uplo, lapack_int m, lapack_int n, double alpha, double beta, double* a, lapack_int lda );
+lapack_int LAPACKE_claset_work( int matrix_order, char uplo, lapack_int m, lapack_int n, lapack_complex_float alpha, lapack_complex_float beta, lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zlaset_work( int matrix_order, char uplo, lapack_int m, lapack_int n, lapack_complex_double alpha, lapack_complex_double beta, lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_slasrt_work( char id, lapack_int n, float* d );
+lapack_int LAPACKE_dlasrt_work( char id, lapack_int n, double* d );
+
+lapack_int LAPACKE_slaswp_work( int matrix_order, lapack_int n, float* a, lapack_int lda, lapack_int k1, lapack_int k2, const lapack_int* ipiv, lapack_int incx );
+lapack_int LAPACKE_dlaswp_work( int matrix_order, lapack_int n, double* a, lapack_int lda, lapack_int k1, lapack_int k2, const lapack_int* ipiv, lapack_int incx );
+lapack_int LAPACKE_claswp_work( int matrix_order, lapack_int n, lapack_complex_float* a, lapack_int lda, lapack_int k1, lapack_int k2, const lapack_int* ipiv, lapack_int incx );
+lapack_int LAPACKE_zlaswp_work( int matrix_order, lapack_int n, lapack_complex_double* a, lapack_int lda, lapack_int k1, lapack_int k2, const lapack_int* ipiv, lapack_int incx );
+
+lapack_int LAPACKE_slatms_work( int matrix_order, lapack_int m, lapack_int n, char dist, lapack_int* iseed, char sym, float* d, lapack_int mode, float cond, float dmax, lapack_int kl, lapack_int ku, char pack, float* a, lapack_int lda, float* work );
+lapack_int LAPACKE_dlatms_work( int matrix_order, lapack_int m, lapack_int n, char dist, lapack_int* iseed, char sym, double* d, lapack_int mode, double cond, double dmax, lapack_int kl, lapack_int ku, char pack, double* a, lapack_int lda, double* work );
+lapack_int LAPACKE_clatms_work( int matrix_order, lapack_int m, lapack_int n, char dist, lapack_int* iseed, char sym, float* d, lapack_int mode, float cond, float dmax, lapack_int kl, lapack_int ku, char pack, lapack_complex_float* a, lapack_int lda, lapack_complex_float* work );
+lapack_int LAPACKE_zlatms_work( int matrix_order, lapack_int m, lapack_int n, char dist, lapack_int* iseed, char sym, double* d, lapack_int mode, double cond, double dmax, lapack_int kl, lapack_int ku, char pack, lapack_complex_double* a, lapack_int lda, lapack_complex_double* work );
+
+lapack_int LAPACKE_slauum_work( int matrix_order, char uplo, lapack_int n, float* a, lapack_int lda );
+lapack_int LAPACKE_dlauum_work( int matrix_order, char uplo, lapack_int n, double* a, lapack_int lda );
+lapack_int LAPACKE_clauum_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zlauum_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_sopgtr_work( int matrix_order, char uplo, lapack_int n, const float* ap, const float* tau, float* q, lapack_int ldq, float* work );
+lapack_int LAPACKE_dopgtr_work( int matrix_order, char uplo, lapack_int n, const double* ap, const double* tau, double* q, lapack_int ldq, double* work );
+
+lapack_int LAPACKE_sopmtr_work( int matrix_order, char side, char uplo, char trans, lapack_int m, lapack_int n, const float* ap, const float* tau, float* c, lapack_int ldc, float* work );
+lapack_int LAPACKE_dopmtr_work( int matrix_order, char side, char uplo, char trans, lapack_int m, lapack_int n, const double* ap, const double* tau, double* c, lapack_int ldc, double* work );
+
+lapack_int LAPACKE_sorgbr_work( int matrix_order, char vect, lapack_int m, lapack_int n, lapack_int k, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorgbr_work( int matrix_order, char vect, lapack_int m, lapack_int n, lapack_int k, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
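A minimal usage sketch for the matrix-norm routines declared above, assuming the usual LAPACK_COL_MAJOR constant from this header. For norm = '1' the trailing work array should be unreferenced, so NULL is passed here; that behavior is inherited from the underlying dlange and is worth verifying against your LAPACKE build:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    /* 3x3 column-major matrix; the 1-norm is the largest column sum. */
    double a[9] = { 1.0, 4.0, 7.0,    /* column 0 */
                    2.0, 5.0, 8.0,    /* column 1 */
                    3.0, 6.0, 9.0 };  /* column 2 */
    double norm = LAPACKE_dlange_work( LAPACK_COL_MAJOR, '1', 3, 3, a, 3, NULL );
    printf( "1-norm = %g\n", norm );  /* expected 18 (third column sum) */
    return 0;
}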
+
+lapack_int LAPACKE_sorghr_work( int matrix_order, lapack_int n, lapack_int ilo, lapack_int ihi, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorghr_work( int matrix_order, lapack_int n, lapack_int ilo, lapack_int ihi, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sorglq_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorglq_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sorgql_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorgql_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sorgqr_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorgqr_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sorgrq_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorgrq_work( int matrix_order, lapack_int m, lapack_int n, lapack_int k, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sorgtr_work( int matrix_order, char uplo, lapack_int n, float* a, lapack_int lda, const float* tau, float* work, lapack_int lwork );
+lapack_int LAPACKE_dorgtr_work( int matrix_order, char uplo, lapack_int n, double* a, lapack_int lda, const double* tau, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormbr_work( int matrix_order, char vect, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormbr_work( int matrix_order, char vect, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormhr_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int ilo, lapack_int ihi, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormhr_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int ilo, lapack_int ihi, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormlq_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormlq_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormql_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormql_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormqr_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormqr_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormrq_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormrq_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormrz_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, lapack_int l, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormrz_work( int matrix_order, char side, char trans, lapack_int m, lapack_int n, lapack_int k, lapack_int l, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_sormtr_work( int matrix_order, char side, char uplo, char trans, lapack_int m, lapack_int n, const float* a, lapack_int lda, const float* tau, float* c, lapack_int ldc, float* work, lapack_int lwork );
+lapack_int LAPACKE_dormtr_work( int matrix_order, char side, char uplo, char trans, lapack_int m, lapack_int n, const double* a, lapack_int lda, const double* tau, double* c, lapack_int ldc, double* work, lapack_int lwork );
+
+lapack_int LAPACKE_spbcon_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const float* ab, lapack_int ldab, float anorm, float* rcond, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dpbcon_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const double* ab, lapack_int ldab, double anorm, double* rcond, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cpbcon_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const lapack_complex_float* ab, lapack_int ldab, float anorm, float* rcond, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zpbcon_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const lapack_complex_double* ab, lapack_int ldab, double anorm, double* rcond, lapack_complex_double* work, double* rwork );
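A hedged sketch of the orthogonal-factor generators declared above: factor with LAPACKE_dgeqrf_work (declared elsewhere in this header; its exact signature here is an assumption) and then expand Q with LAPACKE_dorgqr_work, using the lwork = -1 workspace-query convention. Any lwork legal for dgeqrf is also legal for dorgqr with these sizes, so the buffer is reused:

#include <stdio.h>
#include <stdlib.h>
#include "lapacke.h"

int main(void) {
    lapack_int m = 3, n = 2, lda = 3, lwork, info;
    double a[6] = { 1.0, 1.0, 1.0,    /* column 0 */
                    0.0, 1.0, 2.0 };  /* column 1 */
    double tau[2], query;
    /* Workspace query, then the actual factorization A = Q*R. */
    info = LAPACKE_dgeqrf_work( LAPACK_COL_MAJOR, m, n, a, lda, tau, &query, -1 );
    lwork = (lapack_int)query;
    double* work = malloc( (size_t)lwork * sizeof *work );
    info = LAPACKE_dgeqrf_work( LAPACK_COL_MAJOR, m, n, a, lda, tau, work, lwork );
    /* Overwrite the stored reflectors with the explicit m-by-n factor Q1. */
    if( info == 0 )
        info = LAPACKE_dorgqr_work( LAPACK_COL_MAJOR, m, n, n, a, lda, tau, work, lwork );
    printf( "info = %d, Q(0,0) = %g\n", (int)info, a[0] );
    free( work );
    return 0;
}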
+
+lapack_int LAPACKE_spbequ_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const float* ab, lapack_int ldab, float* s, float* scond, float* amax );
+lapack_int LAPACKE_dpbequ_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const double* ab, lapack_int ldab, double* s, double* scond, double* amax );
+lapack_int LAPACKE_cpbequ_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const lapack_complex_float* ab, lapack_int ldab, float* s, float* scond, float* amax );
+lapack_int LAPACKE_zpbequ_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, const lapack_complex_double* ab, lapack_int ldab, double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_spbrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const float* ab, lapack_int ldab, const float* afb, lapack_int ldafb, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dpbrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const double* ab, lapack_int ldab, const double* afb, lapack_int ldafb, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cpbrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const lapack_complex_float* ab, lapack_int ldab, const lapack_complex_float* afb, lapack_int ldafb, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zpbrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const lapack_complex_double* ab, lapack_int ldab, const lapack_complex_double* afb, lapack_int ldafb, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spbstf_work( int matrix_order, char uplo, lapack_int n, lapack_int kb, float* bb, lapack_int ldbb );
+lapack_int LAPACKE_dpbstf_work( int matrix_order, char uplo, lapack_int n, lapack_int kb, double* bb, lapack_int ldbb );
+lapack_int LAPACKE_cpbstf_work( int matrix_order, char uplo, lapack_int n, lapack_int kb, lapack_complex_float* bb, lapack_int ldbb );
+lapack_int LAPACKE_zpbstf_work( int matrix_order, char uplo, lapack_int n, lapack_int kb, lapack_complex_double* bb, lapack_int ldbb );
+
+lapack_int LAPACKE_spbsv_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, float* ab, lapack_int ldab, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpbsv_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, double* ab, lapack_int ldab, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpbsv_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, lapack_complex_float* ab, lapack_int ldab, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpbsv_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, lapack_complex_double* ab, lapack_int ldab, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spbsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, float* ab, lapack_int ldab, float* afb, lapack_int ldafb, char* equed, float* s, float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dpbsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, double* ab, lapack_int ldab, double* afb, lapack_int ldafb, char* equed, double* s, double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cpbsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, lapack_complex_float* ab, lapack_int ldab, lapack_complex_float* afb, lapack_int ldafb, char* equed, float* s, lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zpbsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, lapack_complex_double* ab, lapack_int ldab, lapack_complex_double* afb, lapack_int ldafb, char* equed, double* s, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spbtrf_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, float* ab, lapack_int ldab );
+lapack_int LAPACKE_dpbtrf_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, double* ab, lapack_int ldab );
+lapack_int LAPACKE_cpbtrf_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_complex_float* ab, lapack_int ldab );
+lapack_int LAPACKE_zpbtrf_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_complex_double* ab, lapack_int ldab );
+
+lapack_int LAPACKE_spbtrs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const float* ab, lapack_int ldab, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpbtrs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const double* ab, lapack_int ldab, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpbtrs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const lapack_complex_float* ab, lapack_int ldab, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpbtrs_work( int matrix_order, char uplo, lapack_int n, lapack_int kd, lapack_int nrhs, const lapack_complex_double* ab, lapack_int ldab, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spftrf_work( int matrix_order, char transr, char uplo, lapack_int n, float* a );
+lapack_int LAPACKE_dpftrf_work( int matrix_order, char transr, char uplo, lapack_int n, double* a );
+lapack_int LAPACKE_cpftrf_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_complex_float* a );
+lapack_int LAPACKE_zpftrf_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_complex_double* a );
+
+lapack_int LAPACKE_spftri_work( int matrix_order, char transr, char uplo, lapack_int n, float* a );
+lapack_int LAPACKE_dpftri_work( int matrix_order, char transr, char uplo, lapack_int n, double* a );
+lapack_int LAPACKE_cpftri_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_complex_float* a );
+lapack_int LAPACKE_zpftri_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_complex_double* a );
+
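The pbsv driver declared above solves a symmetric positive definite banded system in one call. A minimal sketch for a tridiagonal matrix in upper band storage (ab(kd+1+i-j, j) holds a(i,j); values chosen so the solution is all ones):

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 4, kd = 1, nrhs = 1, ldab = 2;
    /* tridiag(-1, 2, -1) in column-major upper band storage;
       the first slot of column 0 is unused padding. */
    double ab[8] = { 0.0, 2.0,  -1.0, 2.0,  -1.0, 2.0,  -1.0, 2.0 };
    double b[4]  = { 1.0, 0.0, 0.0, 1.0 };  /* equals A * (1,1,1,1)^T */
    lapack_int info = LAPACKE_dpbsv_work( LAPACK_COL_MAJOR, 'U', n, kd, nrhs, ab, ldab, b, n );
    printf( "info = %d, x = (%g, %g, %g, %g)\n", (int)info, b[0], b[1], b[2], b[3] );
    return 0;
}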
+lapack_int LAPACKE_spftrs_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_int nrhs, const float* a, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpftrs_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_int nrhs, const double* a, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpftrs_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* a, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpftrs_work( int matrix_order, char transr, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* a, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spocon_work( int matrix_order, char uplo, lapack_int n, const float* a, lapack_int lda, float anorm, float* rcond, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dpocon_work( int matrix_order, char uplo, lapack_int n, const double* a, lapack_int lda, double anorm, double* rcond, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cpocon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_float* a, lapack_int lda, float anorm, float* rcond, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zpocon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_double* a, lapack_int lda, double anorm, double* rcond, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spoequ_work( int matrix_order, lapack_int n, const float* a, lapack_int lda, float* s, float* scond, float* amax );
+lapack_int LAPACKE_dpoequ_work( int matrix_order, lapack_int n, const double* a, lapack_int lda, double* s, double* scond, double* amax );
+lapack_int LAPACKE_cpoequ_work( int matrix_order, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* s, float* scond, float* amax );
+lapack_int LAPACKE_zpoequ_work( int matrix_order, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_spoequb_work( int matrix_order, lapack_int n, const float* a, lapack_int lda, float* s, float* scond, float* amax );
+lapack_int LAPACKE_dpoequb_work( int matrix_order, lapack_int n, const double* a, lapack_int lda, double* s, double* scond, double* amax );
+lapack_int LAPACKE_cpoequb_work( int matrix_order, lapack_int n, const lapack_complex_float* a, lapack_int lda, float* s, float* scond, float* amax );
+lapack_int LAPACKE_zpoequb_work( int matrix_order, lapack_int n, const lapack_complex_double* a, lapack_int lda, double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_sporfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* a, lapack_int lda, const float* af, lapack_int ldaf, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dporfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* a, lapack_int lda, const double* af, lapack_int ldaf, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cporfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* a, lapack_int lda, const lapack_complex_float* af, lapack_int ldaf, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zporfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* a, lapack_int lda, const lapack_complex_double* af, lapack_int ldaf, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sporfsx_work( int matrix_order, char uplo, char equed, lapack_int n, lapack_int nrhs, const float* a, lapack_int lda, const float* af, lapack_int ldaf, const float* s, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* berr, lapack_int n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int nparams, float* params, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dporfsx_work( int matrix_order, char uplo, char equed, lapack_int n, lapack_int nrhs, const double* a, lapack_int lda, const double* af, lapack_int ldaf, const double* s, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* berr, lapack_int n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int nparams, double* params, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cporfsx_work( int matrix_order, char uplo, char equed, lapack_int n, lapack_int nrhs, const lapack_complex_float* a, lapack_int lda, const lapack_complex_float* af, lapack_int ldaf, const float* s, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* berr, lapack_int n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int nparams, float* params, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zporfsx_work( int matrix_order, char uplo, char equed, lapack_int n, lapack_int nrhs, const lapack_complex_double* a, lapack_int lda, const lapack_complex_double* af, lapack_int ldaf, const double* s, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* berr, lapack_int n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int nparams, double* params, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, float* a, lapack_int lda, float* b, lapack_int ldb );
+lapack_int LAPACKE_dposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, double* a, lapack_int lda, double* b, lapack_int ldb );
+lapack_int LAPACKE_cposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* a, lapack_int lda, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* a, lapack_int lda, lapack_complex_double* b, lapack_int ldb );
+lapack_int LAPACKE_dsposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, double* a, lapack_int lda, double* b, lapack_int ldb, double* x, lapack_int ldx, double* work, float* swork, lapack_int* iter );
+lapack_int LAPACKE_zcposv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* a, lapack_int lda, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, lapack_complex_double* work, lapack_complex_float* swork, double* rwork, lapack_int* iter );
+
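The posv drivers above combine the Cholesky factorization and solve in one call; b is overwritten with the solution. A small sketch with a 2-by-2 SPD system:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 2, nrhs = 1;
    double a[4] = { 4.0, 2.0,    /* column-major [4 2; 2 3] */
                    2.0, 3.0 };
    double b[2] = { 2.0, 5.0 };
    lapack_int info = LAPACKE_dposv_work( LAPACK_COL_MAJOR, 'L', n, nrhs, a, n, b, n );
    printf( "info = %d, x = (%g, %g)\n", (int)info, b[0], b[1] );  /* expected (-0.5, 2) */
    return 0;
}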
+lapack_int LAPACKE_sposvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, float* a, lapack_int lda, float* af, lapack_int ldaf, char* equed, float* s, float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dposvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, double* a, lapack_int lda, double* af, lapack_int ldaf, char* equed, double* s, double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cposvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* a, lapack_int lda, lapack_complex_float* af, lapack_int ldaf, char* equed, float* s, lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zposvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* a, lapack_int lda, lapack_complex_double* af, lapack_int ldaf, char* equed, double* s, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sposvxx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, float* a, lapack_int lda, float* af, lapack_int ldaf, char* equed, float* s, float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* rpvgrw, float* berr, lapack_int n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int nparams, float* params, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dposvxx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, double* a, lapack_int lda, double* af, lapack_int ldaf, char* equed, double* s, double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* rpvgrw, double* berr, lapack_int n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int nparams, double* params, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cposvxx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* a, lapack_int lda, lapack_complex_float* af, lapack_int ldaf, char* equed, float* s, lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* rpvgrw, float* berr, lapack_int n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int nparams, float* params, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zposvxx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* a, lapack_int lda, lapack_complex_double* af, lapack_int ldaf, char* equed, double* s, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* rpvgrw, double* berr, lapack_int n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int nparams, double* params, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spotrf_work( int matrix_order, char uplo, lapack_int n, float* a, lapack_int lda );
+lapack_int LAPACKE_dpotrf_work( int matrix_order, char uplo, lapack_int n, double* a, lapack_int lda );
+lapack_int LAPACKE_cpotrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zpotrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_spotri_work( int matrix_order, char uplo, lapack_int n, float* a, lapack_int lda );
+lapack_int LAPACKE_dpotri_work( int matrix_order, char uplo, lapack_int n, double* a, lapack_int lda );
+lapack_int LAPACKE_cpotri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* a, lapack_int lda );
+lapack_int LAPACKE_zpotri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* a, lapack_int lda );
+
+lapack_int LAPACKE_spotrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* a, lapack_int lda, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpotrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* a, lapack_int lda, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpotrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* a, lapack_int lda, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpotrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* a, lapack_int lda, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sppcon_work( int matrix_order, char uplo, lapack_int n, const float* ap, float anorm, float* rcond, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dppcon_work( int matrix_order, char uplo, lapack_int n, const double* ap, double anorm, double* rcond, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cppcon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_float* ap, float anorm, float* rcond, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zppcon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_double* ap, double anorm, double* rcond, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sppequ_work( int matrix_order, char uplo, lapack_int n, const float* ap, float* s, float* scond, float* amax );
+lapack_int LAPACKE_dppequ_work( int matrix_order, char uplo, lapack_int n, const double* ap, double* s, double* scond, double* amax );
+lapack_int LAPACKE_cppequ_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_float* ap, float* s, float* scond, float* amax );
+lapack_int LAPACKE_zppequ_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_double* ap, double* s, double* scond, double* amax );
+
+lapack_int LAPACKE_spprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* ap, const float* afp, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dpprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* ap, const double* afp, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cpprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* ap, const lapack_complex_float* afp, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zpprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* ap, const lapack_complex_double* afp, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
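potrf/potrs split the same computation into a factorization step and a solve step, the natural choice when one factorization serves many right-hand sides; neither _work variant takes a work array. A sketch:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 2, nrhs = 1;
    double a[4] = { 25.0, 15.0,    /* column-major [25 15; 15 18] */
                    15.0, 18.0 };
    double b[2] = { 40.0, 33.0 };  /* equals A * (1,1)^T */
    lapack_int info = LAPACKE_dpotrf_work( LAPACK_COL_MAJOR, 'L', n, a, n );
    if( info == 0 )
        info = LAPACKE_dpotrs_work( LAPACK_COL_MAJOR, 'L', n, nrhs, a, n, b, n );
    printf( "info = %d, x = (%g, %g)\n", (int)info, b[0], b[1] );  /* expected (1, 1) */
    return 0;
}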
+
+lapack_int LAPACKE_sppsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, float* ap, float* b, lapack_int ldb );
+lapack_int LAPACKE_dppsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, double* ap, double* b, lapack_int ldb );
+lapack_int LAPACKE_cppsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* ap, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zppsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* ap, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sppsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, float* ap, float* afp, char* equed, float* s, float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dppsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, double* ap, double* afp, char* equed, double* s, double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cppsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* ap, lapack_complex_float* afp, char* equed, float* s, lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zppsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* ap, lapack_complex_double* afp, char* equed, double* s, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spptrf_work( int matrix_order, char uplo, lapack_int n, float* ap );
+lapack_int LAPACKE_dpptrf_work( int matrix_order, char uplo, lapack_int n, double* ap );
+lapack_int LAPACKE_cpptrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* ap );
+lapack_int LAPACKE_zpptrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* ap );
+
+lapack_int LAPACKE_spptri_work( int matrix_order, char uplo, lapack_int n, float* ap );
+lapack_int LAPACKE_dpptri_work( int matrix_order, char uplo, lapack_int n, double* ap );
+lapack_int LAPACKE_cpptri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* ap );
+lapack_int LAPACKE_zpptri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* ap );
+
+lapack_int LAPACKE_spptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* ap, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* ap, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* ap, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* ap, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_spstrf_work( int matrix_order, char uplo, lapack_int n, float* a, lapack_int lda, lapack_int* piv, lapack_int* rank, float tol, float* work );
+lapack_int LAPACKE_dpstrf_work( int matrix_order, char uplo, lapack_int n, double* a, lapack_int lda, lapack_int* piv, lapack_int* rank, double tol, double* work );
+lapack_int LAPACKE_cpstrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* a, lapack_int lda, lapack_int* piv, lapack_int* rank, float tol, float* work );
+lapack_int LAPACKE_zpstrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* a, lapack_int lda, lapack_int* piv, lapack_int* rank, double tol, double* work );
+
+lapack_int LAPACKE_sptcon_work( lapack_int n, const float* d, const float* e, float anorm, float* rcond, float* work );
+lapack_int LAPACKE_dptcon_work( lapack_int n, const double* d, const double* e, double anorm, double* rcond, double* work );
+lapack_int LAPACKE_cptcon_work( lapack_int n, const float* d, const lapack_complex_float* e, float anorm, float* rcond, float* work );
+lapack_int LAPACKE_zptcon_work( lapack_int n, const double* d, const lapack_complex_double* e, double anorm, double* rcond, double* work );
+
+lapack_int LAPACKE_spteqr_work( int matrix_order, char compz, lapack_int n, float* d, float* e, float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_dpteqr_work( int matrix_order, char compz, lapack_int n, double* d, double* e, double* z, lapack_int ldz, double* work );
+lapack_int LAPACKE_cpteqr_work( int matrix_order, char compz, lapack_int n, float* d, float* e, lapack_complex_float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_zpteqr_work( int matrix_order, char compz, lapack_int n, double* d, double* e, lapack_complex_double* z, lapack_int ldz, double* work );
+
+lapack_int LAPACKE_sptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs, const float* d, const float* e, const float* df, const float* ef, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* ferr, float* berr, float* work );
+lapack_int LAPACKE_dptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs, const double* d, const double* e, const double* df, const double* ef, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* ferr, double* berr, double* work );
+lapack_int LAPACKE_cptrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* d, const lapack_complex_float* e, const float* df, const lapack_complex_float* ef, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zptrfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* d, const lapack_complex_double* e, const double* df, const lapack_complex_double* ef, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sptsv_work( int matrix_order, lapack_int n, lapack_int nrhs, float* d, float* e, float* b, lapack_int ldb );
+lapack_int LAPACKE_dptsv_work( int matrix_order, lapack_int n, lapack_int nrhs, double* d, double* e, double* b, lapack_int ldb );
+lapack_int LAPACKE_cptsv_work( int matrix_order, lapack_int n, lapack_int nrhs, float* d, lapack_complex_float* e, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zptsv_work( int matrix_order, lapack_int n, lapack_int nrhs, double* d, lapack_complex_double* e, lapack_complex_double* b, lapack_int ldb );
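For the ptsv driver above, the matrix is described by its diagonal d and off-diagonal e; b is overwritten with the solution and no work array is needed. A sketch:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 3, nrhs = 1;
    double d[3] = { 2.0, 2.0, 2.0 };   /* diagonal */
    double e[2] = { -1.0, -1.0 };      /* off-diagonal */
    double b[3] = { 1.0, 0.0, 1.0 };   /* equals A * (1,1,1)^T */
    lapack_int info = LAPACKE_dptsv_work( LAPACK_COL_MAJOR, n, nrhs, d, e, b, n );
    printf( "info = %d, x = (%g, %g, %g)\n", (int)info, b[0], b[1], b[2] );
    return 0;
}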
+
+lapack_int LAPACKE_sptsvx_work( int matrix_order, char fact, lapack_int n, lapack_int nrhs, const float* d, const float* e, float* df, float* ef, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, float* work );
+lapack_int LAPACKE_dptsvx_work( int matrix_order, char fact, lapack_int n, lapack_int nrhs, const double* d, const double* e, double* df, double* ef, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, double* work );
+lapack_int LAPACKE_cptsvx_work( int matrix_order, char fact, lapack_int n, lapack_int nrhs, const float* d, const lapack_complex_float* e, float* df, lapack_complex_float* ef, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zptsvx_work( int matrix_order, char fact, lapack_int n, lapack_int nrhs, const double* d, const lapack_complex_double* e, double* df, lapack_complex_double* ef, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_spttrf_work( lapack_int n, float* d, float* e );
+lapack_int LAPACKE_dpttrf_work( lapack_int n, double* d, double* e );
+lapack_int LAPACKE_cpttrf_work( lapack_int n, float* d, lapack_complex_float* e );
+lapack_int LAPACKE_zpttrf_work( lapack_int n, double* d, lapack_complex_double* e );
+
+lapack_int LAPACKE_spttrs_work( int matrix_order, lapack_int n, lapack_int nrhs, const float* d, const float* e, float* b, lapack_int ldb );
+lapack_int LAPACKE_dpttrs_work( int matrix_order, lapack_int n, lapack_int nrhs, const double* d, const double* e, double* b, lapack_int ldb );
+lapack_int LAPACKE_cpttrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* d, const lapack_complex_float* e, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zpttrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* d, const lapack_complex_double* e, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_ssbev_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int kd, float* ab, lapack_int ldab, float* w, float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_dsbev_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int kd, double* ab, lapack_int ldab, double* w, double* z, lapack_int ldz, double* work );
+
+lapack_int LAPACKE_ssbevd_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int kd, float* ab, lapack_int ldab, float* w, float* z, lapack_int ldz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dsbevd_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int kd, double* ab, lapack_int ldab, double* w, double* z, lapack_int ldz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_ssbevx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, lapack_int kd, float* ab, lapack_int ldab, float* q, lapack_int ldq, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_dsbevx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, lapack_int kd, double* ab, lapack_int ldab, double* q, lapack_int ldq, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_ssbgst_work( int matrix_order, char vect, char uplo, lapack_int n, lapack_int ka, lapack_int kb, float* ab, lapack_int ldab, const float* bb, lapack_int ldbb, float* x, lapack_int ldx, float* work );
+lapack_int LAPACKE_dsbgst_work( int matrix_order, char vect, char uplo, lapack_int n, lapack_int ka, lapack_int kb, double* ab, lapack_int ldab, const double* bb, lapack_int ldbb, double* x, lapack_int ldx, double* work );
+
+lapack_int LAPACKE_ssbgv_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int ka, lapack_int kb, float* ab, lapack_int ldab, float* bb, lapack_int ldbb, float* w, float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_dsbgv_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int ka, lapack_int kb, double* ab, lapack_int ldab, double* bb, lapack_int ldbb, double* w, double* z, lapack_int ldz, double* work );
+
+lapack_int LAPACKE_ssbgvd_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int ka, lapack_int kb, float* ab, lapack_int ldab, float* bb, lapack_int ldbb, float* w, float* z, lapack_int ldz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dsbgvd_work( int matrix_order, char jobz, char uplo, lapack_int n, lapack_int ka, lapack_int kb, double* ab, lapack_int ldab, double* bb, lapack_int ldbb, double* w, double* z, lapack_int ldz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_ssbgvx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, lapack_int ka, lapack_int kb, float* ab, lapack_int ldab, float* bb, lapack_int ldbb, float* q, lapack_int ldq, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_dsbgvx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, lapack_int ka, lapack_int kb, double* ab, lapack_int ldab, double* bb, lapack_int ldbb, double* q, lapack_int ldq, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_ssbtrd_work( int matrix_order, char vect, char uplo, lapack_int n, lapack_int kd, float* ab, lapack_int ldab, float* d, float* e, float* q, lapack_int ldq, float* work );
+lapack_int LAPACKE_dsbtrd_work( int matrix_order, char vect, char uplo, lapack_int n, lapack_int kd, double* ab, lapack_int ldab, double* d, double* e, double* q, lapack_int ldq, double* work );
+
+lapack_int LAPACKE_ssfrk_work( int matrix_order, char transr, char uplo, char trans, lapack_int n, lapack_int k, float alpha, const float* a, lapack_int lda, float beta, float* c );
+lapack_int LAPACKE_dsfrk_work( int matrix_order, char transr, char uplo, char trans, lapack_int n, lapack_int k, double alpha, const double* a, lapack_int lda, double beta, double* c );
+
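A hedged sketch for the banded symmetric eigensolver declared above. The _work variant expects the caller to size the work array; the underlying dsbev documents max(1, 3n-2), which is taken here as an assumption to verify. With jobz = 'N', z should not be referenced:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 3, kd = 1, ldab = 2;
    /* tridiag(-1, 2, -1) in upper band storage, as in the pbsv sketch above. */
    double ab[6] = { 0.0, 2.0,  -1.0, 2.0,  -1.0, 2.0 };
    double w[3], work[7];  /* 3*n - 2 = 7 (assumed from the dsbev docs) */
    lapack_int info = LAPACKE_dsbev_work( LAPACK_COL_MAJOR, 'N', 'U', n, kd, ab, ldab, w, NULL, 1, work );
    /* Expect roughly 0.586, 2, 3.414 (that is, 2 minus/plus sqrt(2), and 2). */
    printf( "info = %d, w = (%g, %g, %g)\n", (int)info, w[0], w[1], w[2] );
    return 0;
}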
+lapack_int LAPACKE_sspcon_work( int matrix_order, char uplo, lapack_int n, const float* ap, const lapack_int* ipiv, float anorm, float* rcond, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dspcon_work( int matrix_order, char uplo, lapack_int n, const double* ap, const lapack_int* ipiv, double anorm, double* rcond, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cspcon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_float* ap, const lapack_int* ipiv, float anorm, float* rcond, lapack_complex_float* work );
+lapack_int LAPACKE_zspcon_work( int matrix_order, char uplo, lapack_int n, const lapack_complex_double* ap, const lapack_int* ipiv, double anorm, double* rcond, lapack_complex_double* work );
+
+lapack_int LAPACKE_sspev_work( int matrix_order, char jobz, char uplo, lapack_int n, float* ap, float* w, float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_dspev_work( int matrix_order, char jobz, char uplo, lapack_int n, double* ap, double* w, double* z, lapack_int ldz, double* work );
+
+lapack_int LAPACKE_sspevd_work( int matrix_order, char jobz, char uplo, lapack_int n, float* ap, float* w, float* z, lapack_int ldz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dspevd_work( int matrix_order, char jobz, char uplo, lapack_int n, double* ap, double* w, double* z, lapack_int ldz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_sspevx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, float* ap, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_dspevx_work( int matrix_order, char jobz, char range, char uplo, lapack_int n, double* ap, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_sspgst_work( int matrix_order, lapack_int itype, char uplo, lapack_int n, float* ap, const float* bp );
+lapack_int LAPACKE_dspgst_work( int matrix_order, lapack_int itype, char uplo, lapack_int n, double* ap, const double* bp );
+
+lapack_int LAPACKE_sspgv_work( int matrix_order, lapack_int itype, char jobz, char uplo, lapack_int n, float* ap, float* bp, float* w, float* z, lapack_int ldz, float* work );
+lapack_int LAPACKE_dspgv_work( int matrix_order, lapack_int itype, char jobz, char uplo, lapack_int n, double* ap, double* bp, double* w, double* z, lapack_int ldz, double* work );
+
+lapack_int LAPACKE_sspgvd_work( int matrix_order, lapack_int itype, char jobz, char uplo, lapack_int n, float* ap, float* bp, float* w, float* z, lapack_int ldz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dspgvd_work( int matrix_order, lapack_int itype, char jobz, char uplo, lapack_int n, double* ap, double* bp, double* w, double* z, lapack_int ldz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_sspgvx_work( int matrix_order, lapack_int itype, char jobz, char range, char uplo, lapack_int n, float* ap, float* bp, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifail );
+lapack_int LAPACKE_dspgvx_work( int matrix_order, lapack_int itype, char jobz, char range, char uplo, lapack_int n, double* ap, double* bp, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifail );
+
+lapack_int LAPACKE_ssprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* ap, const float* afp, const lapack_int* ipiv, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dsprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* ap, const double* afp, const lapack_int* ipiv, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_csprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* ap, const lapack_complex_float* afp, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zsprfs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* ap, const lapack_complex_double* afp, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
+
+lapack_int LAPACKE_sspsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, float* ap, lapack_int* ipiv, float* b, lapack_int ldb );
+lapack_int LAPACKE_dspsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, double* ap, lapack_int* ipiv, double* b, lapack_int ldb );
+lapack_int LAPACKE_cspsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_float* ap, lapack_int* ipiv, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zspsv_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, lapack_complex_double* ap, lapack_int* ipiv, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sspsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, const float* ap, float* afp, lapack_int* ipiv, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dspsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, const double* ap, double* afp, lapack_int* ipiv, const double* b, lapack_int ldb, double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork );
+lapack_int LAPACKE_cspsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* ap, lapack_complex_float* afp, lapack_int* ipiv, const lapack_complex_float* b, lapack_int ldb, lapack_complex_float* x, lapack_int ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork );
+lapack_int LAPACKE_zspsvx_work( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* ap, lapack_complex_double* afp, lapack_int* ipiv, const lapack_complex_double* b, lapack_int ldb, lapack_complex_double* x, lapack_int ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork );
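The packed symmetric solver spsv declared above stores only one triangle of A in ap (for uplo = 'U' in column-major order that is a(0,0), a(0,1), a(1,1), ...). A sketch with a 2-by-2 system:

#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_int n = 2, nrhs = 1, ipiv[2];
    double ap[3] = { 2.0, 1.0, 2.0 };  /* packed upper triangle of [2 1; 1 2] */
    double b[2]  = { 3.0, 3.0 };       /* equals A * (1,1)^T */
    lapack_int info = LAPACKE_dspsv_work( LAPACK_COL_MAJOR, 'U', n, nrhs, ap, ipiv, b, n );
    printf( "info = %d, x = (%g, %g)\n", (int)info, b[0], b[1] );
    return 0;
}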
+
+lapack_int LAPACKE_ssptrd_work( int matrix_order, char uplo, lapack_int n, float* ap, float* d, float* e, float* tau );
+lapack_int LAPACKE_dsptrd_work( int matrix_order, char uplo, lapack_int n, double* ap, double* d, double* e, double* tau );
+
+lapack_int LAPACKE_ssptrf_work( int matrix_order, char uplo, lapack_int n, float* ap, lapack_int* ipiv );
+lapack_int LAPACKE_dsptrf_work( int matrix_order, char uplo, lapack_int n, double* ap, lapack_int* ipiv );
+lapack_int LAPACKE_csptrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* ap, lapack_int* ipiv );
+lapack_int LAPACKE_zsptrf_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* ap, lapack_int* ipiv );
+
+lapack_int LAPACKE_ssptri_work( int matrix_order, char uplo, lapack_int n, float* ap, const lapack_int* ipiv, float* work );
+lapack_int LAPACKE_dsptri_work( int matrix_order, char uplo, lapack_int n, double* ap, const lapack_int* ipiv, double* work );
+lapack_int LAPACKE_csptri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* work );
+lapack_int LAPACKE_zsptri_work( int matrix_order, char uplo, lapack_int n, lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* work );
+
+lapack_int LAPACKE_ssptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const float* ap, const lapack_int* ipiv, float* b, lapack_int ldb );
+lapack_int LAPACKE_dsptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const double* ap, const lapack_int* ipiv, double* b, lapack_int ldb );
+lapack_int LAPACKE_csptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_zsptrs_work( int matrix_order, char uplo, lapack_int n, lapack_int nrhs, const lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_sstebz_work( char range, char order, lapack_int n, float vl, float vu, lapack_int il, lapack_int iu, float abstol, const float* d, const float* e, lapack_int* m, lapack_int* nsplit, float* w, lapack_int* iblock, lapack_int* isplit, float* work, lapack_int* iwork );
+lapack_int LAPACKE_dstebz_work( char range, char order, lapack_int n, double vl, double vu, lapack_int il, lapack_int iu, double abstol, const double* d, const double* e, lapack_int* m, lapack_int* nsplit, double* w, lapack_int* iblock, lapack_int* isplit, double* work, lapack_int* iwork );
+
+lapack_int LAPACKE_sstedc_work( int matrix_order, char compz, lapack_int n, float* d, float* e, float* z, lapack_int ldz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dstedc_work( int matrix_order, char compz, lapack_int n, double* d, double* e, double* z, lapack_int ldz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_cstedc_work( int matrix_order, char compz, lapack_int n, float* d, float* e, lapack_complex_float* z, lapack_int ldz, lapack_complex_float* work, lapack_int lwork, float* rwork, lapack_int lrwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_zstedc_work( int matrix_order, char compz, lapack_int n, double* d, double* e, lapack_complex_double* z, lapack_int ldz, lapack_complex_double* work, lapack_int lwork, double* rwork, lapack_int lrwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_sstegr_work( int matrix_order, char jobz, char range, lapack_int n, float* d, float* e, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, float* z, lapack_int ldz, lapack_int* isuppz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dstegr_work( int matrix_order, char jobz, char range, lapack_int n, double* d, double* e, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, double* z, lapack_int ldz, lapack_int* isuppz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_cstegr_work( int matrix_order, char jobz, char range, lapack_int n, float* d, float* e, float vl, float vu, lapack_int il, lapack_int iu, float abstol, lapack_int* m, float* w, lapack_complex_float* z, lapack_int ldz, lapack_int* isuppz, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_zstegr_work( int matrix_order, char jobz, char range, lapack_int n, double* d, double* e, double vl, double vu, lapack_int il, lapack_int iu, double abstol, lapack_int* m, double* w, lapack_complex_double* z, lapack_int ldz, lapack_int* isuppz, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+
+lapack_int LAPACKE_sstein_work( int matrix_order, lapack_int n, const float* d, const float* e, lapack_int m, const float* w, const lapack_int* iblock, const lapack_int* isplit, float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifailv );
+lapack_int LAPACKE_dstein_work( int matrix_order, lapack_int n, const double* d, const double* e, lapack_int m, const double* w, const lapack_int* iblock, const lapack_int* isplit, double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifailv );
+lapack_int LAPACKE_cstein_work( int matrix_order, lapack_int n, const float* d, const float* e, lapack_int m, const float* w, const lapack_int* iblock, const lapack_int* isplit, lapack_complex_float* z, lapack_int ldz, float* work, lapack_int* iwork, lapack_int* ifailv );
+lapack_int LAPACKE_zstein_work( int matrix_order, lapack_int n, const double* d, const double* e, lapack_int m, const double* w, const lapack_int* iblock, const lapack_int* isplit, lapack_complex_double* z, lapack_int ldz, double* work, lapack_int* iwork, lapack_int* ifailv );
+
+lapack_int LAPACKE_sstemr_work( int matrix_order, char jobz, char range, lapack_int n, float* d, float* e, float vl, float vu, lapack_int il, lapack_int iu, lapack_int* m, float* w, float* z, lapack_int ldz, lapack_int nzc, lapack_int* isuppz, lapack_logical* tryrac, float* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
+lapack_int LAPACKE_dstemr_work( int matrix_order, char jobz, char range, lapack_int n, double* d, double* e, double vl, double vu, lapack_int il, lapack_int iu, lapack_int* m, double* w, double* z, lapack_int ldz, lapack_int nzc, lapack_int* isuppz, lapack_logical* tryrac, double* work, lapack_int lwork, lapack_int* iwork, lapack_int liwork );
tryrac, float* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_zstemr_work( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + lapack_int* m, double* w, + lapack_complex_double* z, lapack_int ldz, + lapack_int nzc, lapack_int* isuppz, + lapack_logical* tryrac, double* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); + +lapack_int LAPACKE_ssteqr_work( int matrix_order, char compz, lapack_int n, + float* d, float* e, float* z, lapack_int ldz, + float* work ); +lapack_int LAPACKE_dsteqr_work( int matrix_order, char compz, lapack_int n, + double* d, double* e, double* z, lapack_int ldz, + double* work ); +lapack_int LAPACKE_csteqr_work( int matrix_order, char compz, lapack_int n, + float* d, float* e, lapack_complex_float* z, + lapack_int ldz, float* work ); +lapack_int LAPACKE_zsteqr_work( int matrix_order, char compz, lapack_int n, + double* d, double* e, lapack_complex_double* z, + lapack_int ldz, double* work ); + +lapack_int LAPACKE_ssterf_work( lapack_int n, float* d, float* e ); +lapack_int LAPACKE_dsterf_work( lapack_int n, double* d, double* e ); + +lapack_int LAPACKE_sstev_work( int matrix_order, char jobz, lapack_int n, + float* d, float* e, float* z, lapack_int ldz, + float* work ); +lapack_int LAPACKE_dstev_work( int matrix_order, char jobz, lapack_int n, + double* d, double* e, double* z, lapack_int ldz, + double* work ); + +lapack_int LAPACKE_sstevd_work( int matrix_order, char jobz, lapack_int n, + float* d, float* e, float* z, lapack_int ldz, + float* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_dstevd_work( int matrix_order, char jobz, lapack_int n, + double* d, double* e, double* z, lapack_int ldz, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_sstevr_work( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, + float vu, lapack_int il, lapack_int iu, + float abstol, lapack_int* m, float* w, float* z, + lapack_int ldz, lapack_int* isuppz, float* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_dstevr_work( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, + double* z, lapack_int ldz, lapack_int* isuppz, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_sstevx_work( int matrix_order, char jobz, char range, + lapack_int n, float* d, float* e, float vl, + float vu, lapack_int il, lapack_int iu, + float abstol, lapack_int* m, float* w, float* z, + lapack_int ldz, float* work, lapack_int* iwork, + lapack_int* ifail ); +lapack_int LAPACKE_dstevx_work( int matrix_order, char jobz, char range, + lapack_int n, double* d, double* e, double vl, + double vu, lapack_int il, lapack_int iu, + double abstol, lapack_int* m, double* w, + double* z, lapack_int ldz, double* work, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_ssycon_work( int matrix_order, char uplo, lapack_int n, + const float* a, lapack_int lda, + const lapack_int* ipiv, float anorm, + float* rcond, float* work, lapack_int* iwork ); +lapack_int LAPACKE_dsycon_work( int matrix_order, char uplo, lapack_int n, + const double* a, lapack_int lda, + const lapack_int* ipiv, double anorm, + double* rcond, double* work, + lapack_int* iwork ); 
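A note on the `_work` variants collected in this header: the caller owns every workspace array and the status comes back as the `lapack_int` return value. A minimal usage sketch (not part of the header) for `LAPACKE_dstev_work` as declared above — eigenvalues of a symmetric tridiagonal matrix — assuming the header is installed as <lapacke.h> and using the `LAPACK_COL_MAJOR` constant defined near the top of the file:

#include <stdio.h>
#include <lapacke.h>

int main( void )
{
    /* 4-by-4 symmetric tridiagonal matrix: 2 on the diagonal, -1 off it */
    lapack_int n = 4;
    double d[4] = {  2.0,  2.0,  2.0, 2.0 }; /* overwritten by eigenvalues */
    double e[3] = { -1.0, -1.0, -1.0 };      /* destroyed on exit */
    double work[2 * 4];                      /* >= max(1, 2n-2); unused for jobz='N' */
    double z_dummy;                          /* z is not referenced when jobz='N' */

    lapack_int info = LAPACKE_dstev_work( LAPACK_COL_MAJOR, 'N', n,
                                          d, e, &z_dummy, 1, work );
    if( info != 0 ) return 1;
    for( int i = 0; i < n; ++i )
        printf( "lambda[%d] = %f\n", i, d[i] );
    return 0;
}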
+lapack_int LAPACKE_csycon_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, float anorm, + float* rcond, lapack_complex_float* work ); +lapack_int LAPACKE_zsycon_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, double anorm, + double* rcond, lapack_complex_double* work ); + +lapack_int LAPACKE_ssyequb_work( int matrix_order, char uplo, lapack_int n, + const float* a, lapack_int lda, float* s, + float* scond, float* amax, float* work ); +lapack_int LAPACKE_dsyequb_work( int matrix_order, char uplo, lapack_int n, + const double* a, lapack_int lda, double* s, + double* scond, double* amax, double* work ); +lapack_int LAPACKE_csyequb_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + float* s, float* scond, float* amax, + lapack_complex_float* work ); +lapack_int LAPACKE_zsyequb_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + double* s, double* scond, double* amax, + lapack_complex_double* work ); + +lapack_int LAPACKE_ssyev_work( int matrix_order, char jobz, char uplo, + lapack_int n, float* a, lapack_int lda, float* w, + float* work, lapack_int lwork ); +lapack_int LAPACKE_dsyev_work( int matrix_order, char jobz, char uplo, + lapack_int n, double* a, lapack_int lda, + double* w, double* work, lapack_int lwork ); + +lapack_int LAPACKE_ssyevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, float* a, lapack_int lda, + float* w, float* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_dsyevd_work( int matrix_order, char jobz, char uplo, + lapack_int n, double* a, lapack_int lda, + double* w, double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_ssyevr_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, float* a, + lapack_int lda, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, + lapack_int ldz, lapack_int* isuppz, float* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_dsyevr_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, double* a, + lapack_int lda, double vl, double vu, + lapack_int il, lapack_int iu, double abstol, + lapack_int* m, double* w, double* z, + lapack_int ldz, lapack_int* isuppz, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_ssyevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, float* a, + lapack_int lda, float vl, float vu, + lapack_int il, lapack_int iu, float abstol, + lapack_int* m, float* w, float* z, + lapack_int ldz, float* work, lapack_int lwork, + lapack_int* iwork, lapack_int* ifail ); +lapack_int LAPACKE_dsyevx_work( int matrix_order, char jobz, char range, + char uplo, lapack_int n, double* a, + lapack_int lda, double vl, double vu, + lapack_int il, lapack_int iu, double abstol, + lapack_int* m, double* w, double* z, + lapack_int ldz, double* work, lapack_int lwork, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_ssygst_work( int matrix_order, lapack_int itype, char uplo, + lapack_int n, float* a, lapack_int lda, + const float* b, lapack_int ldb ); +lapack_int LAPACKE_dsygst_work( int matrix_order, lapack_int itype, char uplo, + lapack_int n, double* a, lapack_int lda, + const double* b, lapack_int 
ldb ); + +lapack_int LAPACKE_ssygv_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, + float* w, float* work, lapack_int lwork ); +lapack_int LAPACKE_dsygv_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, + double* w, double* work, lapack_int lwork ); + +lapack_int LAPACKE_ssygvd_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, + float* w, float* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_dsygvd_work( int matrix_order, lapack_int itype, char jobz, + char uplo, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, + double* w, double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); + +lapack_int LAPACKE_ssygvx_work( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, + float vl, float vu, lapack_int il, + lapack_int iu, float abstol, lapack_int* m, + float* w, float* z, lapack_int ldz, float* work, + lapack_int lwork, lapack_int* iwork, + lapack_int* ifail ); +lapack_int LAPACKE_dsygvx_work( int matrix_order, lapack_int itype, char jobz, + char range, char uplo, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, + double vl, double vu, lapack_int il, + lapack_int iu, double abstol, lapack_int* m, + double* w, double* z, lapack_int ldz, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int* ifail ); + +lapack_int LAPACKE_ssyrfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + const float* af, lapack_int ldaf, + const lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* ferr, float* berr, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dsyrfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, + lapack_int lda, const double* af, + lapack_int ldaf, const lapack_int* ipiv, + const double* b, lapack_int ldb, double* x, + lapack_int ldx, double* ferr, double* berr, + double* work, lapack_int* iwork ); +lapack_int LAPACKE_csyrfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_zsyrfs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_complex_double* af, + lapack_int ldaf, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_ssyrfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, const float* a, + lapack_int lda, const float* af, + lapack_int ldaf, const lapack_int* ipiv, + const float* s, const float* b, lapack_int ldb, + float* x, lapack_int ldx, float* rcond, + float* berr, lapack_int n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int nparams, float* params, float* work, + lapack_int* iwork ); +lapack_int 
LAPACKE_dsyrfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, const double* a, + lapack_int lda, const double* af, + lapack_int ldaf, const lapack_int* ipiv, + const double* s, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_csyrfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* af, + lapack_int ldaf, const lapack_int* ipiv, + const float* s, const lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zsyrfsx_work( int matrix_order, char uplo, char equed, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* af, + lapack_int ldaf, const lapack_int* ipiv, + const double* s, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_ssysv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, float* a, lapack_int lda, + lapack_int* ipiv, float* b, lapack_int ldb, + float* work, lapack_int lwork ); +lapack_int LAPACKE_dsysv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, double* a, lapack_int lda, + lapack_int* ipiv, double* b, lapack_int ldb, + double* work, lapack_int lwork ); +lapack_int LAPACKE_csysv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_float* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zsysv_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, lapack_complex_double* a, + lapack_int lda, lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_ssysvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, const float* a, + lapack_int lda, float* af, lapack_int ldaf, + lapack_int* ipiv, const float* b, + lapack_int ldb, float* x, lapack_int ldx, + float* rcond, float* ferr, float* berr, + float* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_dsysvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, const double* a, + lapack_int lda, double* af, lapack_int ldaf, + lapack_int* ipiv, const double* b, + lapack_int ldb, double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + double* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_csysvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + lapack_complex_float* af, lapack_int ldaf, + lapack_int* ipiv, const lapack_complex_float* b, + lapack_int ldb, lapack_complex_float* x, + lapack_int ldx, float* rcond, float* ferr, + float* berr, lapack_complex_float* work, + lapack_int lwork, float* rwork ); +lapack_int 
LAPACKE_zsysvx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_double* af, lapack_int ldaf, + lapack_int* ipiv, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* ferr, double* berr, + lapack_complex_double* work, lapack_int lwork, + double* rwork ); + +lapack_int LAPACKE_ssysvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, float* a, + lapack_int lda, float* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, float* s, + float* b, lapack_int ldb, float* x, + lapack_int ldx, float* rcond, float* rpvgrw, + float* berr, lapack_int n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int nparams, float* params, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dsysvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, double* a, + lapack_int lda, double* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, double* s, + double* b, lapack_int ldb, double* x, + lapack_int ldx, double* rcond, double* rpvgrw, + double* berr, lapack_int n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int nparams, double* params, + double* work, lapack_int* iwork ); +lapack_int LAPACKE_csysvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, float* s, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* x, lapack_int ldx, + float* rcond, float* rpvgrw, float* berr, + lapack_int n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int nparams, + float* params, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_zsysvxx_work( int matrix_order, char fact, char uplo, + lapack_int n, lapack_int nrhs, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* af, lapack_int ldaf, + lapack_int* ipiv, char* equed, double* s, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* x, lapack_int ldx, + double* rcond, double* rpvgrw, double* berr, + lapack_int n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int nparams, + double* params, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_ssytrd_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, float* d, float* e, + float* tau, float* work, lapack_int lwork ); +lapack_int LAPACKE_dsytrd_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, double* d, double* e, + double* tau, double* work, lapack_int lwork ); + +lapack_int LAPACKE_ssytrf_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, lapack_int* ipiv, + float* work, lapack_int lwork ); +lapack_int LAPACKE_dsytrf_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, lapack_int* ipiv, + double* work, lapack_int lwork ); +lapack_int LAPACKE_csytrf_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_int* ipiv, lapack_complex_float* work, + lapack_int lwork ); +lapack_int LAPACKE_zsytrf_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_int* ipiv, lapack_complex_double* work, + lapack_int lwork ); + +lapack_int LAPACKE_ssytri_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, + const 
lapack_int* ipiv, float* work ); +lapack_int LAPACKE_dsytri_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, + const lapack_int* ipiv, double* work ); +lapack_int LAPACKE_csytri_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work ); +lapack_int LAPACKE_zsytri_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work ); + +lapack_int LAPACKE_ssytrs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + const lapack_int* ipiv, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dsytrs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, + lapack_int lda, const lapack_int* ipiv, + double* b, lapack_int ldb ); +lapack_int LAPACKE_csytrs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_zsytrs_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stbcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, lapack_int kd, + const float* ab, lapack_int ldab, float* rcond, + float* work, lapack_int* iwork ); +lapack_int LAPACKE_dtbcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, lapack_int kd, + const double* ab, lapack_int ldab, + double* rcond, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_ctbcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, lapack_int kd, + const lapack_complex_float* ab, lapack_int ldab, + float* rcond, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_ztbcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, lapack_int kd, + const lapack_complex_double* ab, + lapack_int ldab, double* rcond, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_stbrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const float* ab, + lapack_int ldab, const float* b, lapack_int ldb, + const float* x, lapack_int ldx, float* ferr, + float* berr, float* work, lapack_int* iwork ); +lapack_int LAPACKE_dtbrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const double* ab, + lapack_int ldab, const double* b, + lapack_int ldb, const double* x, lapack_int ldx, + double* ferr, double* berr, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_ctbrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const lapack_complex_float* ab, + lapack_int ldab, const lapack_complex_float* b, + lapack_int ldb, const lapack_complex_float* x, + lapack_int ldx, float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztbrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, + const lapack_complex_double* ab, + lapack_int ldab, const lapack_complex_double* b, + lapack_int ldb, const lapack_complex_double* x, + lapack_int ldx, double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int 
LAPACKE_stbtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const float* ab, + lapack_int ldab, float* b, lapack_int ldb ); +lapack_int LAPACKE_dtbtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const double* ab, + lapack_int ldab, double* b, lapack_int ldb ); +lapack_int LAPACKE_ctbtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, const lapack_complex_float* ab, + lapack_int ldab, lapack_complex_float* b, + lapack_int ldb ); +lapack_int LAPACKE_ztbtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int kd, + lapack_int nrhs, + const lapack_complex_double* ab, + lapack_int ldab, lapack_complex_double* b, + lapack_int ldb ); + +lapack_int LAPACKE_stfsm_work( int matrix_order, char transr, char side, + char uplo, char trans, char diag, lapack_int m, + lapack_int n, float alpha, const float* a, + float* b, lapack_int ldb ); +lapack_int LAPACKE_dtfsm_work( int matrix_order, char transr, char side, + char uplo, char trans, char diag, lapack_int m, + lapack_int n, double alpha, const double* a, + double* b, lapack_int ldb ); +lapack_int LAPACKE_ctfsm_work( int matrix_order, char transr, char side, + char uplo, char trans, char diag, lapack_int m, + lapack_int n, lapack_complex_float alpha, + const lapack_complex_float* a, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztfsm_work( int matrix_order, char transr, char side, + char uplo, char trans, char diag, lapack_int m, + lapack_int n, lapack_complex_double alpha, + const lapack_complex_double* a, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stftri_work( int matrix_order, char transr, char uplo, + char diag, lapack_int n, float* a ); +lapack_int LAPACKE_dtftri_work( int matrix_order, char transr, char uplo, + char diag, lapack_int n, double* a ); +lapack_int LAPACKE_ctftri_work( int matrix_order, char transr, char uplo, + char diag, lapack_int n, + lapack_complex_float* a ); +lapack_int LAPACKE_ztftri_work( int matrix_order, char transr, char uplo, + char diag, lapack_int n, + lapack_complex_double* a ); + +lapack_int LAPACKE_stfttp_work( int matrix_order, char transr, char uplo, + lapack_int n, const float* arf, float* ap ); +lapack_int LAPACKE_dtfttp_work( int matrix_order, char transr, char uplo, + lapack_int n, const double* arf, double* ap ); +lapack_int LAPACKE_ctfttp_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* arf, + lapack_complex_float* ap ); +lapack_int LAPACKE_ztfttp_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* arf, + lapack_complex_double* ap ); + +lapack_int LAPACKE_stfttr_work( int matrix_order, char transr, char uplo, + lapack_int n, const float* arf, float* a, + lapack_int lda ); +lapack_int LAPACKE_dtfttr_work( int matrix_order, char transr, char uplo, + lapack_int n, const double* arf, double* a, + lapack_int lda ); +lapack_int LAPACKE_ctfttr_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* arf, + lapack_complex_float* a, lapack_int lda ); +lapack_int LAPACKE_ztfttr_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* arf, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_stgevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + 
const float* s, lapack_int lds, const float* p, + lapack_int ldp, float* vl, lapack_int ldvl, + float* vr, lapack_int ldvr, lapack_int mm, + lapack_int* m, float* work ); +lapack_int LAPACKE_dtgevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const double* s, lapack_int lds, + const double* p, lapack_int ldp, double* vl, + lapack_int ldvl, double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, double* work ); +lapack_int LAPACKE_ctgevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* s, lapack_int lds, + const lapack_complex_float* p, lapack_int ldp, + lapack_complex_float* vl, lapack_int ldvl, + lapack_complex_float* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztgevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* s, lapack_int lds, + const lapack_complex_double* p, lapack_int ldp, + lapack_complex_double* vl, lapack_int ldvl, + lapack_complex_double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_stgexc_work( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, float* a, + lapack_int lda, float* b, lapack_int ldb, + float* q, lapack_int ldq, float* z, + lapack_int ldz, lapack_int* ifst, + lapack_int* ilst, float* work, + lapack_int lwork ); +lapack_int LAPACKE_dtgexc_work( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, double* a, + lapack_int lda, double* b, lapack_int ldb, + double* q, lapack_int ldq, double* z, + lapack_int ldz, lapack_int* ifst, + lapack_int* ilst, double* work, + lapack_int lwork ); +lapack_int LAPACKE_ctgexc_work( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* z, lapack_int ldz, + lapack_int ifst, lapack_int ilst ); +lapack_int LAPACKE_ztgexc_work( int matrix_order, lapack_logical wantq, + lapack_logical wantz, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* z, lapack_int ldz, + lapack_int ifst, lapack_int ilst ); + +lapack_int LAPACKE_stgsen_work( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + float* a, lapack_int lda, float* b, + lapack_int ldb, float* alphar, float* alphai, + float* beta, float* q, lapack_int ldq, float* z, + lapack_int ldz, lapack_int* m, float* pl, + float* pr, float* dif, float* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); +lapack_int LAPACKE_dtgsen_work( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + double* a, lapack_int lda, double* b, + lapack_int ldb, double* alphar, double* alphai, + double* beta, double* q, lapack_int ldq, + double* z, lapack_int ldz, lapack_int* m, + double* pl, double* pr, double* dif, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_ctgsen_work( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + 
lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* z, lapack_int ldz, + lapack_int* m, float* pl, float* pr, float* dif, + lapack_complex_float* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_ztgsen_work( int matrix_order, lapack_int ijob, + lapack_logical wantq, lapack_logical wantz, + const lapack_logical* select, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* z, lapack_int ldz, + lapack_int* m, double* pl, double* pr, + double* dif, lapack_complex_double* work, + lapack_int lwork, lapack_int* iwork, + lapack_int liwork ); + +lapack_int LAPACKE_stgsja_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_int k, lapack_int l, + float* a, lapack_int lda, float* b, + lapack_int ldb, float tola, float tolb, + float* alpha, float* beta, float* u, + lapack_int ldu, float* v, lapack_int ldv, + float* q, lapack_int ldq, float* work, + lapack_int* ncycle ); +lapack_int LAPACKE_dtgsja_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_int k, lapack_int l, + double* a, lapack_int lda, double* b, + lapack_int ldb, double tola, double tolb, + double* alpha, double* beta, double* u, + lapack_int ldu, double* v, lapack_int ldv, + double* q, lapack_int ldq, double* work, + lapack_int* ncycle ); +lapack_int LAPACKE_ctgsja_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_int k, lapack_int l, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb, + float tola, float tolb, float* alpha, + float* beta, lapack_complex_float* u, + lapack_int ldu, lapack_complex_float* v, + lapack_int ldv, lapack_complex_float* q, + lapack_int ldq, lapack_complex_float* work, + lapack_int* ncycle ); +lapack_int LAPACKE_ztgsja_work( int matrix_order, char jobu, char jobv, + char jobq, lapack_int m, lapack_int p, + lapack_int n, lapack_int k, lapack_int l, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb, + double tola, double tolb, double* alpha, + double* beta, lapack_complex_double* u, + lapack_int ldu, lapack_complex_double* v, + lapack_int ldv, lapack_complex_double* q, + lapack_int ldq, lapack_complex_double* work, + lapack_int* ncycle ); + +lapack_int LAPACKE_stgsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const float* a, lapack_int lda, const float* b, + lapack_int ldb, const float* vl, + lapack_int ldvl, const float* vr, + lapack_int ldvr, float* s, float* dif, + lapack_int mm, lapack_int* m, float* work, + lapack_int lwork, lapack_int* iwork ); +lapack_int LAPACKE_dtgsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const double* a, lapack_int lda, + const double* b, lapack_int ldb, + const double* vl, lapack_int ldvl, + const double* vr, lapack_int ldvr, double* s, + double* dif, lapack_int mm, lapack_int* m, + double* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_ctgsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const 
lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* vl, lapack_int ldvl, + const lapack_complex_float* vr, lapack_int ldvr, + float* s, float* dif, lapack_int mm, + lapack_int* m, lapack_complex_float* work, + lapack_int lwork, lapack_int* iwork ); +lapack_int LAPACKE_ztgsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* vl, + lapack_int ldvl, + const lapack_complex_double* vr, + lapack_int ldvr, double* s, double* dif, + lapack_int mm, lapack_int* m, + lapack_complex_double* work, lapack_int lwork, + lapack_int* iwork ); + +lapack_int LAPACKE_stgsyl_work( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, const float* a, + lapack_int lda, const float* b, lapack_int ldb, + float* c, lapack_int ldc, const float* d, + lapack_int ldd, const float* e, lapack_int lde, + float* f, lapack_int ldf, float* scale, + float* dif, float* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_dtgsyl_work( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, const double* a, + lapack_int lda, const double* b, lapack_int ldb, + double* c, lapack_int ldc, const double* d, + lapack_int ldd, const double* e, lapack_int lde, + double* f, lapack_int ldf, double* scale, + double* dif, double* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_ctgsyl_work( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* c, lapack_int ldc, + const lapack_complex_float* d, lapack_int ldd, + const lapack_complex_float* e, lapack_int lde, + lapack_complex_float* f, lapack_int ldf, + float* scale, float* dif, + lapack_complex_float* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_ztgsyl_work( int matrix_order, char trans, lapack_int ijob, + lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* c, lapack_int ldc, + const lapack_complex_double* d, lapack_int ldd, + const lapack_complex_double* e, lapack_int lde, + lapack_complex_double* f, lapack_int ldf, + double* scale, double* dif, + lapack_complex_double* work, lapack_int lwork, + lapack_int* iwork ); + +lapack_int LAPACKE_stpcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, const float* ap, + float* rcond, float* work, lapack_int* iwork ); +lapack_int LAPACKE_dtpcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, const double* ap, + double* rcond, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_ctpcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, + const lapack_complex_float* ap, float* rcond, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztpcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, + const lapack_complex_double* ap, double* rcond, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_stprfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const float* ap, const float* b, lapack_int ldb, + const float* x, lapack_int ldx, float* ferr, + float* berr, float* work, lapack_int* iwork ); +lapack_int 
LAPACKE_dtprfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const double* ap, const double* b, + lapack_int ldb, const double* x, lapack_int ldx, + double* ferr, double* berr, double* work, + lapack_int* iwork ); +lapack_int LAPACKE_ctprfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_float* ap, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztprfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_double* ap, + const lapack_complex_double* b, lapack_int ldb, + const lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_stptri_work( int matrix_order, char uplo, char diag, + lapack_int n, float* ap ); +lapack_int LAPACKE_dtptri_work( int matrix_order, char uplo, char diag, + lapack_int n, double* ap ); +lapack_int LAPACKE_ctptri_work( int matrix_order, char uplo, char diag, + lapack_int n, lapack_complex_float* ap ); +lapack_int LAPACKE_ztptri_work( int matrix_order, char uplo, char diag, + lapack_int n, lapack_complex_double* ap ); + +lapack_int LAPACKE_stptrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const float* ap, float* b, lapack_int ldb ); +lapack_int LAPACKE_dtptrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const double* ap, double* b, lapack_int ldb ); +lapack_int LAPACKE_ctptrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_float* ap, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztptrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_double* ap, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_stpttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const float* ap, float* arf ); +lapack_int LAPACKE_dtpttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const double* ap, double* arf ); +lapack_int LAPACKE_ctpttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* ap, + lapack_complex_float* arf ); +lapack_int LAPACKE_ztpttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* ap, + lapack_complex_double* arf ); + +lapack_int LAPACKE_stpttr_work( int matrix_order, char uplo, lapack_int n, + const float* ap, float* a, lapack_int lda ); +lapack_int LAPACKE_dtpttr_work( int matrix_order, char uplo, lapack_int n, + const double* ap, double* a, lapack_int lda ); +lapack_int LAPACKE_ctpttr_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* ap, + lapack_complex_float* a, lapack_int lda ); +lapack_int LAPACKE_ztpttr_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* ap, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_strcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, const float* a, + lapack_int lda, float* rcond, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dtrcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, const double* a, + lapack_int lda, double* rcond, double* 
work, + lapack_int* iwork ); +lapack_int LAPACKE_ctrcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + float* rcond, lapack_complex_float* work, + float* rwork ); +lapack_int LAPACKE_ztrcon_work( int matrix_order, char norm, char uplo, + char diag, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + double* rcond, lapack_complex_double* work, + double* rwork ); + +lapack_int LAPACKE_strevc_work( int matrix_order, char side, char howmny, + lapack_logical* select, lapack_int n, + const float* t, lapack_int ldt, float* vl, + lapack_int ldvl, float* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, float* work ); +lapack_int LAPACKE_dtrevc_work( int matrix_order, char side, char howmny, + lapack_logical* select, lapack_int n, + const double* t, lapack_int ldt, double* vl, + lapack_int ldvl, double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, double* work ); +lapack_int LAPACKE_ctrevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* vl, lapack_int ldvl, + lapack_complex_float* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztrevc_work( int matrix_order, char side, char howmny, + const lapack_logical* select, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* vl, lapack_int ldvl, + lapack_complex_double* vr, lapack_int ldvr, + lapack_int mm, lapack_int* m, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_strexc_work( int matrix_order, char compq, lapack_int n, + float* t, lapack_int ldt, float* q, + lapack_int ldq, lapack_int* ifst, + lapack_int* ilst, float* work ); +lapack_int LAPACKE_dtrexc_work( int matrix_order, char compq, lapack_int n, + double* t, lapack_int ldt, double* q, + lapack_int ldq, lapack_int* ifst, + lapack_int* ilst, double* work ); +lapack_int LAPACKE_ctrexc_work( int matrix_order, char compq, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* q, lapack_int ldq, + lapack_int ifst, lapack_int ilst ); +lapack_int LAPACKE_ztrexc_work( int matrix_order, char compq, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* q, lapack_int ldq, + lapack_int ifst, lapack_int ilst ); + +lapack_int LAPACKE_strrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const float* a, lapack_int lda, const float* b, + lapack_int ldb, const float* x, lapack_int ldx, + float* ferr, float* berr, float* work, + lapack_int* iwork ); +lapack_int LAPACKE_dtrrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const double* a, lapack_int lda, + const double* b, lapack_int ldb, + const double* x, lapack_int ldx, double* ferr, + double* berr, double* work, lapack_int* iwork ); +lapack_int LAPACKE_ctrrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* b, lapack_int ldb, + const lapack_complex_float* x, lapack_int ldx, + float* ferr, float* berr, + lapack_complex_float* work, float* rwork ); +lapack_int LAPACKE_ztrrfs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + 
const lapack_complex_double* x, lapack_int ldx, + double* ferr, double* berr, + lapack_complex_double* work, double* rwork ); + +lapack_int LAPACKE_strsen_work( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + float* t, lapack_int ldt, float* q, + lapack_int ldq, float* wr, float* wi, + lapack_int* m, float* s, float* sep, + float* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_dtrsen_work( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + double* t, lapack_int ldt, double* q, + lapack_int ldq, double* wr, double* wi, + lapack_int* m, double* s, double* sep, + double* work, lapack_int lwork, + lapack_int* iwork, lapack_int liwork ); +lapack_int LAPACKE_ctrsen_work( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + lapack_complex_float* t, lapack_int ldt, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* w, lapack_int* m, + float* s, float* sep, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_ztrsen_work( int matrix_order, char job, char compq, + const lapack_logical* select, lapack_int n, + lapack_complex_double* t, lapack_int ldt, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* w, lapack_int* m, + double* s, double* sep, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_strsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const float* t, lapack_int ldt, const float* vl, + lapack_int ldvl, const float* vr, + lapack_int ldvr, float* s, float* sep, + lapack_int mm, lapack_int* m, float* work, + lapack_int ldwork, lapack_int* iwork ); +lapack_int LAPACKE_dtrsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const double* t, lapack_int ldt, + const double* vl, lapack_int ldvl, + const double* vr, lapack_int ldvr, double* s, + double* sep, lapack_int mm, lapack_int* m, + double* work, lapack_int ldwork, + lapack_int* iwork ); +lapack_int LAPACKE_ctrsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_float* t, lapack_int ldt, + const lapack_complex_float* vl, lapack_int ldvl, + const lapack_complex_float* vr, lapack_int ldvr, + float* s, float* sep, lapack_int mm, + lapack_int* m, lapack_complex_float* work, + lapack_int ldwork, float* rwork ); +lapack_int LAPACKE_ztrsna_work( int matrix_order, char job, char howmny, + const lapack_logical* select, lapack_int n, + const lapack_complex_double* t, lapack_int ldt, + const lapack_complex_double* vl, + lapack_int ldvl, + const lapack_complex_double* vr, + lapack_int ldvr, double* s, double* sep, + lapack_int mm, lapack_int* m, + lapack_complex_double* work, lapack_int ldwork, + double* rwork ); + +lapack_int LAPACKE_strsyl_work( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const float* a, lapack_int lda, const float* b, + lapack_int ldb, float* c, lapack_int ldc, + float* scale ); +lapack_int LAPACKE_dtrsyl_work( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const double* a, lapack_int lda, + const double* b, lapack_int ldb, double* c, + lapack_int ldc, double* scale ); +lapack_int LAPACKE_ctrsyl_work( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* 
b, lapack_int ldb, + lapack_complex_float* c, lapack_int ldc, + float* scale ); +lapack_int LAPACKE_ztrsyl_work( int matrix_order, char trana, char tranb, + lapack_int isgn, lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* c, lapack_int ldc, + double* scale ); + +lapack_int LAPACKE_strtri_work( int matrix_order, char uplo, char diag, + lapack_int n, float* a, lapack_int lda ); +lapack_int LAPACKE_dtrtri_work( int matrix_order, char uplo, char diag, + lapack_int n, double* a, lapack_int lda ); +lapack_int LAPACKE_ctrtri_work( int matrix_order, char uplo, char diag, + lapack_int n, lapack_complex_float* a, + lapack_int lda ); +lapack_int LAPACKE_ztrtri_work( int matrix_order, char uplo, char diag, + lapack_int n, lapack_complex_double* a, + lapack_int lda ); + +lapack_int LAPACKE_strtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const float* a, lapack_int lda, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dtrtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const double* a, lapack_int lda, double* b, + lapack_int ldb ); +lapack_int LAPACKE_ctrtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_float* a, lapack_int lda, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_ztrtrs_work( int matrix_order, char uplo, char trans, + char diag, lapack_int n, lapack_int nrhs, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_double* b, lapack_int ldb ); + +lapack_int LAPACKE_strttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const float* a, lapack_int lda, + float* arf ); +lapack_int LAPACKE_dtrttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const double* a, lapack_int lda, + double* arf ); +lapack_int LAPACKE_ctrttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_float* a, + lapack_int lda, lapack_complex_float* arf ); +lapack_int LAPACKE_ztrttf_work( int matrix_order, char transr, char uplo, + lapack_int n, const lapack_complex_double* a, + lapack_int lda, lapack_complex_double* arf ); + +lapack_int LAPACKE_strttp_work( int matrix_order, char uplo, lapack_int n, + const float* a, lapack_int lda, float* ap ); +lapack_int LAPACKE_dtrttp_work( int matrix_order, char uplo, lapack_int n, + const double* a, lapack_int lda, double* ap ); +lapack_int LAPACKE_ctrttp_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + lapack_complex_float* ap ); +lapack_int LAPACKE_ztrttp_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + lapack_complex_double* ap ); + +lapack_int LAPACKE_stzrzf_work( int matrix_order, lapack_int m, lapack_int n, + float* a, lapack_int lda, float* tau, + float* work, lapack_int lwork ); +lapack_int LAPACKE_dtzrzf_work( int matrix_order, lapack_int m, lapack_int n, + double* a, lapack_int lda, double* tau, + double* work, lapack_int lwork ); +lapack_int LAPACKE_ctzrzf_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_ztzrzf_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* tau, + lapack_complex_double* work, 
lapack_int lwork ); + +lapack_int LAPACKE_cungbr_work( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int k, + lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zungbr_work( int matrix_order, char vect, lapack_int m, + lapack_int n, lapack_int k, + lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunghr_work( int matrix_order, lapack_int n, lapack_int ilo, + lapack_int ihi, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunghr_work( int matrix_order, lapack_int n, lapack_int ilo, + lapack_int ihi, lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunglq_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunglq_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cungql_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zungql_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cungqr_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zungqr_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cungrq_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zungrq_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int k, lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cungtr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zungtr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmbr_work( int matrix_order, char vect, char side, + char trans, lapack_int m, lapack_int n, + lapack_int k, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmbr_work( int matrix_order, char vect, char side, + char trans, 
lapack_int m, lapack_int n, + lapack_int k, const lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmhr_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int ilo, + lapack_int ihi, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmhr_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int ilo, + lapack_int ihi, const lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmlq_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmlq_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmql_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmql_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmqr_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmqr_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmrq_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmrq_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmrz_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, const lapack_complex_float* a, + lapack_int lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); 
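The `cung*`/`zung*` and `cunm*`/`zunm*` `_work` prototypes above all end in the `(work, lwork)` pair, and passing `lwork = -1` performs a workspace query: the routine stores the optimal size in `work[0]` and returns without computing. A sketch of the query–allocate–call pattern, forming the explicit Q factor of a QR factorization; it assumes the default C99 definition of `lapack_complex_double` (so `creal()` can read the queried size) and uses `LAPACKE_zgeqrf_work`, declared earlier in this header:

#include <stdlib.h>
#include <complex.h>
#include <lapacke.h>

/* Overwrite the m-by-n column-major A (m >= n) with the explicit Q of
 * A = QR. tau must hold at least n entries. */
static lapack_int explicit_q( lapack_int m, lapack_int n,
                              lapack_complex_double* a, lapack_int lda,
                              lapack_complex_double* tau )
{
    lapack_complex_double q1, q2;
    lapack_int info;

    /* lwork = -1: each routine reports its optimal size in work[0] */
    info = LAPACKE_zgeqrf_work( LAPACK_COL_MAJOR, m, n, a, lda, tau, &q1, -1 );
    if( info != 0 ) return info;
    info = LAPACKE_zungqr_work( LAPACK_COL_MAJOR, m, n, n, a, lda, tau, &q2, -1 );
    if( info != 0 ) return info;

    lapack_int lwork = (lapack_int)( creal( q1 ) > creal( q2 ) ? creal( q1 )
                                                               : creal( q2 ) );
    lapack_complex_double* work = malloc( (size_t)lwork * sizeof *work );
    if( work == NULL ) return LAPACK_WORK_MEMORY_ERROR;

    info = LAPACKE_zgeqrf_work( LAPACK_COL_MAJOR, m, n, a, lda, tau,
                                work, lwork );
    if( info == 0 )  /* replace the factored A with the explicit Q */
        info = LAPACKE_zungqr_work( LAPACK_COL_MAJOR, m, n, n, a, lda,
                                    tau, work, lwork );
    free( work );
    return info;
}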
+lapack_int LAPACKE_zunmrz_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, const lapack_complex_double* a, + lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cunmtr_work( int matrix_order, char side, char uplo, + char trans, lapack_int m, lapack_int n, + const lapack_complex_float* a, lapack_int lda, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_zunmtr_work( int matrix_order, char side, char uplo, + char trans, lapack_int m, lapack_int n, + const lapack_complex_double* a, lapack_int lda, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work, lapack_int lwork ); + +lapack_int LAPACKE_cupgtr_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_float* ap, + const lapack_complex_float* tau, + lapack_complex_float* q, lapack_int ldq, + lapack_complex_float* work ); +lapack_int LAPACKE_zupgtr_work( int matrix_order, char uplo, lapack_int n, + const lapack_complex_double* ap, + const lapack_complex_double* tau, + lapack_complex_double* q, lapack_int ldq, + lapack_complex_double* work ); + +lapack_int LAPACKE_cupmtr_work( int matrix_order, char side, char uplo, + char trans, lapack_int m, lapack_int n, + const lapack_complex_float* ap, + const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int ldc, + lapack_complex_float* work ); +lapack_int LAPACKE_zupmtr_work( int matrix_order, char side, char uplo, + char trans, lapack_int m, lapack_int n, + const lapack_complex_double* ap, + const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int ldc, + lapack_complex_double* work ); + +lapack_int LAPACKE_claghe( int matrix_order, lapack_int n, lapack_int k, + const float* d, lapack_complex_float* a, + lapack_int lda, lapack_int* iseed ); +lapack_int LAPACKE_zlaghe( int matrix_order, lapack_int n, lapack_int k, + const double* d, lapack_complex_double* a, + lapack_int lda, lapack_int* iseed ); + +lapack_int LAPACKE_slagsy( int matrix_order, lapack_int n, lapack_int k, + const float* d, float* a, lapack_int lda, + lapack_int* iseed ); +lapack_int LAPACKE_dlagsy( int matrix_order, lapack_int n, lapack_int k, + const double* d, double* a, lapack_int lda, + lapack_int* iseed ); +lapack_int LAPACKE_clagsy( int matrix_order, lapack_int n, lapack_int k, + const float* d, lapack_complex_float* a, + lapack_int lda, lapack_int* iseed ); +lapack_int LAPACKE_zlagsy( int matrix_order, lapack_int n, lapack_int k, + const double* d, lapack_complex_double* a, + lapack_int lda, lapack_int* iseed ); + +lapack_int LAPACKE_slapmr( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, float* x, lapack_int ldx, + lapack_int* k ); +lapack_int LAPACKE_dlapmr( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, double* x, + lapack_int ldx, lapack_int* k ); +lapack_int LAPACKE_clapmr( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, lapack_complex_float* x, + lapack_int ldx, lapack_int* k ); +lapack_int LAPACKE_zlapmr( int matrix_order, lapack_logical forwrd, + lapack_int m, lapack_int n, lapack_complex_double* x, + lapack_int ldx, lapack_int* k ); + + +float LAPACKE_slapy2( float x, float y ); +double LAPACKE_dlapy2( double x, double y ); + +float LAPACKE_slapy3( float x, float y, float z ); 
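Unlike nearly everything else in this header, the `lapy` helpers above return the computed value itself rather than a `lapack_int` status. `LAPACKE_dlapy2` evaluates sqrt(x^2 + y^2) while guarding against intermediate overflow, which the textbook formula does not. A small illustration:

#include <math.h>
#include <stdio.h>
#include <lapacke.h>

int main( void )
{
    double x = 3.0e200, y = 4.0e200;          /* x*x alone overflows a double */
    printf( "naive : %g\n", sqrt( x * x + y * y ) );  /* prints inf */
    printf( "dlapy2: %g\n", LAPACKE_dlapy2( x, y ) ); /* prints 5e+200 */
    return 0;
}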
+double LAPACKE_dlapy3( double x, double y, double z ); + +lapack_int LAPACKE_slartgp( float f, float g, float* cs, float* sn, float* r ); +lapack_int LAPACKE_dlartgp( double f, double g, double* cs, double* sn, + double* r ); + +lapack_int LAPACKE_slartgs( float x, float y, float sigma, float* cs, + float* sn ); +lapack_int LAPACKE_dlartgs( double x, double y, double sigma, double* cs, + double* sn ); + + +//LAPACK 3.3.0 +lapack_int LAPACKE_cbbcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, lapack_int m, + lapack_int p, lapack_int q, float* theta, float* phi, + lapack_complex_float* u1, lapack_int ldu1, + lapack_complex_float* u2, lapack_int ldu2, + lapack_complex_float* v1t, lapack_int ldv1t, + lapack_complex_float* v2t, lapack_int ldv2t, + float* b11d, float* b11e, float* b12d, float* b12e, + float* b21d, float* b21e, float* b22d, float* b22e ); +lapack_int LAPACKE_cbbcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + lapack_int m, lapack_int p, lapack_int q, + float* theta, float* phi, + lapack_complex_float* u1, lapack_int ldu1, + lapack_complex_float* u2, lapack_int ldu2, + lapack_complex_float* v1t, lapack_int ldv1t, + lapack_complex_float* v2t, lapack_int ldv2t, + float* b11d, float* b11e, float* b12d, + float* b12e, float* b21d, float* b21e, + float* b22d, float* b22e, float* rwork, + lapack_int lrwork ); +lapack_int LAPACKE_cheswapr( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_cheswapr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_chetri2( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_chetri2_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_chetri2x( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, lapack_int nb ); +lapack_int LAPACKE_chetri2x_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int nb ); +lapack_int LAPACKE_chetrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_chetrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* work ); +lapack_int LAPACKE_csyconv( int matrix_order, char uplo, char way, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_csyconv_work( int matrix_order, char uplo, char way, + lapack_int n, lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* work ); +lapack_int LAPACKE_csyswapr( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_csyswapr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_csytri2( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, 
lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_csytri2_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_csytri2x( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, lapack_int nb ); +lapack_int LAPACKE_csytri2x_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int nb ); +lapack_int LAPACKE_csytrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb ); +lapack_int LAPACKE_csytrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_float* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int ldb, + lapack_complex_float* work ); +lapack_int LAPACKE_cunbdb( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_float* x11, lapack_int ldx11, + lapack_complex_float* x12, lapack_int ldx12, + lapack_complex_float* x21, lapack_int ldx21, + lapack_complex_float* x22, lapack_int ldx22, + float* theta, float* phi, + lapack_complex_float* taup1, + lapack_complex_float* taup2, + lapack_complex_float* tauq1, + lapack_complex_float* tauq2 ); +lapack_int LAPACKE_cunbdb_work( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_float* x11, lapack_int ldx11, + lapack_complex_float* x12, lapack_int ldx12, + lapack_complex_float* x21, lapack_int ldx21, + lapack_complex_float* x22, lapack_int ldx22, + float* theta, float* phi, + lapack_complex_float* taup1, + lapack_complex_float* taup2, + lapack_complex_float* tauq1, + lapack_complex_float* tauq2, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_cuncsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_float* x11, lapack_int ldx11, + lapack_complex_float* x12, lapack_int ldx12, + lapack_complex_float* x21, lapack_int ldx21, + lapack_complex_float* x22, lapack_int ldx22, + float* theta, lapack_complex_float* u1, + lapack_int ldu1, lapack_complex_float* u2, + lapack_int ldu2, lapack_complex_float* v1t, + lapack_int ldv1t, lapack_complex_float* v2t, + lapack_int ldv2t ); +lapack_int LAPACKE_cuncsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + char signs, lapack_int m, lapack_int p, + lapack_int q, lapack_complex_float* x11, + lapack_int ldx11, lapack_complex_float* x12, + lapack_int ldx12, lapack_complex_float* x21, + lapack_int ldx21, lapack_complex_float* x22, + lapack_int ldx22, float* theta, + lapack_complex_float* u1, lapack_int ldu1, + lapack_complex_float* u2, lapack_int ldu2, + lapack_complex_float* v1t, lapack_int ldv1t, + lapack_complex_float* v2t, lapack_int ldv2t, + lapack_complex_float* work, lapack_int lwork, + float* rwork, lapack_int lrwork, + lapack_int* iwork ); +lapack_int LAPACKE_dbbcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, lapack_int m, + lapack_int p, lapack_int q, double* theta, + double* phi, double* u1, lapack_int ldu1, double* u2, + lapack_int ldu2, double* v1t, lapack_int ldv1t, + double* v2t, lapack_int ldv2t, double* b11d, 
+ double* b11e, double* b12d, double* b12e, + double* b21d, double* b21e, double* b22d, + double* b22e ); +lapack_int LAPACKE_dbbcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + lapack_int m, lapack_int p, lapack_int q, + double* theta, double* phi, double* u1, + lapack_int ldu1, double* u2, lapack_int ldu2, + double* v1t, lapack_int ldv1t, double* v2t, + lapack_int ldv2t, double* b11d, double* b11e, + double* b12d, double* b12e, double* b21d, + double* b21e, double* b22d, double* b22e, + double* work, lapack_int lwork ); +lapack_int LAPACKE_dorbdb( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + double* x11, lapack_int ldx11, double* x12, + lapack_int ldx12, double* x21, lapack_int ldx21, + double* x22, lapack_int ldx22, double* theta, + double* phi, double* taup1, double* taup2, + double* tauq1, double* tauq2 ); +lapack_int LAPACKE_dorbdb_work( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + double* x11, lapack_int ldx11, double* x12, + lapack_int ldx12, double* x21, lapack_int ldx21, + double* x22, lapack_int ldx22, double* theta, + double* phi, double* taup1, double* taup2, + double* tauq1, double* tauq2, double* work, + lapack_int lwork ); +lapack_int LAPACKE_dorcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + double* x11, lapack_int ldx11, double* x12, + lapack_int ldx12, double* x21, lapack_int ldx21, + double* x22, lapack_int ldx22, double* theta, + double* u1, lapack_int ldu1, double* u2, + lapack_int ldu2, double* v1t, lapack_int ldv1t, + double* v2t, lapack_int ldv2t ); +lapack_int LAPACKE_dorcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + char signs, lapack_int m, lapack_int p, + lapack_int q, double* x11, lapack_int ldx11, + double* x12, lapack_int ldx12, double* x21, + lapack_int ldx21, double* x22, lapack_int ldx22, + double* theta, double* u1, lapack_int ldu1, + double* u2, lapack_int ldu2, double* v1t, + lapack_int ldv1t, double* v2t, lapack_int ldv2t, + double* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_dsyconv( int matrix_order, char uplo, char way, lapack_int n, + double* a, lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_dsyconv_work( int matrix_order, char uplo, char way, + lapack_int n, double* a, lapack_int lda, + const lapack_int* ipiv, double* work ); +lapack_int LAPACKE_dsyswapr( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int i1, lapack_int i2 ); +lapack_int LAPACKE_dsyswapr_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int i1, lapack_int i2 ); +lapack_int LAPACKE_dsytri2( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_dsytri2_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int lwork ); +lapack_int LAPACKE_dsytri2x( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, const lapack_int* ipiv, + lapack_int nb ); +lapack_int LAPACKE_dsytri2x_work( int matrix_order, char uplo, lapack_int n, + double* a, lapack_int lda, + const lapack_int* ipiv, double* work, + lapack_int nb ); +lapack_int LAPACKE_dsytrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, lapack_int lda, + const lapack_int* ipiv, double* b, lapack_int 
ldb ); +lapack_int LAPACKE_dsytrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const double* a, + lapack_int lda, const lapack_int* ipiv, + double* b, lapack_int ldb, double* work ); +lapack_int LAPACKE_sbbcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, lapack_int m, + lapack_int p, lapack_int q, float* theta, float* phi, + float* u1, lapack_int ldu1, float* u2, + lapack_int ldu2, float* v1t, lapack_int ldv1t, + float* v2t, lapack_int ldv2t, float* b11d, + float* b11e, float* b12d, float* b12e, float* b21d, + float* b21e, float* b22d, float* b22e ); +lapack_int LAPACKE_sbbcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + lapack_int m, lapack_int p, lapack_int q, + float* theta, float* phi, float* u1, + lapack_int ldu1, float* u2, lapack_int ldu2, + float* v1t, lapack_int ldv1t, float* v2t, + lapack_int ldv2t, float* b11d, float* b11e, + float* b12d, float* b12e, float* b21d, + float* b21e, float* b22d, float* b22e, + float* work, lapack_int lwork ); +lapack_int LAPACKE_sorbdb( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, float* x11, + lapack_int ldx11, float* x12, lapack_int ldx12, + float* x21, lapack_int ldx21, float* x22, + lapack_int ldx22, float* theta, float* phi, + float* taup1, float* taup2, float* tauq1, + float* tauq2 ); +lapack_int LAPACKE_sorbdb_work( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + float* x11, lapack_int ldx11, float* x12, + lapack_int ldx12, float* x21, lapack_int ldx21, + float* x22, lapack_int ldx22, float* theta, + float* phi, float* taup1, float* taup2, + float* tauq1, float* tauq2, float* work, + lapack_int lwork ); +lapack_int LAPACKE_sorcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, float* x11, + lapack_int ldx11, float* x12, lapack_int ldx12, + float* x21, lapack_int ldx21, float* x22, + lapack_int ldx22, float* theta, float* u1, + lapack_int ldu1, float* u2, lapack_int ldu2, + float* v1t, lapack_int ldv1t, float* v2t, + lapack_int ldv2t ); +lapack_int LAPACKE_sorcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + char signs, lapack_int m, lapack_int p, + lapack_int q, float* x11, lapack_int ldx11, + float* x12, lapack_int ldx12, float* x21, + lapack_int ldx21, float* x22, lapack_int ldx22, + float* theta, float* u1, lapack_int ldu1, + float* u2, lapack_int ldu2, float* v1t, + lapack_int ldv1t, float* v2t, lapack_int ldv2t, + float* work, lapack_int lwork, + lapack_int* iwork ); +lapack_int LAPACKE_ssyconv( int matrix_order, char uplo, char way, lapack_int n, + float* a, lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_ssyconv_work( int matrix_order, char uplo, char way, + lapack_int n, float* a, lapack_int lda, + const lapack_int* ipiv, float* work ); +lapack_int LAPACKE_ssyswapr( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int i1, lapack_int i2 ); +lapack_int LAPACKE_ssyswapr_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int i1, lapack_int i2 ); +lapack_int LAPACKE_ssytri2( int matrix_order, char uplo, lapack_int n, float* a, + lapack_int lda, const lapack_int* ipiv ); +lapack_int LAPACKE_ssytri2_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int lwork ); +lapack_int LAPACKE_ssytri2x( 
int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, const lapack_int* ipiv, + lapack_int nb ); +lapack_int LAPACKE_ssytri2x_work( int matrix_order, char uplo, lapack_int n, + float* a, lapack_int lda, + const lapack_int* ipiv, float* work, + lapack_int nb ); +lapack_int LAPACKE_ssytrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, lapack_int lda, + const lapack_int* ipiv, float* b, lapack_int ldb ); +lapack_int LAPACKE_ssytrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const float* a, + lapack_int lda, const lapack_int* ipiv, + float* b, lapack_int ldb, float* work ); +lapack_int LAPACKE_zbbcsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, lapack_int m, + lapack_int p, lapack_int q, double* theta, + double* phi, lapack_complex_double* u1, + lapack_int ldu1, lapack_complex_double* u2, + lapack_int ldu2, lapack_complex_double* v1t, + lapack_int ldv1t, lapack_complex_double* v2t, + lapack_int ldv2t, double* b11d, double* b11e, + double* b12d, double* b12e, double* b21d, + double* b21e, double* b22d, double* b22e ); +lapack_int LAPACKE_zbbcsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + lapack_int m, lapack_int p, lapack_int q, + double* theta, double* phi, + lapack_complex_double* u1, lapack_int ldu1, + lapack_complex_double* u2, lapack_int ldu2, + lapack_complex_double* v1t, lapack_int ldv1t, + lapack_complex_double* v2t, lapack_int ldv2t, + double* b11d, double* b11e, double* b12d, + double* b12e, double* b21d, double* b21e, + double* b22d, double* b22e, double* rwork, + lapack_int lrwork ); +lapack_int LAPACKE_zheswapr( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_zheswapr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_zhetri2( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_zhetri2_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int lwork ); +lapack_int LAPACKE_zhetri2x( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, lapack_int nb ); +lapack_int LAPACKE_zhetri2x_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int nb ); +lapack_int LAPACKE_zhetrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); +lapack_int LAPACKE_zhetrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* work ); +lapack_int LAPACKE_zsyconv( int matrix_order, char uplo, char way, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_zsyconv_work( int matrix_order, char uplo, char way, + lapack_int n, lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* work ); +lapack_int LAPACKE_zsyswapr( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, 
lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_zsyswapr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int i1, + lapack_int i2 ); +lapack_int LAPACKE_zsytri2( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv ); +lapack_int LAPACKE_zsytri2_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int lwork ); +lapack_int LAPACKE_zsytri2x( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, lapack_int nb ); +lapack_int LAPACKE_zsytri2x_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double* a, lapack_int lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int nb ); +lapack_int LAPACKE_zsytrs2( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb ); +lapack_int LAPACKE_zsytrs2_work( int matrix_order, char uplo, lapack_int n, + lapack_int nrhs, const lapack_complex_double* a, + lapack_int lda, const lapack_int* ipiv, + lapack_complex_double* b, lapack_int ldb, + lapack_complex_double* work ); +lapack_int LAPACKE_zunbdb( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_double* x11, lapack_int ldx11, + lapack_complex_double* x12, lapack_int ldx12, + lapack_complex_double* x21, lapack_int ldx21, + lapack_complex_double* x22, lapack_int ldx22, + double* theta, double* phi, + lapack_complex_double* taup1, + lapack_complex_double* taup2, + lapack_complex_double* tauq1, + lapack_complex_double* tauq2 ); +lapack_int LAPACKE_zunbdb_work( int matrix_order, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_double* x11, lapack_int ldx11, + lapack_complex_double* x12, lapack_int ldx12, + lapack_complex_double* x21, lapack_int ldx21, + lapack_complex_double* x22, lapack_int ldx22, + double* theta, double* phi, + lapack_complex_double* taup1, + lapack_complex_double* taup2, + lapack_complex_double* tauq1, + lapack_complex_double* tauq2, + lapack_complex_double* work, lapack_int lwork ); +lapack_int LAPACKE_zuncsd( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, char signs, + lapack_int m, lapack_int p, lapack_int q, + lapack_complex_double* x11, lapack_int ldx11, + lapack_complex_double* x12, lapack_int ldx12, + lapack_complex_double* x21, lapack_int ldx21, + lapack_complex_double* x22, lapack_int ldx22, + double* theta, lapack_complex_double* u1, + lapack_int ldu1, lapack_complex_double* u2, + lapack_int ldu2, lapack_complex_double* v1t, + lapack_int ldv1t, lapack_complex_double* v2t, + lapack_int ldv2t ); +lapack_int LAPACKE_zuncsd_work( int matrix_order, char jobu1, char jobu2, + char jobv1t, char jobv2t, char trans, + char signs, lapack_int m, lapack_int p, + lapack_int q, lapack_complex_double* x11, + lapack_int ldx11, lapack_complex_double* x12, + lapack_int ldx12, lapack_complex_double* x21, + lapack_int ldx21, lapack_complex_double* x22, + lapack_int ldx22, double* theta, + lapack_complex_double* u1, lapack_int ldu1, + lapack_complex_double* u2, lapack_int ldu2, + lapack_complex_double* v1t, lapack_int ldv1t, + lapack_complex_double* v2t, lapack_int ldv2t, + lapack_complex_double* work, lapack_int lwork, + double* rwork, lapack_int lrwork, + lapack_int* iwork 
); +//LAPACK 3.4.0 +lapack_int LAPACKE_sgemqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const float* v, lapack_int ldv, + const float* t, lapack_int ldt, float* c, + lapack_int ldc ); +lapack_int LAPACKE_dgemqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const double* v, lapack_int ldv, + const double* t, lapack_int ldt, double* c, + lapack_int ldc ); +lapack_int LAPACKE_cgemqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const lapack_complex_float* v, + lapack_int ldv, const lapack_complex_float* t, + lapack_int ldt, lapack_complex_float* c, + lapack_int ldc ); +lapack_int LAPACKE_zgemqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const lapack_complex_double* v, + lapack_int ldv, const lapack_complex_double* t, + lapack_int ldt, lapack_complex_double* c, + lapack_int ldc ); + +lapack_int LAPACKE_sgeqrt( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, float* a, lapack_int lda, float* t, + lapack_int ldt ); +lapack_int LAPACKE_dgeqrt( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, double* a, lapack_int lda, double* t, + lapack_int ldt ); +lapack_int LAPACKE_cgeqrt( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* t, + lapack_int ldt ); +lapack_int LAPACKE_zgeqrt( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* t, + lapack_int ldt ); + +lapack_int LAPACKE_sgeqrt2( int matrix_order, lapack_int m, lapack_int n, + float* a, lapack_int lda, float* t, + lapack_int ldt ); +lapack_int LAPACKE_dgeqrt2( int matrix_order, lapack_int m, lapack_int n, + double* a, lapack_int lda, double* t, + lapack_int ldt ); +lapack_int LAPACKE_cgeqrt2( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* t, lapack_int ldt ); +lapack_int LAPACKE_zgeqrt2( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* t, lapack_int ldt ); + +lapack_int LAPACKE_sgeqrt3( int matrix_order, lapack_int m, lapack_int n, + float* a, lapack_int lda, float* t, + lapack_int ldt ); +lapack_int LAPACKE_dgeqrt3( int matrix_order, lapack_int m, lapack_int n, + double* a, lapack_int lda, double* t, + lapack_int ldt ); +lapack_int LAPACKE_cgeqrt3( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* t, lapack_int ldt ); +lapack_int LAPACKE_zgeqrt3( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* t, lapack_int ldt ); + +lapack_int LAPACKE_stpmqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, lapack_int nb, const float* v, + lapack_int ldv, const float* t, lapack_int ldt, + float* a, lapack_int lda, float* b, + lapack_int ldb ); +lapack_int LAPACKE_dtpmqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, lapack_int nb, const double* v, + lapack_int ldv, const double* t, lapack_int ldt, + double* a, lapack_int lda, double* b, + lapack_int ldb ); +lapack_int LAPACKE_ctpmqrt( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, 
lapack_int nb,
+ const lapack_complex_float* v, lapack_int ldv,
+ const lapack_complex_float* t, lapack_int ldt,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb );
+lapack_int LAPACKE_ztpmqrt( int matrix_order, char side, char trans,
+ lapack_int m, lapack_int n, lapack_int k,
+ lapack_int l, lapack_int nb,
+ const lapack_complex_double* v, lapack_int ldv,
+ const lapack_complex_double* t, lapack_int ldt,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb );
+
+lapack_int LAPACKE_dtpqrt( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb, double* a,
+ lapack_int lda, double* b, lapack_int ldb, double* t,
+ lapack_int ldt );
+lapack_int LAPACKE_ctpqrt( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb, lapack_complex_float* a,
+ lapack_int lda, lapack_complex_float* b,
+ lapack_int ldb, lapack_complex_float* t,
+ lapack_int ldt );
+lapack_int LAPACKE_ztpqrt( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_complex_double* t, lapack_int ldt );
+
+lapack_int LAPACKE_stpqrt2( int matrix_order, lapack_int m, lapack_int n,
+ float* a, lapack_int lda, float* b, lapack_int ldb,
+ float* t, lapack_int ldt );
+lapack_int LAPACKE_dtpqrt2( int matrix_order, lapack_int m, lapack_int n,
+ double* a, lapack_int lda, double* b,
+ lapack_int ldb, double* t, lapack_int ldt );
+lapack_int LAPACKE_ctpqrt2( int matrix_order, lapack_int m, lapack_int n,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ lapack_complex_float* t, lapack_int ldt );
+lapack_int LAPACKE_ztpqrt2( int matrix_order, lapack_int m, lapack_int n,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_complex_double* t, lapack_int ldt );
+
+lapack_int LAPACKE_stprfb( int matrix_order, char side, char trans, char direct,
+ char storev, lapack_int m, lapack_int n,
+ lapack_int k, lapack_int l, const float* v,
+ lapack_int ldv, const float* t, lapack_int ldt,
+ float* a, lapack_int lda, float* b, lapack_int ldb,
+ lapack_int myldwork );
+lapack_int LAPACKE_dtprfb( int matrix_order, char side, char trans, char direct,
+ char storev, lapack_int m, lapack_int n,
+ lapack_int k, lapack_int l, const double* v,
+ lapack_int ldv, const double* t, lapack_int ldt,
+ double* a, lapack_int lda, double* b, lapack_int ldb,
+ lapack_int myldwork );
+lapack_int LAPACKE_ctprfb( int matrix_order, char side, char trans, char direct,
+ char storev, lapack_int m, lapack_int n,
+ lapack_int k, lapack_int l,
+ const lapack_complex_float* v, lapack_int ldv,
+ const lapack_complex_float* t, lapack_int ldt,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ lapack_int myldwork );
+lapack_int LAPACKE_ztprfb( int matrix_order, char side, char trans, char direct,
+ char storev, lapack_int m, lapack_int n,
+ lapack_int k, lapack_int l,
+ const lapack_complex_double* v, lapack_int ldv,
+ const lapack_complex_double* t, lapack_int ldt,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_int myldwork );
+
+lapack_int LAPACKE_sgemqrt_work( int matrix_order, char side, char trans,
+ lapack_int m, lapack_int n, lapack_int k,
+ lapack_int nb, const float* v, lapack_int ldv,
+ const float* t, lapack_int ldt, float* c,
+ lapack_int ldc, float* work );
+lapack_int
LAPACKE_dgemqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const double* v, lapack_int ldv, + const double* t, lapack_int ldt, double* c, + lapack_int ldc, double* work ); +lapack_int LAPACKE_cgemqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const lapack_complex_float* v, + lapack_int ldv, const lapack_complex_float* t, + lapack_int ldt, lapack_complex_float* c, + lapack_int ldc, lapack_complex_float* work ); +lapack_int LAPACKE_zgemqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int nb, const lapack_complex_double* v, + lapack_int ldv, const lapack_complex_double* t, + lapack_int ldt, lapack_complex_double* c, + lapack_int ldc, lapack_complex_double* work ); + +lapack_int LAPACKE_sgeqrt_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, float* a, lapack_int lda, + float* t, lapack_int ldt, float* work ); +lapack_int LAPACKE_dgeqrt_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, double* a, lapack_int lda, + double* t, lapack_int ldt, double* work ); +lapack_int LAPACKE_cgeqrt_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, lapack_complex_float* a, + lapack_int lda, lapack_complex_float* t, + lapack_int ldt, lapack_complex_float* work ); +lapack_int LAPACKE_zgeqrt_work( int matrix_order, lapack_int m, lapack_int n, + lapack_int nb, lapack_complex_double* a, + lapack_int lda, lapack_complex_double* t, + lapack_int ldt, lapack_complex_double* work ); + +lapack_int LAPACKE_sgeqrt2_work( int matrix_order, lapack_int m, lapack_int n, + float* a, lapack_int lda, float* t, + lapack_int ldt ); +lapack_int LAPACKE_dgeqrt2_work( int matrix_order, lapack_int m, lapack_int n, + double* a, lapack_int lda, double* t, + lapack_int ldt ); +lapack_int LAPACKE_cgeqrt2_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* t, lapack_int ldt ); +lapack_int LAPACKE_zgeqrt2_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* t, lapack_int ldt ); + +lapack_int LAPACKE_sgeqrt3_work( int matrix_order, lapack_int m, lapack_int n, + float* a, lapack_int lda, float* t, + lapack_int ldt ); +lapack_int LAPACKE_dgeqrt3_work( int matrix_order, lapack_int m, lapack_int n, + double* a, lapack_int lda, double* t, + lapack_int ldt ); +lapack_int LAPACKE_cgeqrt3_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_float* a, lapack_int lda, + lapack_complex_float* t, lapack_int ldt ); +lapack_int LAPACKE_zgeqrt3_work( int matrix_order, lapack_int m, lapack_int n, + lapack_complex_double* a, lapack_int lda, + lapack_complex_double* t, lapack_int ldt ); + +lapack_int LAPACKE_stpmqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, lapack_int nb, const float* v, + lapack_int ldv, const float* t, lapack_int ldt, + float* a, lapack_int lda, float* b, + lapack_int ldb, float* work ); +lapack_int LAPACKE_dtpmqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + lapack_int l, lapack_int nb, const double* v, + lapack_int ldv, const double* t, + lapack_int ldt, double* a, lapack_int lda, + double* b, lapack_int ldb, double* work ); +lapack_int LAPACKE_ctpmqrt_work( int matrix_order, char side, char trans, + lapack_int m, lapack_int n, lapack_int k, + 
lapack_int l, lapack_int nb,
+ const lapack_complex_float* v, lapack_int ldv,
+ const lapack_complex_float* t, lapack_int ldt,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ lapack_complex_float* work );
+lapack_int LAPACKE_ztpmqrt_work( int matrix_order, char side, char trans,
+ lapack_int m, lapack_int n, lapack_int k,
+ lapack_int l, lapack_int nb,
+ const lapack_complex_double* v, lapack_int ldv,
+ const lapack_complex_double* t, lapack_int ldt,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_complex_double* work );
+
+lapack_int LAPACKE_dtpqrt_work( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb, double* a,
+ lapack_int lda, double* b, lapack_int ldb,
+ double* t, lapack_int ldt, double* work );
+lapack_int LAPACKE_ctpqrt_work( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ lapack_complex_float* t, lapack_int ldt,
+ lapack_complex_float* work );
+lapack_int LAPACKE_ztpqrt_work( int matrix_order, lapack_int m, lapack_int n,
+ lapack_int l, lapack_int nb,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_complex_double* t, lapack_int ldt,
+ lapack_complex_double* work );
+
+lapack_int LAPACKE_stpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
+ float* a, lapack_int lda, float* b,
+ lapack_int ldb, float* t, lapack_int ldt );
+lapack_int LAPACKE_dtpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
+ double* a, lapack_int lda, double* b,
+ lapack_int ldb, double* t, lapack_int ldt );
+lapack_int LAPACKE_ctpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ lapack_complex_float* t, lapack_int ldt );
+lapack_int LAPACKE_ztpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ lapack_complex_double* t, lapack_int ldt );
+
+lapack_int LAPACKE_stprfb_work( int matrix_order, char side, char trans,
+ char direct, char storev, lapack_int m,
+ lapack_int n, lapack_int k, lapack_int l,
+ const float* v, lapack_int ldv, const float* t,
+ lapack_int ldt, float* a, lapack_int lda,
+ float* b, lapack_int ldb, const float* mywork,
+ lapack_int myldwork );
+lapack_int LAPACKE_dtprfb_work( int matrix_order, char side, char trans,
+ char direct, char storev, lapack_int m,
+ lapack_int n, lapack_int k, lapack_int l,
+ const double* v, lapack_int ldv,
+ const double* t, lapack_int ldt, double* a,
+ lapack_int lda, double* b, lapack_int ldb,
+ const double* mywork, lapack_int myldwork );
+lapack_int LAPACKE_ctprfb_work( int matrix_order, char side, char trans,
+ char direct, char storev, lapack_int m,
+ lapack_int n, lapack_int k, lapack_int l,
+ const lapack_complex_float* v, lapack_int ldv,
+ const lapack_complex_float* t, lapack_int ldt,
+ lapack_complex_float* a, lapack_int lda,
+ lapack_complex_float* b, lapack_int ldb,
+ const float* mywork, lapack_int myldwork );
+lapack_int LAPACKE_ztprfb_work( int matrix_order, char side, char trans,
+ char direct, char storev, lapack_int m,
+ lapack_int n, lapack_int k, lapack_int l,
+ const lapack_complex_double* v, lapack_int ldv,
+ const lapack_complex_double* t, lapack_int ldt,
+ lapack_complex_double* a, lapack_int lda,
+ lapack_complex_double* b, lapack_int ldb,
+ const double*
mywork, lapack_int myldwork ); +//LAPACK 3.X.X +lapack_int LAPACKE_csyr( int matrix_order, char uplo, lapack_int n, + lapack_complex_float alpha, + const lapack_complex_float* x, lapack_int incx, + lapack_complex_float* a, lapack_int lda ); +lapack_int LAPACKE_zsyr( int matrix_order, char uplo, lapack_int n, + lapack_complex_double alpha, + const lapack_complex_double* x, lapack_int incx, + lapack_complex_double* a, lapack_int lda ); + +lapack_int LAPACKE_csyr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_float alpha, + const lapack_complex_float* x, + lapack_int incx, lapack_complex_float* a, + lapack_int lda ); +lapack_int LAPACKE_zsyr_work( int matrix_order, char uplo, lapack_int n, + lapack_complex_double alpha, + const lapack_complex_double* x, + lapack_int incx, lapack_complex_double* a, + lapack_int lda ); + + + +#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF) +#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF) +#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF) +#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF) +#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF) +#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF) +#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF) +#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF) +#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF) +#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF) +#define LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF) +#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF) +#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF) +#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF) +#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF) +#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF) +#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF) +#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF) +#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF) +#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF) +#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF) +#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF) +#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF) +#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF) +#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF) +#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF) +#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF) +#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF) +#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF) +#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF) +#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF) +#define LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF) +#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF) +#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF) +#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF) +#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF) +#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF) +#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF) +#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF) +#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF) +#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF) +#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF) +#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF) +#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF) +#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF) +#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF) +#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF) +#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF) +#define LAPACK_sgetrs LAPACK_GLOBAL(sgetrs,SGETRS) +#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS) +#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS) +#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS) 
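From here on the header stops declaring wrappers and starts mapping every raw LAPACK_<name> onto the symbol the Fortran compiler actually emitted. LAPACK_GLOBAL itself is supplied by lapacke_mangling.h; a sketch of the common default (trailing-underscore mangling), written out here only for illustration:

/* Sketch: the usual "append an underscore" Fortran mangling. Real builds
 * take this definition from lapacke_mangling.h instead. */
#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_

/* Under that definition, the mapping above makes LAPACK_dgetrf expand to
 * dgetrf_, so the raw Fortran-style prototype (every argument a pointer) */
void LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a,
                    lapack_int* lda, lapack_int* ipiv, lapack_int* info );
/* declares the Fortran symbol without any C source hard-coding the name. */

Builds whose Fortran compiler uses uppercase or unmangled symbols only need a different LAPACK_GLOBAL body; none of the hundreds of #define lines below change.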
+#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS) +#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS) +#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS) +#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS) +#define LAPACK_sgttrs LAPACK_GLOBAL(sgttrs,SGTTRS) +#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS) +#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS) +#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS) +#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS) +#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS) +#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS) +#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS) +#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS) +#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS) +#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS) +#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS) +#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS) +#define LAPACK_dpptrs LAPACK_GLOBAL(dpptrs,DPPTRS) +#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS) +#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS) +#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS) +#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS) +#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS) +#define LAPACK_zpbtrs LAPACK_GLOBAL(zpbtrs,ZPBTRS) +#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS) +#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS) +#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS) +#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS) +#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS) +#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS) +#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS) +#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS) +#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS) +#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS) +#define LAPACK_ssptrs LAPACK_GLOBAL(ssptrs,SSPTRS) +#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS) +#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS) +#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS) +#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS) +#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS) +#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS) +#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS) +#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS) +#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS) +#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS) +#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS) +#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS) +#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS) +#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS) +#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS) +#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS) +#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS) +#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON) +#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON) +#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON) +#define LAPACK_zgecon LAPACK_GLOBAL(zgecon,ZGECON) +#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON) +#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON) +#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON) +#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON) +#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON) +#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON) +#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON) +#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON) +#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON) +#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON) +#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON) +#define LAPACK_zpocon LAPACK_GLOBAL(zpocon,ZPOCON) +#define 
LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON) +#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON) +#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON) +#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON) +#define LAPACK_spbcon LAPACK_GLOBAL(spbcon,SPBCON) +#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON) +#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON) +#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON) +#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON) +#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON) +#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON) +#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON) +#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON) +#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON) +#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON) +#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON) +#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON) +#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON) +#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON) +#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON) +#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON) +#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON) +#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON) +#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON) +#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON) +#define LAPACK_dtrcon LAPACK_GLOBAL(dtrcon,DTRCON) +#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON) +#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON) +#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON) +#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON) +#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON) +#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON) +#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON) +#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON) +#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON) +#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON) +#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS) +#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS) +#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS) +#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS) +#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX) +#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX) +#define LAPACK_zgerfsx LAPACK_GLOBAL(zgerfsx,ZGERFSX) +#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX) +#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS) +#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS) +#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS) +#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS) +#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX) +#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX) +#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX) +#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX) +#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS) +#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS) +#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS) +#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS) +#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS) +#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS) +#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS) +#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS) +#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX) +#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX) +#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX) +#define LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX) +#define LAPACK_spprfs LAPACK_GLOBAL(spprfs,SPPRFS) +#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS) +#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS) +#define LAPACK_zpprfs 
LAPACK_GLOBAL(zpprfs,ZPPRFS) +#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS) +#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS) +#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS) +#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS) +#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS) +#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS) +#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS) +#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS) +#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS) +#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS) +#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS) +#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS) +#define LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX) +#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX) +#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX) +#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX) +#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS) +#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS) +#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX) +#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX) +#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS) +#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS) +#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS) +#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS) +#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS) +#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS) +#define LAPACK_strrfs LAPACK_GLOBAL(strrfs,STRRFS) +#define LAPACK_dtrrfs LAPACK_GLOBAL(dtrrfs,DTRRFS) +#define LAPACK_ctrrfs LAPACK_GLOBAL(ctrrfs,CTRRFS) +#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS) +#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS) +#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS) +#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS) +#define LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS) +#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS) +#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS) +#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS) +#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS) +#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI) +#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI) +#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI) +#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI) +#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI) +#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI) +#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI) +#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI) +#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI) +#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI) +#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI) +#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI) +#define LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI) +#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI) +#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI) +#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI) +#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI) +#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI) +#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI) +#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI) +#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI) +#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI) +#define LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI) +#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI) +#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI) +#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI) +#define LAPACK_chptri LAPACK_GLOBAL(chptri,CHPTRI) +#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI) +#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI) +#define LAPACK_dtrtri 
LAPACK_GLOBAL(dtrtri,DTRTRI) +#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI) +#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI) +#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI) +#define LAPACK_stftri LAPACK_GLOBAL(stftri,STFTRI) +#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI) +#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI) +#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI) +#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI) +#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI) +#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI) +#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU) +#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU) +#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU) +#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU) +#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB) +#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB) +#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB) +#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB) +#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU) +#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU) +#define LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU) +#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU) +#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB) +#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB) +#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB) +#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB) +#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU) +#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU) +#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU) +#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU) +#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB) +#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB) +#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB) +#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB) +#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU) +#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU) +#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU) +#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU) +#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU) +#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU) +#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU) +#define LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU) +#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB) +#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB) +#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB) +#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB) +#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB) +#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB) +#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV) +#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV) +#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV) +#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV) +#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV) +#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV) +#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX) +#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX) +#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX) +#define LAPACK_zgesvx LAPACK_GLOBAL(zgesvx,ZGESVX) +#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX) +#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX) +#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX) +#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX) +#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV) +#define LAPACK_dgbsv LAPACK_GLOBAL(dgbsv,DGBSV) +#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV) +#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV) +#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX) 
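The *gesv entries just mapped are the simplest dense drivers: one call performs the getrf-style LU factorization with partial pivoting and overwrites the right-hand side with the solution. A self-contained sketch against the high-level wrapper LAPACKE_dgesv (the 3-by-3 numbers are made up for illustration):

#include <stdio.h>
#include "lapacke.h"

int main( void )
{
    /* Made-up system A x = b, stored row-major. */
    double a[3*3] = { 2.0, 1.0, 1.0,
                      1.0, 3.0, 2.0,
                      1.0, 0.0, 0.0 };
    double b[3]   = { 4.0, 5.0, 6.0 };
    lapack_int ipiv[3];

    /* Row-major leading dimensions: lda = columns of a, ldb = nrhs. */
    lapack_int info = LAPACKE_dgesv( LAPACK_ROW_MAJOR, 3, 1, a, 3, ipiv, b, 1 );
    if( info != 0 ) {
        /* info > 0: exact zero pivot U(info,info); info < 0: bad argument. */
        fprintf( stderr, "dgesv failed, info = %d\n", (int)info );
        return 1;
    }
    printf( "x = [%g, %g, %g]\n", b[0], b[1], b[2] );  /* x = [6, 15, -23] */
    return 0;
}

The expert (gesvx) and extra-precise (gesvxx) variants mapped alongside add equilibration, condition estimation, and iterative refinement on top of the same factorization.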
+#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX)
+#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX)
+#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX)
+#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX)
+#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX)
+#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX)
+#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX)
+#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV)
+#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV)
+#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV)
+#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV)
+#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX)
+#define LAPACK_dgtsvx LAPACK_GLOBAL(dgtsvx,DGTSVX)
+#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX)
+#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX)
+#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV)
+#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV)
+#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV)
+#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV)
+#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV)
+#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV)
+#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX)
+#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX)
+#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX)
+#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX)
+#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX)
+#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX)
+#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX)
+#define LAPACK_cposvxx LAPACK_GLOBAL(cposvxx,CPOSVXX)
+#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV)
+#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV)
+#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV)
+#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV)
+#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX)
+#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX)
+#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX)
+#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX)
+#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV)
+#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV)
+#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV)
+#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV)
+#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX)
+#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX)
+#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX)
+#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX)
+#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV)
+#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV)
+#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV)
+#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV)
+#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX)
+#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX)
+#define LAPACK_cptsvx LAPACK_GLOBAL(cptsvx,CPTSVX)
+#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX)
+#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV)
+#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV)
+#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV)
+#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV)
+#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX)
+#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX)
+#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX)
+#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX)
+#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX)
+#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX)
+#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX)
+#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX)
+#define LAPACK_chesv LAPACK_GLOBAL(chesv,CHESV)
+#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV)
+#define LAPACK_chesvx LAPACK_GLOBAL(chesvx,CHESVX)
+#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX)
+#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX)
+#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX)
+#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV)
+#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV)
+#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV)
+#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV)
+#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX)
+#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX)
+#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX)
+#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX)
+#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)
+#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)
+#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)
+#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)
+#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF)
+#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF)
+#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF)
+#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF)
+#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF)
+#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF)
+#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF)
+#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF)
+#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3)
+#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3)
+#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3)
+#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3)
+#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR)
+#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR)
+#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR)
+#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR)
+#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR)
+#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR)
+#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR)
+#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR)
+#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF)
+#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF)
+#define LAPACK_cgelqf LAPACK_GLOBAL(cgelqf,CGELQF)
+#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF)
+#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ)
+#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ)
+#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ)
+#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ)
+#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ)
+#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ)
+#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ)
+#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ)
+#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF)
+#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF)
+#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF)
+#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF)
+#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL)
+#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL)
+#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL)
+#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL)
+#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL)
+#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL)
+#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL)
+#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL)
+#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF)
+#define LAPACK_dgerqf LAPACK_GLOBAL(dgerqf,DGERQF)
+#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF)
+#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF)
+#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ)
+#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ)
+#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ)
+#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ)
+#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ)
+#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ)
+#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ)
+#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ)
+#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF)
+#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF)
+#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF)
+#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF)
+#define LAPACK_sormrz LAPACK_GLOBAL(sormrz,SORMRZ)
+#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ)
+#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ)
+#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ)
+#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF)
+#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF)
+#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF)
+#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF)
+#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF)
+#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF)
+#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF)
+#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF)
+#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD)
+#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD)
+#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD)
+#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD)
+#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD)
+#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD)
+#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD)
+#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD)
+#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)
+#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)
+#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR)
+#define LAPACK_dormbr LAPACK_GLOBAL(dormbr,DORMBR)
+#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR)
+#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR)
+#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR)
+#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR)
+#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR)
+#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR)
+#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR)
+#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR)
+#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC)
+#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC)
+#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD)
+#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD)
+#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR)
+#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR)
+#define LAPACK_sormtr LAPACK_GLOBAL(sormtr,SORMTR)
+#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR)
+#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD)
+#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD)
+#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR)
+#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR)
+#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR)
+#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)
+#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD)
+#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD)
+#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)
+#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)
+#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)
+#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)
+#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)
+#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)
+#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)
+#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)
+#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)
+#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)
+#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD)
+#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD)
+#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD)
+#define LAPACK_zhbtrd LAPACK_GLOBAL(zhbtrd,ZHBTRD)
+#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF)
+#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF)
+#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR)
+#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR)
+#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR)
+#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR)
+#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR)
+#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR)
+#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR)
+#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR)
+#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC)
+#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC)
+#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC)
+#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC)
+#define LAPACK_sstegr LAPACK_GLOBAL(sstegr,SSTEGR)
+#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR)
+#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR)
+#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR)
+#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR)
+#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR)
+#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR)
+#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR)
+#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ)
+#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ)
+#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN)
+#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN)
+#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN)
+#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN)
+#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA)
+#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA)
+#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST)
+#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST)
+#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST)
+#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST)
+#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST)
+#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST)
+#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)
+#define LAPACK_zhpgst LAPACK_GLOBAL(zhpgst,ZHPGST)
+#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST)
+#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST)
+#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST)
+#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST)
+#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)
+#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)
+#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)
+#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)
+#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD)
+#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD)
+#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD)
+#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD)
+#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)
+#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)
+#define LAPACK_sormhr LAPACK_GLOBAL(sormhr,SORMHR)
+#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR)
+#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR)
+#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR)
+#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR)
+#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR)
+#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL)
+#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL)
+#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL)
+#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL)
+#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK)
+#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK)
+#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK)
+#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK)
+#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)
+#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)
+#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)
+#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)
+#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)
+#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)
+#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)
+#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)
+#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)
+#define LAPACK_dtrevc LAPACK_GLOBAL(dtrevc,DTREVC)
+#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)
+#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)
+#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA)
+#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA)
+#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA)
+#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA)
+#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)
+#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)
+#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)
+#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)
+#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN)
+#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN)
+#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN)
+#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN)
+#define LAPACK_strsyl LAPACK_GLOBAL(strsyl,STRSYL)
+#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL)
+#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL)
+#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL)
+#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD)
+#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD)
+#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD)
+#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD)
+#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL)
+#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL)
+#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL)
+#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL)
+#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK)
+#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK)
+#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK)
+#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK)
+#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)
+#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)
+#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)
+#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)
+#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)
+#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)
+#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)
+#define LAPACK_ztgevc LAPACK_GLOBAL(ztgevc,ZTGEVC)
+#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)
+#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)
+#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)
+#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)
+#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)
+#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)
+#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)
+#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)
+#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)
+#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)
+#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)
+#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)
+#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)
+#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)
+#define LAPACK_ctgsna LAPACK_GLOBAL(ctgsna,CTGSNA)
+#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)
+#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP)
+#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP)
+#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP)
+#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP)
+#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)
+#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)
+#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)
+#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)
+#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS)
+#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS)
+#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS)
+#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS)
+#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY)
+#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY)
+#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY)
+#define LAPACK_zgelsy LAPACK_GLOBAL(zgelsy,ZGELSY)
+#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS)
+#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS)
+#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS)
+#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS)
+#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD)
+#define LAPACK_dgelsd LAPACK_GLOBAL(dgelsd,DGELSD)
+#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD)
+#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD)
+#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE)
+#define LAPACK_dgglse LAPACK_GLOBAL(dgglse,DGGLSE)
+#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE)
+#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE)
+#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM)
+#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM)
+#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM)
+#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM)
+#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV)
+#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV)
+#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV)
+#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV)
+#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD)
+#define LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD)
+#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD)
+#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD)
+#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX)
+#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX)
+#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX)
+#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX)
+#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR)
+#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR)
+#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR)
+#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR)
+#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV)
+#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV)
+#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)
+#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)
+#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD)
+#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD)
+#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)
+#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)
+#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX)
+#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX)
+#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)
+#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)
+#define LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)
+#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)
+#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV)
+#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV)
+#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)
+#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)
+#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD)
+#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD)
+#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX)
+#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)
+#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX)
+#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX)
+#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV)
+#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV)
+#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD)
+#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD)
+#define LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX)
+#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX)
+#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR)
+#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR)
+#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES)
+#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES)
+#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES)
+#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES)
+#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX)
+#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX)
+#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX)
+#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX)
+#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV)
+#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV)
+#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV)
+#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV)
+#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX)
+#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX)
+#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX)
+#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX)
+#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD)
+#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD)
+#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD)
+#define LAPACK_zgesvd LAPACK_GLOBAL(zgesvd,ZGESVD)
+#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD)
+#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD)
+#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD)
+#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD)
+#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV)
+#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV)
+#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ)
+#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ)
+#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD)
+#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD)
+#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD)
+#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD)
+#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV)
+#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV)
+#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV)
+#define LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV)
+#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD)
+#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD)
+#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD)
+#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD)
+#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX)
+#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX)
+#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX)
+#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX)
+#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV)
+#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV)
+#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)
+#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)
+#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD)
+#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD)
+#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)
+#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)
+#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX)
+#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX)
+#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)
+#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)
+#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV)
+#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV)
+#define LAPACK_chbgv LAPACK_GLOBAL(chbgv,CHBGV)
+#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV)
+#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD)
+#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD)
+#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD)
+#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD)
+#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX)
+#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX)
+#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX)
+#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX)
+#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES)
+#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES)
+#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES)
+#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES)
+#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX)
+#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX)
+#define LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX)
+#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX)
+#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV)
+#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV)
+#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV)
+#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV)
+#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX)
+#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX)
+#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX)
+#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX)
+#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK)
+#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK)
+#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)
+#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)
+#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)
+#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)
+#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)
+#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)
+#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)
+#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)
+#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)
+#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)
+#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)
+#define LAPACK_stfttr LAPACK_GLOBAL(stfttr,STFTTR)
+#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)
+#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)
+#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)
+#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)
+#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)
+#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)
+#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)
+#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)
+#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)
+#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)
+#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF)
+#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF)
+#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF)
+#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF)
+#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP)
+#define LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP)
+#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP)
+#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP)
+#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP)
+#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP)
+#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP)
+#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP)
+#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)
+#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)
+#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)
+#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)
+#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)
+#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)
+#define LAPACK_sgeqr2 LAPACK_GLOBAL(sgeqr2,SGEQR2)
+#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2)
+#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2)
+#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2)
+#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)
+#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)
+#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)
+#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)
+#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2)
+#define LAPACK_dgetf2 LAPACK_GLOBAL(dgetf2,DGETF2)
+#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2)
+#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2)
+#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)
+#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)
+#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)
+#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)
+#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)
+#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)
+#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)
+#define LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)
+#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)
+#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)
+#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)
+#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)
+#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)
+#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)
+#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)
+#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)
+#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)
+#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)
+#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)
+#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)
+#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2)
+#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2)
+#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2)
+#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2)
+#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)
+#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)
+#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)
+#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)
+#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)
+#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)
+#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)
+#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)
+#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)
+#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)
+#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)
+#define LAPACK_zlarft LAPACK_GLOBAL(zlarft,ZLARFT)
+#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)
+#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)
+#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)
+#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)
+#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)
+#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)
+#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)
+#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)
+#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)
+#define LAPACK_dlag2s LAPACK_GLOBAL(dlag2s,DLAG2S)
+#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)
+#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)
+#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)
+#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)
+#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)
+#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)
+#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)
+#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)
+#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)
+#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)
+#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)
+#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)
+#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)
+#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)
+#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)
+#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)
+#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)
+#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)
+#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)
+#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)
+#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)
+#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)
+#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)
+#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)
+#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)
+#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)
+#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)
+#define LAPACK_dlapy2 LAPACK_GLOBAL(dlapy2,DLAPY2)
+#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)
+#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)
+#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)
+#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)
+#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)
+#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)
+// LAPACK 3.3.0
+#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD)
+#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR)
+#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2)
+#define LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X)
+#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)
+#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV)
+#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR)
+#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)
+#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)
+#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)
+#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB)
+#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD)
+#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD)
+#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)
+#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)
+#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV)
+#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR)
+#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)
+#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)
+#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)
+#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD)
+#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)
+#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)
+#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV)
+#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR)
+#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)
+#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)
+#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)
+#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD)
+#define LAPACK_zheswapr LAPACK_GLOBAL(zheswapr,ZHESWAPR)
+#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2)
+#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X)
+#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)
+#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV)
+#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR)
+#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)
+#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)
+#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)
+#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB)
+#define LAPACK_zuncsd LAPACK_GLOBAL(zuncsd,ZUNCSD)
+// LAPACK 3.4.0
+#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT)
+#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT)
+#define LAPACK_cgemqrt LAPACK_GLOBAL(cgemqrt,CGEMQRT)
+#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT)
+#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT)
+#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)
+#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT)
+#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT)
+#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2)
+#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2)
+#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2)
+#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2)
+#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3)
+#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3)
+#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3)
+#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3)
+#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)
+#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)
+#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)
+#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)
+#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)
+#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)
+#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)
+#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)
+#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)
+#define LAPACK_ctpqrt2 LAPACK_GLOBAL(ctpqrt2,CTPQRT2)
+#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)
+#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)
+#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)
+#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)
+#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)
+// LAPACK 3.X.X
+#define LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR)
+#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR)
+
+
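The prototypes below follow the Fortran calling convention: every argument, including scalars, is passed by pointer, matrices are stored column-major, and LAPACK_GLOBAL maps each C name to the platform's Fortran symbol mangling (commonly lowercase with a trailing underscore, so LAPACK_dgetrf would resolve to dgetrf_). A minimal usage sketch, assuming this header is installed as lapacke.h and the program is linked against a LAPACK build:

    #include <stdio.h>
    #include "lapacke.h"   /* assumed install name; provides lapack_int and these prototypes */

    int main(void)
    {
        lapack_int n = 2, lda = 2, info = 0;
        lapack_int ipiv[2];
        double a[4] = { 4.0, 2.0,    /* column 0 */
                        1.0, 3.0 };  /* column 1 (column-major storage) */

        /* LU factorization with partial pivoting; every argument passed by pointer */
        LAPACK_dgetrf( &n, &n, a, &lda, ipiv, &info );
        printf( "dgetrf info = %d\n", (int)info );  /* 0 indicates success */
        return 0;
    }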
+void LAPACK_sgetrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_cgetrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );
+void LAPACK_zgetrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );
+void LAPACK_sgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
+                    lapack_int* ku, float* ab, lapack_int* ldab,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_dgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
+                    lapack_int* ku, double* ab, lapack_int* ldab,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_cgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
+                    lapack_int* ku, lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_zgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
+                    lapack_int* ku, lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_sgttrf( lapack_int* n, float* dl, float* d, float* du, float* du2,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_dgttrf( lapack_int* n, double* dl, double* d, double* du,
+                    double* du2, lapack_int* ipiv, lapack_int *info );
+void LAPACK_cgttrf( lapack_int* n, lapack_complex_float* dl,
+                    lapack_complex_float* d, lapack_complex_float* du,
+                    lapack_complex_float* du2, lapack_int* ipiv,
+                    lapack_int *info );
+void LAPACK_zgttrf( lapack_int* n, lapack_complex_double* dl,
+                    lapack_complex_double* d, lapack_complex_double* du,
+                    lapack_complex_double* du2, lapack_int* ipiv,
+                    lapack_int *info );
+void LAPACK_spotrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
+                    lapack_int *info );
+void LAPACK_dpotrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
+                    lapack_int *info );
+void LAPACK_cpotrf( char* uplo, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_zpotrf( char* uplo, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_dpstrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
+                    lapack_int* piv, lapack_int* rank, double* tol,
+                    double* work, lapack_int *info );
+void LAPACK_spstrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
+                    lapack_int* piv, lapack_int* rank, float* tol, float* work,
+                    lapack_int *info );
+void LAPACK_zpstrf( char* uplo, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int* piv, lapack_int* rank,
+                    double* tol, double* work, lapack_int *info );
+void LAPACK_cpstrf( char* uplo, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int* piv, lapack_int* rank,
+                    float* tol, float* work, lapack_int *info );
+void LAPACK_dpftrf( char* transr, char* uplo, lapack_int* n, double* a,
+                    lapack_int *info );
+void LAPACK_spftrf( char* transr, char* uplo, lapack_int* n, float* a,
+                    lapack_int *info );
+void LAPACK_zpftrf( char* transr, char* uplo, lapack_int* n,
+                    lapack_complex_double* a, lapack_int *info );
+void LAPACK_cpftrf( char* transr, char* uplo, lapack_int* n,
+                    lapack_complex_float* a, lapack_int *info );
+void LAPACK_spptrf( char* uplo, lapack_int* n, float* ap, lapack_int *info );
+void LAPACK_dpptrf( char* uplo, lapack_int* n, double* ap, lapack_int *info );
+void LAPACK_cpptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
+                    lapack_int *info );
+void LAPACK_zpptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
+                    lapack_int *info );
+void LAPACK_spbtrf( char* uplo, lapack_int* n, lapack_int* kd, float* ab,
+                    lapack_int* ldab, lapack_int *info );
+void LAPACK_dpbtrf( char* uplo, lapack_int* n, lapack_int* kd, double* ab,
+                    lapack_int* ldab, lapack_int *info );
+void LAPACK_cpbtrf( char* uplo, lapack_int* n, lapack_int* kd,
+                    lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_int *info );
+void LAPACK_zpbtrf( char* uplo, lapack_int* n, lapack_int* kd,
+                    lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_int *info );
+void LAPACK_spttrf( lapack_int* n, float* d, float* e, lapack_int *info );
+void LAPACK_dpttrf( lapack_int* n, double* d, double* e, lapack_int *info );
+void LAPACK_cpttrf( lapack_int* n, float* d, lapack_complex_float* e,
+                    lapack_int *info );
+void LAPACK_zpttrf( lapack_int* n, double* d, lapack_complex_double* e,
+                    lapack_int *info );
+void LAPACK_ssytrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
+                    lapack_int* ipiv, float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_dsytrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
+                    lapack_int* ipiv, double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_csytrf( char* uplo, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int* ipiv,
+                    lapack_complex_float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_zsytrf( char* uplo, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int* ipiv,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_chetrf( char* uplo, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int* ipiv,
+                    lapack_complex_float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_zhetrf( char* uplo, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int* ipiv,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_ssptrf( char* uplo, lapack_int* n, float* ap, lapack_int* ipiv,
+                    lapack_int *info );
+void LAPACK_dsptrf( char* uplo, lapack_int* n, double* ap, lapack_int* ipiv,
+                    lapack_int *info );
+void LAPACK_csptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_zsptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_chptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_zhptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
+                    lapack_int* ipiv, lapack_int *info );
+void LAPACK_sgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const float* a, lapack_int* lda, const lapack_int* ipiv,
+                    float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const double* a, lapack_int* lda, const lapack_int* ipiv,
+                    double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_zgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_sgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const float* ab, lapack_int* ldab,
+                    const lapack_int* ipiv, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const double* ab, lapack_int* ldab,
+                    const lapack_int* ipiv, double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_cgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const lapack_complex_float* ab,
+                    lapack_int* ldab, const lapack_int* ipiv,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const lapack_complex_double* ab,
+                    lapack_int* ldab, const lapack_int* ipiv,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_sgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const float* dl, const float* d, const float* du,
+                    const float* du2, const lapack_int* ipiv, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const double* dl, const double* d, const double* du,
+                    const double* du2, const lapack_int* ipiv, double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_cgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* dl,
+                    const lapack_complex_float* d,
+                    const lapack_complex_float* du,
+                    const lapack_complex_float* du2, const lapack_int* ipiv,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* dl,
+                    const lapack_complex_double* d,
+                    const lapack_complex_double* du,
+                    const lapack_complex_double* du2, const lapack_int* ipiv,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_spotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* a, lapack_int* lda, double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_cpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* a, double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_spftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const float* a, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_complex_double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_cpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_spptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const float* ap, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* ap, double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_cpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* ap, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_zpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* ap, lapack_complex_double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_spbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
+                    const float* ab, lapack_int* ldab, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
+                    const double* ab, lapack_int* ldab, double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_cpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
+                    const lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
+                    const lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_spttrs( lapack_int* n, lapack_int* nrhs, const float* d,
+                    const float* e, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dpttrs( lapack_int* n, lapack_int* nrhs, const double* d,
+                    const double* e, double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_cpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,
+                    const lapack_complex_float* e, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_zpttrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* d, const lapack_complex_double* e,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_ssytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
+                    lapack_int* lda, const lapack_int* ipiv, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* a, lapack_int* lda, const lapack_int* ipiv,
+                    double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_csytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_zsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_chetrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_zhetrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    const lapack_int* ipiv, lapack_complex_double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_ssptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const float* ap, const lapack_int* ipiv, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const double* ap, const lapack_int* ipiv, double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_csptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* ap, const lapack_int* ipiv,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* ap, const lapack_int* ipiv,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_chptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* ap, const lapack_int* ipiv,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_zhptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* ap, const lapack_int* ipiv,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_strtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const float* a, lapack_int* lda, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dtrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const double* a, lapack_int* lda,
+                    double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_ctrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const lapack_complex_float* a,
+                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_ztrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const lapack_complex_double* a,
+                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_stptrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const float* ap, float* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_dtptrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const double* ap, double* b,
+                    lapack_int* ldb, lapack_int *info );
+void LAPACK_ctptrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const lapack_complex_float* ap,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_ztptrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* nrhs, const lapack_complex_double* ap,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_stbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* kd, lapack_int* nrhs, const float* ab,
+                    lapack_int* ldab, float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_dtbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* kd, lapack_int* nrhs, const double* ab,
+                    lapack_int* ldab, double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_ctbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* kd, lapack_int* nrhs,
+                    const lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_ztbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
+                    lapack_int* kd, lapack_int* nrhs,
+                    const lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_int *info );
+void LAPACK_sgecon( char* norm, lapack_int* n, const float* a, lapack_int* lda,
+                    float* anorm, float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgecon( char* norm, lapack_int* n, const double* a, lapack_int* lda,
+                    double* anorm, double* rcond, double* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_cgecon( char* norm, lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, float* anorm, float* rcond,
+                    lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_zgecon( char* norm, lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, double* anorm, double* rcond,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_sgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    const float* ab, lapack_int* ldab, const lapack_int* ipiv,
+                    float* anorm, float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    const double* ab, lapack_int* ldab, const lapack_int* ipiv,
+                    double* anorm, double* rcond, double* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_cgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    const lapack_complex_float* ab, lapack_int* ldab,
+                    const lapack_int* ipiv, float* anorm, float* rcond,
+                    lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_zgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    const lapack_complex_double* ab, lapack_int* ldab,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_sgtcon( char* norm, lapack_int* n, const float* dl, const float* d,
+                    const float* du, const float* du2, const lapack_int* ipiv,
+                    float* anorm, float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgtcon( char* norm, lapack_int* n, const double* dl,
+                    const double* d, const double* du, const double* du2,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgtcon( char* norm, lapack_int* n, const lapack_complex_float* dl,
+                    const lapack_complex_float* d,
+                    const lapack_complex_float* du,
+                    const lapack_complex_float* du2, const lapack_int* ipiv,
+                    float* anorm, float* rcond, lapack_complex_float* work,
+                    lapack_int *info );
+void LAPACK_zgtcon( char* norm, lapack_int* n, const lapack_complex_double* dl,
+                    const lapack_complex_double* d,
+                    const lapack_complex_double* du,
+                    const lapack_complex_double* du2, const lapack_int* ipiv,
+                    double* anorm, double* rcond, lapack_complex_double* work,
+                    lapack_int *info );
+void LAPACK_spocon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
+                    float* anorm, float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dpocon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
+                    double* anorm, double* rcond, double* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_cpocon( char* uplo, lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, float* anorm, float* rcond,
+                    lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_zpocon( char* uplo, lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, double* anorm, double* rcond,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_sppcon( char* uplo, lapack_int* n, const float* ap, float* anorm,
+                    float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dppcon( char* uplo, lapack_int* n, const double* ap, double* anorm,
+                    double* rcond, double* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_cppcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
+                    float* anorm, float* rcond, lapack_complex_float* work,
+                    float* rwork, lapack_int *info );
+void LAPACK_zppcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
+                    double* anorm, double* rcond, lapack_complex_double* work,
+                    double* rwork, lapack_int *info );
+void LAPACK_spbcon( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,
+                    lapack_int* ldab, float* anorm, float* rcond, float* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_dpbcon( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,
+                    lapack_int* ldab, double* anorm, double* rcond,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cpbcon( char* uplo, lapack_int* n, lapack_int* kd,
+                    const lapack_complex_float* ab, lapack_int* ldab,
+                    float* anorm, float* rcond, lapack_complex_float* work,
+                    float* rwork, lapack_int *info );
+void LAPACK_zpbcon( char* uplo, lapack_int* n, lapack_int* kd,
+                    const lapack_complex_double* ab, lapack_int* ldab,
+                    double* anorm, double* rcond, lapack_complex_double* work,
+                    double* rwork, lapack_int *info );
+void LAPACK_sptcon( lapack_int* n, const float* d, const float* e, float* anorm,
+                    float* rcond, float* work, lapack_int *info );
+void LAPACK_dptcon( lapack_int* n, const double* d, const double* e,
+                    double* anorm, double* rcond, double* work,
+                    lapack_int *info );
+void LAPACK_cptcon( lapack_int* n, const float* d,
+                    const lapack_complex_float* e, float* anorm, float* rcond,
+                    float* work, lapack_int *info );
+void LAPACK_zptcon( lapack_int* n, const double* d,
+                    const lapack_complex_double* e, double* anorm,
+                    double* rcond, double* work, lapack_int *info );
+void LAPACK_ssycon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
+                    const lapack_int* ipiv, float* anorm, float* rcond,
+                    float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dsycon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_csycon( char* uplo, lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, const lapack_int* ipiv, float* anorm,
+                    float* rcond, lapack_complex_float* work,
+                    lapack_int *info );
+void LAPACK_zsycon( char* uplo, lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, const lapack_int* ipiv, double* anorm,
+                    double* rcond, lapack_complex_double* work,
+                    lapack_int *info );
+void LAPACK_checon( char* uplo, lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, const lapack_int* ipiv, float* anorm,
+                    float* rcond, lapack_complex_float* work,
+                    lapack_int *info );
+void LAPACK_zhecon( char* uplo, lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, const lapack_int* ipiv, double* anorm,
+                    double* rcond, lapack_complex_double* work,
+                    lapack_int *info );
+void LAPACK_sspcon( char* uplo, lapack_int* n, const float* ap,
+                    const lapack_int* ipiv, float* anorm, float* rcond,
+                    float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dspcon( char* uplo, lapack_int* n, const double* ap,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cspcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
+                    const lapack_int* ipiv, float* anorm, float* rcond,
+                    lapack_complex_float* work, lapack_int *info );
+void LAPACK_zspcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    lapack_complex_double* work, lapack_int *info );
+void LAPACK_chpcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
+                    const lapack_int* ipiv, float* anorm, float* rcond,
+                    lapack_complex_float* work, lapack_int *info );
+void LAPACK_zhpcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
+                    const lapack_int* ipiv, double* anorm, double* rcond,
+                    lapack_complex_double* work, lapack_int *info );
+void LAPACK_strcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const float* a, lapack_int* lda, float* rcond, float* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_dtrcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const double* a, lapack_int* lda, double* rcond,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_ctrcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    float* rcond, lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_ztrcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    double* rcond, lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_stpcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const float* ap, float* rcond, float* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_dtpcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const double* ap, double* rcond, double* work,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_ctpcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const lapack_complex_float* ap, float* rcond,
+                    lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_ztpcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    const lapack_complex_double* ap, double* rcond,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_stbcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    lapack_int* kd, const float* ab, lapack_int* ldab,
+                    float* rcond, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dtbcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    lapack_int* kd, const double* ab, lapack_int* ldab,
+                    double* rcond, double* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_ctbcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    lapack_int* kd, const lapack_complex_float* ab,
+                    lapack_int* ldab, float* rcond, lapack_complex_float* work,
+                    float* rwork, lapack_int *info );
+void LAPACK_ztbcon( char* norm, char* uplo, char* diag, lapack_int* n,
+                    lapack_int* kd, const lapack_complex_double* ab,
+                    lapack_int* ldab, double* rcond,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_sgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const float* a, lapack_int* lda, const float* af,
+                    lapack_int* ldaf, const lapack_int* ipiv, const float* b,
+                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,
+                    float* berr, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const double* a, lapack_int* lda, const double* af,
+                    lapack_int* ldaf, const lapack_int* ipiv, const double* b,
+                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,
+                    double* berr, double* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_cgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    const lapack_complex_float* af, lapack_int* ldaf,
+                    const lapack_int* ipiv, const lapack_complex_float* b,
+                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
+                    float* ferr, float* berr, lapack_complex_float* work,
+                    float* rwork, lapack_int *info );
+void LAPACK_zgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    const lapack_complex_double* af, lapack_int* ldaf,
+                    const lapack_int* ipiv, const lapack_complex_double* b,
+                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
+                    double* ferr, double* berr, lapack_complex_double* work,
+                    double* rwork, lapack_int *info );
+void LAPACK_dgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
+                     const double* a, lapack_int* lda, const double* af,
+                     lapack_int* ldaf, const lapack_int* ipiv, const double* r,
+                     const double* c, const double* b, lapack_int* ldb,
+                     double* x, lapack_int* ldx, double* rcond, double* berr,
+                     lapack_int* n_err_bnds, double* err_bnds_norm,
+                     double* err_bnds_comp, lapack_int* nparams, double* params,
+                     double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_sgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
+                     const float* a, lapack_int* lda, const float* af,
+                     lapack_int* ldaf, const lapack_int* ipiv, const float* r,
+                     const float* c, const float* b, lapack_int* ldb, float* x,
+                     lapack_int* ldx, float* rcond, float* berr,
+                     lapack_int* n_err_bnds, float* err_bnds_norm,
+                     float* err_bnds_comp, lapack_int* nparams, float* params,
+                     float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_zgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
+                     const lapack_complex_double* a, lapack_int* lda,
+                     const lapack_complex_double* af, lapack_int* ldaf,
+                     const lapack_int* ipiv, const double* r, const double* c,
+                     const lapack_complex_double* b, lapack_int* ldb,
+                     lapack_complex_double* x, lapack_int* ldx, double* rcond,
+                     double* berr, lapack_int* n_err_bnds,
+                     double* err_bnds_norm, double* err_bnds_comp,
+                     lapack_int* nparams, double* params,
+                     lapack_complex_double* work, double* rwork,
+                     lapack_int *info );
+void LAPACK_cgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
+                     const lapack_complex_float* a, lapack_int* lda,
+                     const lapack_complex_float* af, lapack_int* ldaf,
+                     const lapack_int* ipiv, const float* r, const float* c,
+                     const lapack_complex_float* b, lapack_int* ldb,
+                     lapack_complex_float* x, lapack_int* ldx, float* rcond,
+                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
+                     float* err_bnds_comp, lapack_int* nparams, float* params,
+                     lapack_complex_float* work, float* rwork,
+                     lapack_int *info );
+void LAPACK_sgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const float* ab, lapack_int* ldab,
+                    const float* afb, lapack_int* ldafb, const lapack_int* ipiv,
+                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
+                    float* ferr, float* berr, float* work, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const double* ab, lapack_int* ldab,
+                    const double* afb, lapack_int* ldafb,
+                    const lapack_int* ipiv, const double* b, lapack_int* ldb,
+                    double* x, lapack_int* ldx, double* ferr, double* berr,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const lapack_complex_float* ab,
+                    lapack_int* ldab, const lapack_complex_float* afb,
+                    lapack_int* ldafb, const lapack_int* ipiv,
+                    const lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* x, lapack_int* ldx, float* ferr,
+                    float* berr, lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_zgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
+                    lapack_int* nrhs, const lapack_complex_double* ab,
+                    lapack_int* ldab, const lapack_complex_double* afb,
+                    lapack_int* ldafb, const lapack_int* ipiv,
+                    const lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* x, lapack_int* ldx, double* ferr,
+                    double* berr, lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_dgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
+                     lapack_int* ku, lapack_int* nrhs, const double* ab,
+                     lapack_int* ldab, const double* afb, lapack_int* ldafb,
+                     const lapack_int* ipiv, const double* r, const double* c,
+                     const double* b, lapack_int* ldb, double* x,
+                     lapack_int* ldx, double* rcond, double* berr,
+                     lapack_int* n_err_bnds, double* err_bnds_norm,
+                     double* err_bnds_comp, lapack_int* nparams, double* params,
+                     double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_sgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
+                     lapack_int* ku, lapack_int* nrhs, const float* ab,
+                     lapack_int* ldab, const float* afb, lapack_int* ldafb,
+                     const lapack_int* ipiv, const float* r, const float* c,
+                     const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
+                     float* rcond, float* berr, lapack_int* n_err_bnds,
+                     float* err_bnds_norm, float* err_bnds_comp,
+                     lapack_int* nparams, float* params, float* work,
+                     lapack_int* iwork, lapack_int *info );
+void LAPACK_zgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
+                     lapack_int* ku, lapack_int* nrhs,
+                     const lapack_complex_double* ab, lapack_int* ldab,
+                     const lapack_complex_double* afb, lapack_int* ldafb,
+                     const lapack_int* ipiv, const double* r, const double* c,
+                     const lapack_complex_double* b, lapack_int* ldb,
+                     lapack_complex_double* x, lapack_int* ldx, double* rcond,
+                     double* berr, lapack_int* n_err_bnds,
+                     double* err_bnds_norm, double* err_bnds_comp,
+                     lapack_int* nparams, double* params,
+                     lapack_complex_double* work, double* rwork,
+                     lapack_int *info );
+void LAPACK_cgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
+                     lapack_int* ku, lapack_int* nrhs,
+                     const lapack_complex_float* ab, lapack_int* ldab,
+                     const lapack_complex_float* afb, lapack_int* ldafb,
+                     const lapack_int* ipiv, const float* r, const float* c,
+                     const lapack_complex_float* b, lapack_int* ldb,
+                     lapack_complex_float* x, lapack_int* ldx, float* rcond,
+                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
+                     float* err_bnds_comp, lapack_int* nparams, float* params,
+                     lapack_complex_float* work, float* rwork,
+                     lapack_int *info );
+void LAPACK_sgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,
+                    const float* dl, const float* d, const float* du,
+                    const float* dlf, const float* df, const float* duf,
+                    const float* du2, const lapack_int* ipiv, const float* b,
+                    lapack_int*
ldb, float* x, lapack_int* ldx, float* ferr, + float* berr, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, + const double* dl, const double* d, const double* du, + const double* dlf, const double* df, const double* duf, + const double* du2, const lapack_int* ipiv, const double* b, + lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, + double* berr, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_cgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* dl, + const lapack_complex_float* d, + const lapack_complex_float* du, + const lapack_complex_float* dlf, + const lapack_complex_float* df, + const lapack_complex_float* duf, + const lapack_complex_float* du2, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* dl, + const lapack_complex_double* d, + const lapack_complex_double* du, + const lapack_complex_double* dlf, + const lapack_complex_double* df, + const lapack_complex_double* duf, + const lapack_complex_double* du2, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_sporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, + lapack_int* lda, const float* af, lapack_int* ldaf, + const float* b, lapack_int* ldb, float* x, lapack_int* ldx, + float* ferr, float* berr, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dporfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const double* a, lapack_int* lda, const double* af, + lapack_int* ldaf, const double* b, lapack_int* ldb, + double* x, lapack_int* ldx, double* ferr, double* berr, + double* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_cporfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zporfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_dporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const double* a, lapack_int* lda, const double* af, + lapack_int* ldaf, const double* s, const double* b, + lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, + double* berr, lapack_int* n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int* nparams, double* params, double* work, + lapack_int* iwork, lapack_int *info ); +void LAPACK_sporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const float* a, lapack_int* lda, const float* af, + lapack_int* ldaf, const float* s, const float* b, + lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, + float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, 
+ float* err_bnds_comp, lapack_int* nparams, float* params, + float* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_zporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const double* s, const lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, + double* rcond, double* berr, lapack_int* n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int* nparams, double* params, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_cporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const float* s, const lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, + float* rcond, float* berr, lapack_int* n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int* nparams, float* params, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_spprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const float* ap, const float* afp, const float* b, + lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, + float* berr, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const double* ap, const double* afp, const double* b, + lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, + double* berr, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_cpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* ap, + const lapack_complex_float* afp, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* ap, + const lapack_complex_double* afp, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_spbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, + const float* ab, lapack_int* ldab, const float* afb, + lapack_int* ldafb, const float* b, lapack_int* ldb, + float* x, lapack_int* ldx, float* ferr, float* berr, + float* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_dpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, + const double* ab, lapack_int* ldab, const double* afb, + lapack_int* ldafb, const double* b, lapack_int* ldb, + double* x, lapack_int* ldx, double* ferr, double* berr, + double* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_cpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, + const lapack_complex_float* ab, lapack_int* ldab, + const lapack_complex_float* afb, lapack_int* ldafb, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, + const lapack_complex_double* ab, lapack_int* ldab, + const lapack_complex_double* afb, lapack_int* ldafb, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + 
double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_sptrfs( lapack_int* n, lapack_int* nrhs, const float* d, + const float* e, const float* df, const float* ef, + const float* b, lapack_int* ldb, float* x, lapack_int* ldx, + float* ferr, float* berr, float* work, lapack_int *info ); +void LAPACK_dptrfs( lapack_int* n, lapack_int* nrhs, const double* d, + const double* e, const double* df, const double* ef, + const double* b, lapack_int* ldb, double* x, + lapack_int* ldx, double* ferr, double* berr, double* work, + lapack_int *info ); +void LAPACK_cptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d, + const lapack_complex_float* e, const float* df, + const lapack_complex_float* ef, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const double* d, const lapack_complex_double* e, + const double* df, const lapack_complex_double* ef, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_ssyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, + lapack_int* lda, const float* af, lapack_int* ldaf, + const lapack_int* ipiv, const float* b, lapack_int* ldb, + float* x, lapack_int* ldx, float* ferr, float* berr, + float* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_dsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const double* a, lapack_int* lda, const double* af, + lapack_int* ldaf, const lapack_int* ipiv, const double* b, + lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, + double* berr, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_csyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const lapack_int* ipiv, const lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, + float* ferr, float* berr, lapack_complex_float* work, + float* rwork, lapack_int *info ); +void LAPACK_zsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const lapack_int* ipiv, const lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, + double* ferr, double* berr, lapack_complex_double* work, + double* rwork, lapack_int *info ); +void LAPACK_dsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const double* a, lapack_int* lda, const double* af, + lapack_int* ldaf, const lapack_int* ipiv, const double* s, + const double* b, lapack_int* ldb, double* x, + lapack_int* ldx, double* rcond, double* berr, + lapack_int* n_err_bnds, double* err_bnds_norm, + double* err_bnds_comp, lapack_int* nparams, double* params, + double* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_ssyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const float* a, lapack_int* lda, const float* af, + lapack_int* ldaf, const lapack_int* ipiv, const float* s, + const float* b, lapack_int* ldb, float* x, lapack_int* ldx, + float* rcond, float* berr, lapack_int* n_err_bnds, + float* err_bnds_norm, float* err_bnds_comp, + lapack_int* nparams, float* params, float* work, + lapack_int* 
iwork, lapack_int *info ); +void LAPACK_zsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const lapack_int* ipiv, const double* s, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* rcond, + double* berr, lapack_int* n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int* nparams, double* params, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_csyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const lapack_int* ipiv, const float* s, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* rcond, + float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int* nparams, float* params, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_cherfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const lapack_int* ipiv, const lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, + float* ferr, float* berr, lapack_complex_float* work, + float* rwork, lapack_int *info ); +void LAPACK_zherfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const lapack_int* ipiv, const lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, + double* ferr, double* berr, lapack_complex_double* work, + double* rwork, lapack_int *info ); +void LAPACK_zherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* af, lapack_int* ldaf, + const lapack_int* ipiv, const double* s, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* rcond, + double* berr, lapack_int* n_err_bnds, + double* err_bnds_norm, double* err_bnds_comp, + lapack_int* nparams, double* params, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_cherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* af, lapack_int* ldaf, + const lapack_int* ipiv, const float* s, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* rcond, + float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, + float* err_bnds_comp, lapack_int* nparams, float* params, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_ssprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const float* ap, const float* afp, const lapack_int* ipiv, + const float* b, lapack_int* ldb, float* x, lapack_int* ldx, + float* ferr, float* berr, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dsprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const double* ap, const double* afp, const lapack_int* ipiv, + const double* b, lapack_int* ldb, double* x, + lapack_int* ldx, double* ferr, double* berr, double* work, + lapack_int* iwork, lapack_int *info ); +void LAPACK_csprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* ap, + const 
lapack_complex_float* afp, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zsprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* ap, + const lapack_complex_double* afp, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_chprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_float* ap, + const lapack_complex_float* afp, const lapack_int* ipiv, + const lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zhprfs( char* uplo, lapack_int* n, lapack_int* nrhs, + const lapack_complex_double* ap, + const lapack_complex_double* afp, const lapack_int* ipiv, + const lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* x, lapack_int* ldx, double* ferr, + double* berr, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_strrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const float* a, lapack_int* lda, + const float* b, lapack_int* ldb, const float* x, + lapack_int* ldx, float* ferr, float* berr, float* work, + lapack_int* iwork, lapack_int *info ); +void LAPACK_dtrrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const double* a, lapack_int* lda, + const double* b, lapack_int* ldb, const double* x, + lapack_int* ldx, double* ferr, double* berr, double* work, + lapack_int* iwork, lapack_int *info ); +void LAPACK_ctrrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* b, + lapack_int* ldb, const lapack_complex_float* x, + lapack_int* ldx, float* ferr, float* berr, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_ztrrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* b, + lapack_int* ldb, const lapack_complex_double* x, + lapack_int* ldx, double* ferr, double* berr, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_stprfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const float* ap, const float* b, + lapack_int* ldb, const float* x, lapack_int* ldx, + float* ferr, float* berr, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dtprfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const double* ap, const double* b, + lapack_int* ldb, const double* x, lapack_int* ldx, + double* ferr, double* berr, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_ctprfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const lapack_complex_float* ap, + const lapack_complex_float* b, lapack_int* ldb, + const lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_ztprfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* nrhs, const lapack_complex_double* ap, + const lapack_complex_double* b, lapack_int* ldb, + const 
lapack_complex_double* x, lapack_int* ldx, + double* ferr, double* berr, lapack_complex_double* work, + double* rwork, lapack_int *info ); +void LAPACK_stbrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* kd, lapack_int* nrhs, const float* ab, + lapack_int* ldab, const float* b, lapack_int* ldb, + const float* x, lapack_int* ldx, float* ferr, float* berr, + float* work, lapack_int* iwork, lapack_int *info ); +void LAPACK_dtbrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* kd, lapack_int* nrhs, const double* ab, + lapack_int* ldab, const double* b, lapack_int* ldb, + const double* x, lapack_int* ldx, double* ferr, + double* berr, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_ctbrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* kd, lapack_int* nrhs, + const lapack_complex_float* ab, lapack_int* ldab, + const lapack_complex_float* b, lapack_int* ldb, + const lapack_complex_float* x, lapack_int* ldx, float* ferr, + float* berr, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_ztbrfs( char* uplo, char* trans, char* diag, lapack_int* n, + lapack_int* kd, lapack_int* nrhs, + const lapack_complex_double* ab, lapack_int* ldab, + const lapack_complex_double* b, lapack_int* ldb, + const lapack_complex_double* x, lapack_int* ldx, + double* ferr, double* berr, lapack_complex_double* work, + double* rwork, lapack_int *info ); +void LAPACK_sgetri( lapack_int* n, float* a, lapack_int* lda, + const lapack_int* ipiv, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dgetri( lapack_int* n, double* a, lapack_int* lda, + const lapack_int* ipiv, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cgetri( lapack_int* n, lapack_complex_float* a, lapack_int* lda, + const lapack_int* ipiv, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zgetri( lapack_int* n, lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_spotri( char* uplo, lapack_int* n, float* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_dpotri( char* uplo, lapack_int* n, double* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_cpotri( char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_zpotri( char* uplo, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_dpftri( char* transr, char* uplo, lapack_int* n, double* a, + lapack_int *info ); +void LAPACK_spftri( char* transr, char* uplo, lapack_int* n, float* a, + lapack_int *info ); +void LAPACK_zpftri( char* transr, char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int *info ); +void LAPACK_cpftri( char* transr, char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int *info ); +void LAPACK_spptri( char* uplo, lapack_int* n, float* ap, lapack_int *info ); +void LAPACK_dpptri( char* uplo, lapack_int* n, double* ap, lapack_int *info ); +void LAPACK_cpptri( char* uplo, lapack_int* n, lapack_complex_float* ap, + lapack_int *info ); +void LAPACK_zpptri( char* uplo, lapack_int* n, lapack_complex_double* ap, + lapack_int *info ); +void LAPACK_ssytri( char* uplo, lapack_int* n, float* a, lapack_int* lda, + const lapack_int* ipiv, float* work, lapack_int *info ); +void LAPACK_dsytri( char* uplo, lapack_int* n, double* a, lapack_int* lda, + const lapack_int* ipiv, double* work, lapack_int *info ); +void LAPACK_csytri( 
char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zsytri( char* uplo, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_chetri( char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zhetri( char* uplo, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_ssptri( char* uplo, lapack_int* n, float* ap, + const lapack_int* ipiv, float* work, lapack_int *info ); +void LAPACK_dsptri( char* uplo, lapack_int* n, double* ap, + const lapack_int* ipiv, double* work, lapack_int *info ); +void LAPACK_csptri( char* uplo, lapack_int* n, lapack_complex_float* ap, + const lapack_int* ipiv, lapack_complex_float* work, + lapack_int *info ); +void LAPACK_zsptri( char* uplo, lapack_int* n, lapack_complex_double* ap, + const lapack_int* ipiv, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_chptri( char* uplo, lapack_int* n, lapack_complex_float* ap, + const lapack_int* ipiv, lapack_complex_float* work, + lapack_int *info ); +void LAPACK_zhptri( char* uplo, lapack_int* n, lapack_complex_double* ap, + const lapack_int* ipiv, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_strtri( char* uplo, char* diag, lapack_int* n, float* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_dtrtri( char* uplo, char* diag, lapack_int* n, double* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_ctrtri( char* uplo, char* diag, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_ztrtri( char* uplo, char* diag, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_dtftri( char* transr, char* uplo, char* diag, lapack_int* n, + double* a, lapack_int *info ); +void LAPACK_stftri( char* transr, char* uplo, char* diag, lapack_int* n, + float* a, lapack_int *info ); +void LAPACK_ztftri( char* transr, char* uplo, char* diag, lapack_int* n, + lapack_complex_double* a, lapack_int *info ); +void LAPACK_ctftri( char* transr, char* uplo, char* diag, lapack_int* n, + lapack_complex_float* a, lapack_int *info ); +void LAPACK_stptri( char* uplo, char* diag, lapack_int* n, float* ap, + lapack_int *info ); +void LAPACK_dtptri( char* uplo, char* diag, lapack_int* n, double* ap, + lapack_int *info ); +void LAPACK_ctptri( char* uplo, char* diag, lapack_int* n, + lapack_complex_float* ap, lapack_int *info ); +void LAPACK_ztptri( char* uplo, char* diag, lapack_int* n, + lapack_complex_double* ap, lapack_int *info ); +void LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a, + lapack_int* lda, float* r, float* c, float* rowcnd, + float* colcnd, float* amax, lapack_int *info ); +void LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a, + lapack_int* lda, double* r, double* c, double* rowcnd, + double* colcnd, double* amax, lapack_int *info ); +void LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a, + lapack_int* lda, float* r, float* c, float* rowcnd, + float* colcnd, float* amax, lapack_int *info ); +void LAPACK_zgeequ( lapack_int* m, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, double* r, + double* c, double* rowcnd, double* colcnd, double* amax, + lapack_int *info ); +void 
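(Editor's note, not part of the patch: the *getri inverters declared above follow LAPACK's workspace-query convention, where a call with lwork = -1 returns the optimal workspace size in work[0] instead of doing the inversion. A minimal sketch of inverting a column-major matrix through these raw bindings, assuming LAPACK_dgetrf from an earlier hunk of this header:)

/* Editorial sketch only, not part of this diff: invert an n-by-n
 * column-major matrix in place via LAPACK_dgetrf + LAPACK_dgetri.
 * Allocation failures are not checked, to keep the sketch short. */
#include <stdlib.h>
#include "lapacke.h"

static lapack_int invert_inplace(double *a, lapack_int n)
{
    lapack_int lda = n, info = 0, lwork = -1;
    lapack_int *ipiv = malloc((size_t)n * sizeof *ipiv);
    double query = 0.0, *work;

    LAPACK_dgetrf(&n, &n, a, &lda, ipiv, &info);      /* LU factorization */
    if (info != 0) { free(ipiv); return info; }       /* singular or bad arg */

    LAPACK_dgetri(&n, a, &lda, ipiv, &query, &lwork, &info); /* lwork = -1: size query */
    lwork = (lapack_int)query;
    work = malloc((size_t)lwork * sizeof *work);
    LAPACK_dgetri(&n, a, &lda, ipiv, work, &lwork, &info);   /* actual inversion */

    free(work);
    free(ipiv);
    return info;                                      /* 0 on success */
}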
+void LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_zgeequ( lapack_int* m, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_dgeequb( lapack_int* m, lapack_int* n, const double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_sgeequb( lapack_int* m, lapack_int* n, const float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_zgeequb( lapack_int* m, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_cgeequb( lapack_int* m, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_sgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_dgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_cgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_zgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_dgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_sgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_zgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
+void LAPACK_cgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
+void LAPACK_spoequ( lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_dpoequ( lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_cpoequ( lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_zpoequ( lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_dpoequb( lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_spoequb( lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_zpoequb( lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_cpoequb( lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_sppequ( char* uplo, lapack_int* n, const float* ap, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_dppequ( char* uplo, lapack_int* n, const double* ap, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_cppequ( char* uplo, lapack_int* n, const lapack_complex_float* ap, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_zppequ( char* uplo, lapack_int* n, const lapack_complex_double* ap, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_spbequ( char* uplo, lapack_int* n, lapack_int* kd, const float* ab, lapack_int* ldab, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_dpbequ( char* uplo, lapack_int* n, lapack_int* kd, const double* ab, lapack_int* ldab, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_cpbequ( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_float* ab, lapack_int* ldab, float* s, float* scond, float* amax, lapack_int *info );
+void LAPACK_zpbequ( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_double* ab, lapack_int* ldab, double* s, double* scond, double* amax, lapack_int *info );
+void LAPACK_dsyequb( char* uplo, lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, double* work, lapack_int *info );
+void LAPACK_ssyequb( char* uplo, lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, float* work, lapack_int *info );
+void LAPACK_zsyequb( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_complex_double* work, lapack_int *info );
+void LAPACK_csyequb( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_complex_float* work, lapack_int *info );
+void LAPACK_zheequb( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_complex_double* work, lapack_int *info );
+void LAPACK_cheequb( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_complex_float* work, lapack_int *info );
+void LAPACK_sgesv( lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dsgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, lapack_int* ipiv, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* work, float* swork, lapack_int* iter, lapack_int *info );
+void LAPACK_zcgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, lapack_complex_double* work, lapack_complex_float* swork, double* rwork, lapack_int* iter, lapack_int *info );
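(Editor's note, not part of the patch: these raw LAPACK_* bindings keep the Fortran calling convention, so every argument, including scalars, is passed by address, and matrices are stored column-major. A minimal sketch using the simple driver LAPACK_dgesv declared above:)

/* Editorial sketch only, not part of this diff: solve A*x = b. */
#include <stdio.h>
#include "lapacke.h"

int main(void)
{
    lapack_int n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info = 0;
    double a[4] = { 3.0, 1.0,     /* first column of A  */
                    1.0, 2.0 };   /* second column of A */
    double b[2] = { 9.0, 8.0 };   /* rhs, overwritten with the solution */

    LAPACK_dgesv(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
    if (info == 0)
        printf("x = (%g, %g)\n", b[0], b[1]);  /* prints x = (2, 3) */
    return (int)info;
}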
+void LAPACK_sgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_dgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_sgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_zgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_cgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_sgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_sgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_dgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_sgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_zgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_cgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_sgtsv( lapack_int* n, lapack_int* nrhs, float* dl, float* d, float* du, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dgtsv( lapack_int* n, lapack_int* nrhs, double* dl, double* d, double* du, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* dl, lapack_complex_float* d, lapack_complex_float* du, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* dl, lapack_complex_double* d, lapack_complex_double* du, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_sgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const float* dl, const float* d, const float* du, float* dlf, float* df, float* duf, float* du2, lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const double* dl, const double* d, const double* du, double* dlf, double* df, double* duf, double* du2, lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* dl, const lapack_complex_float* d, const lapack_complex_float* du, lapack_complex_float* dlf, lapack_complex_float* df, lapack_complex_float* duf, lapack_complex_float* du2, lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* dl, const lapack_complex_double* d, const lapack_complex_double* du, lapack_complex_double* dlf, lapack_complex_double* df, lapack_complex_double* duf, lapack_complex_double* du2, lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_sposv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cposv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zposv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dsposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* work, float* swork, lapack_int* iter, lapack_int *info );
+void LAPACK_zcposv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, lapack_complex_double* work, lapack_complex_float* swork, double* rwork, lapack_int* iter, lapack_int *info );
+void LAPACK_sposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, char* equed, float* s, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, char* equed, double* s, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_dposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, char* equed, double* s, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_sposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, char* equed, float* s, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_zposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_cposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_sppsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dppsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cppsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* ap, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zppsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* ap, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_sppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, float* ap, float* afp, char* equed, float* s, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, double* ap, double* afp, char* equed, double* s, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* ap, lapack_complex_float* afp, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* ap, lapack_complex_double* afp, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_spbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, float* ab, lapack_int* ldab, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, double* ab, lapack_int* ldab, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_spbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, lapack_int* ldafb, char* equed, float* s, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, lapack_int* ldafb, char* equed, double* s, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* afb, lapack_int* ldafb, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* afb, lapack_int* ldafb, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_sptsv( lapack_int* n, lapack_int* nrhs, float* d, float* e, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dptsv( lapack_int* n, lapack_int* nrhs, double* d, double* e, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cptsv( lapack_int* n, lapack_int* nrhs, float* d, lapack_complex_float* e, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zptsv( lapack_int* n, lapack_int* nrhs, double* d, lapack_complex_double* e, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_sptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d, const float* e, float* df, float* ef, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int *info );
+void LAPACK_dptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const double* d, const double* e, double* df, double* ef, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int *info );
+void LAPACK_cptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d, const lapack_complex_float* e, float* df, lapack_complex_float* ef, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const double* d, const lapack_complex_double* e, double* df, lapack_complex_double* ef, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_ssysv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb, float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_dsysv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, lapack_int* ipiv, double* b, lapack_int* ldb, double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_csysv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_zsysv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_ssysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_dsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_csysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, lapack_int* lwork, float* rwork, lapack_int *info );
+void LAPACK_zsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, lapack_int* lwork, double* rwork, lapack_int *info );
+void LAPACK_dsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* s, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_ssysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* s, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_zsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_csysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_chesv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_zhesv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_chesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, lapack_int* lwork, float* rwork, lapack_int *info );
+void LAPACK_zhesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, lapack_int* lwork, double* rwork, lapack_int *info );
+void LAPACK_zhesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* s, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_chesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* s, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_sspsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap, lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_dspsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap, lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_cspsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* ap, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zspsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* ap, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_sspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const float* ap, float* afp, lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const double* ap, double* afp, lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, lapack_complex_float* afp, lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, lapack_complex_double* afp, lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_chpsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_float* ap, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_zhpsv( char* uplo, lapack_int* n, lapack_int* nrhs, lapack_complex_double* ap, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
+void LAPACK_chpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, lapack_complex_float* afp, lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zhpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, lapack_complex_double* afp, lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
+void LAPACK_sgeqrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, float* tau, float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_dgeqrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, double* tau, double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_cgeqrf( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* tau, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_zgeqrf( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* tau, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, lapack_int* jpvt, float* tau, float* work, lapack_int *info );
+void LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, lapack_int* jpvt, double* tau, double* work, lapack_int *info );
+void LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* jpvt, lapack_complex_float* tau, lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* jpvt, lapack_complex_double* tau,
lapack_complex_double* work, + double* rwork, lapack_int *info ); +void LAPACK_sgeqp3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + lapack_int* jpvt, float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dgeqp3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + lapack_int* jpvt, double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cgeqp3( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int* jpvt, + lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, float* rwork, lapack_int *info ); +void LAPACK_zgeqp3( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int* jpvt, + lapack_complex_double* tau, lapack_complex_double* work, + lapack_int* lwork, double* rwork, lapack_int *info ); +void LAPACK_sorgqr( lapack_int* m, lapack_int* n, lapack_int* k, float* a, + lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorgqr( lapack_int* m, lapack_int* n, lapack_int* k, double* a, + lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sormqr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const float* a, lapack_int* lda, + const float* tau, float* c, lapack_int* ldc, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dormqr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const double* a, lapack_int* lda, + const double* tau, double* c, lapack_int* ldc, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cungqr( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zungqr( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmqr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmqr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sgelqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dgelqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* tau, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cgelqf( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zgelqf( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sorglq( lapack_int* m, lapack_int* n, lapack_int* k, float* a, + lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorglq( lapack_int* m, 
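/*
 * A minimal sketch of the usual calling pattern for the QR routines
 * declared nearby (LAPACK_dgeqrf / LAPACK_dorgqr). This comment is an
 * editor's illustration, not part of the patch; the data values and the
 * local names m, n, tau, work are assumptions, and <stdlib.h> is needed
 * for malloc. The Fortran-style interface passes every scalar by
 * pointer, and the workspace size is obtained by a query call with
 * lwork = -1 before the real call:
 *
 *   lapack_int m = 4, n = 3, lda = 4, info, lwork = -1;
 *   double a[4*3];            // column-major m-by-n input, overwritten
 *   double tau[3], wkopt;
 *   LAPACK_dgeqrf(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);  // query
 *   lwork = (lapack_int)wkopt;
 *   double *work = malloc((size_t)lwork * sizeof(double));
 *   LAPACK_dgeqrf(&m, &n, a, &lda, tau, work, &lwork, &info);    // factor
 *   lapack_int k = n;         // form Q (m-by-n) from the reflectors
 *   LAPACK_dorgqr(&m, &n, &k, a, &lda, tau, work, &lwork, &info);
 *   free(work);
 *
 * Checking info == 0 after each call is omitted for brevity; a second
 * workspace query for dorgqr would be more careful, but dgeqrf's optimal
 * lwork already meets dorgqr's minimum of max(1, n).
 */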
lapack_int* n, lapack_int* k, double* a, + lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sormlq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const float* a, lapack_int* lda, + const float* tau, float* c, lapack_int* ldc, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dormlq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const double* a, lapack_int* lda, + const double* tau, double* c, lapack_int* ldc, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cunglq( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zunglq( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmlq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmlq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sgeqlf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dgeqlf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* tau, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cgeqlf( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zgeqlf( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sorgql( lapack_int* m, lapack_int* n, lapack_int* k, float* a, + lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorgql( lapack_int* m, lapack_int* n, lapack_int* k, double* a, + lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cungql( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zungql( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sormql( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const float* a, lapack_int* lda, + const float* tau, float* c, lapack_int* ldc, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dormql( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const double* a, lapack_int* lda, + const double* tau, double* c, lapack_int* ldc, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cunmql( char* 
side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmql( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sgerqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dgerqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* tau, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cgerqf( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zgerqf( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sorgrq( lapack_int* m, lapack_int* n, lapack_int* k, float* a, + lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorgrq( lapack_int* m, lapack_int* n, lapack_int* k, double* a, + lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cungrq( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zungrq( lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sormrq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const float* a, lapack_int* lda, + const float* tau, float* c, lapack_int* ldc, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dormrq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const double* a, lapack_int* lda, + const double* tau, double* c, lapack_int* ldc, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cunmrq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmrq( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_stzrzf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dtzrzf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* tau, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_ctzrzf( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void 
LAPACK_ztzrzf( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sormrz( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, const float* a, + lapack_int* lda, const float* tau, float* c, + lapack_int* ldc, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dormrz( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, const double* a, + lapack_int* lda, const double* tau, double* c, + lapack_int* ldc, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmrz( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmrz( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, lapack_complex_double* c, + lapack_int* ldc, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sggqrf( lapack_int* n, lapack_int* m, lapack_int* p, float* a, + lapack_int* lda, float* taua, float* b, lapack_int* ldb, + float* taub, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dggqrf( lapack_int* n, lapack_int* m, lapack_int* p, double* a, + lapack_int* lda, double* taua, double* b, lapack_int* ldb, + double* taub, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cggqrf( lapack_int* n, lapack_int* m, lapack_int* p, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* taua, lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* taub, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zggqrf( lapack_int* n, lapack_int* m, lapack_int* p, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* taua, lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* taub, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sggrqf( lapack_int* m, lapack_int* p, lapack_int* n, float* a, + lapack_int* lda, float* taua, float* b, lapack_int* ldb, + float* taub, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dggrqf( lapack_int* m, lapack_int* p, lapack_int* n, double* a, + lapack_int* lda, double* taua, double* b, lapack_int* ldb, + double* taub, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cggrqf( lapack_int* m, lapack_int* p, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* taua, lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* taub, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zggrqf( lapack_int* m, lapack_int* p, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* taua, lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* taub, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sgebrd( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* d, float* e, float* tauq, float* taup, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dgebrd( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* d, double* e, 
double* tauq, double* taup, + double* work, lapack_int* lwork, lapack_int *info ); +void LAPACK_cgebrd( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, float* d, float* e, + lapack_complex_float* tauq, lapack_complex_float* taup, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zgebrd( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, double* d, double* e, + lapack_complex_double* tauq, lapack_complex_double* taup, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, + lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab, + float* d, float* e, float* q, lapack_int* ldq, float* pt, + lapack_int* ldpt, float* c, lapack_int* ldc, float* work, + lapack_int *info ); +void LAPACK_dgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, + lapack_int* kl, lapack_int* ku, double* ab, + lapack_int* ldab, double* d, double* e, double* q, + lapack_int* ldq, double* pt, lapack_int* ldpt, double* c, + lapack_int* ldc, double* work, lapack_int *info ); +void LAPACK_cgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, + lapack_int* kl, lapack_int* ku, lapack_complex_float* ab, + lapack_int* ldab, float* d, float* e, + lapack_complex_float* q, lapack_int* ldq, + lapack_complex_float* pt, lapack_int* ldpt, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, + lapack_int* kl, lapack_int* ku, lapack_complex_double* ab, + lapack_int* ldab, double* d, double* e, + lapack_complex_double* q, lapack_int* ldq, + lapack_complex_double* pt, lapack_int* ldpt, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_sorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, + float* a, lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, + double* a, lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sormbr( char* vect, char* side, char* trans, lapack_int* m, + lapack_int* n, lapack_int* k, const float* a, + lapack_int* lda, const float* tau, float* c, + lapack_int* ldc, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dormbr( char* vect, char* side, char* trans, lapack_int* m, + lapack_int* n, lapack_int* k, const double* a, + lapack_int* lda, const double* tau, double* c, + lapack_int* ldc, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmbr( char* vect, char* side, char* trans, lapack_int* m, + lapack_int* n, lapack_int* k, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmbr( 
char* vect, char* side, char* trans, lapack_int* m, + lapack_int* n, lapack_int* k, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, lapack_complex_double* c, + lapack_int* ldc, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, + lapack_int* nru, lapack_int* ncc, float* d, float* e, + float* vt, lapack_int* ldvt, float* u, lapack_int* ldu, + float* c, lapack_int* ldc, float* work, lapack_int *info ); +void LAPACK_dbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, + lapack_int* nru, lapack_int* ncc, double* d, double* e, + double* vt, lapack_int* ldvt, double* u, lapack_int* ldu, + double* c, lapack_int* ldc, double* work, + lapack_int *info ); +void LAPACK_cbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, + lapack_int* nru, lapack_int* ncc, float* d, float* e, + lapack_complex_float* vt, lapack_int* ldvt, + lapack_complex_float* u, lapack_int* ldu, + lapack_complex_float* c, lapack_int* ldc, float* work, + lapack_int *info ); +void LAPACK_zbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, + lapack_int* nru, lapack_int* ncc, double* d, double* e, + lapack_complex_double* vt, lapack_int* ldvt, + lapack_complex_double* u, lapack_int* ldu, + lapack_complex_double* c, lapack_int* ldc, double* work, + lapack_int *info ); +void LAPACK_sbdsdc( char* uplo, char* compq, lapack_int* n, float* d, float* e, + float* u, lapack_int* ldu, float* vt, lapack_int* ldvt, + float* q, lapack_int* iq, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dbdsdc( char* uplo, char* compq, lapack_int* n, double* d, + double* e, double* u, lapack_int* ldu, double* vt, + lapack_int* ldvt, double* q, lapack_int* iq, double* work, + lapack_int* iwork, lapack_int *info ); +void LAPACK_ssytrd( char* uplo, lapack_int* n, float* a, lapack_int* lda, + float* d, float* e, float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dsytrd( char* uplo, lapack_int* n, double* a, lapack_int* lda, + double* d, double* e, double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sorgtr( char* uplo, lapack_int* n, float* a, lapack_int* lda, + const float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dorgtr( char* uplo, lapack_int* n, double* a, lapack_int* lda, + const double* tau, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_sormtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const float* a, lapack_int* lda, + const float* tau, float* c, lapack_int* ldc, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dormtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const double* a, lapack_int* lda, + const double* tau, double* c, lapack_int* ldc, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_chetrd( char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, float* d, float* e, + lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zhetrd( char* uplo, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, double* d, double* e, + lapack_complex_double* tau, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cungtr( char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zungtr( char* uplo, 
lapack_int* n, lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_zunmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_ssptrd( char* uplo, lapack_int* n, float* ap, float* d, float* e, + float* tau, lapack_int *info ); +void LAPACK_dsptrd( char* uplo, lapack_int* n, double* ap, double* d, double* e, + double* tau, lapack_int *info ); +void LAPACK_sopgtr( char* uplo, lapack_int* n, const float* ap, + const float* tau, float* q, lapack_int* ldq, float* work, + lapack_int *info ); +void LAPACK_dopgtr( char* uplo, lapack_int* n, const double* ap, + const double* tau, double* q, lapack_int* ldq, double* work, + lapack_int *info ); +void LAPACK_sopmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const float* ap, const float* tau, float* c, + lapack_int* ldc, float* work, lapack_int *info ); +void LAPACK_dopmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const double* ap, const double* tau, + double* c, lapack_int* ldc, double* work, + lapack_int *info ); +void LAPACK_chptrd( char* uplo, lapack_int* n, lapack_complex_float* ap, + float* d, float* e, lapack_complex_float* tau, + lapack_int *info ); +void LAPACK_zhptrd( char* uplo, lapack_int* n, lapack_complex_double* ap, + double* d, double* e, lapack_complex_double* tau, + lapack_int *info ); +void LAPACK_cupgtr( char* uplo, lapack_int* n, const lapack_complex_float* ap, + const lapack_complex_float* tau, lapack_complex_float* q, + lapack_int* ldq, lapack_complex_float* work, + lapack_int *info ); +void LAPACK_zupgtr( char* uplo, lapack_int* n, const lapack_complex_double* ap, + const lapack_complex_double* tau, lapack_complex_double* q, + lapack_int* ldq, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_cupmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const lapack_complex_float* ap, + const lapack_complex_float* tau, lapack_complex_float* c, + lapack_int* ldc, lapack_complex_float* work, + lapack_int *info ); +void LAPACK_zupmtr( char* side, char* uplo, char* trans, lapack_int* m, + lapack_int* n, const lapack_complex_double* ap, + const lapack_complex_double* tau, lapack_complex_double* c, + lapack_int* ldc, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_ssbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, + float* ab, lapack_int* ldab, float* d, float* e, float* q, + lapack_int* ldq, float* work, lapack_int *info ); +void LAPACK_dsbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, + double* ab, lapack_int* ldab, double* d, double* e, + double* q, lapack_int* ldq, double* work, + lapack_int *info ); +void LAPACK_chbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, + lapack_complex_float* ab, lapack_int* ldab, float* d, + float* e, lapack_complex_float* q, lapack_int* ldq, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zhbtrd( char* vect, char* uplo, lapack_int* 
n, lapack_int* kd, + lapack_complex_double* ab, lapack_int* ldab, double* d, + double* e, lapack_complex_double* q, lapack_int* ldq, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_ssterf( lapack_int* n, float* d, float* e, lapack_int *info ); +void LAPACK_dsterf( lapack_int* n, double* d, double* e, lapack_int *info ); +void LAPACK_ssteqr( char* compz, lapack_int* n, float* d, float* e, float* z, + lapack_int* ldz, float* work, lapack_int *info ); +void LAPACK_dsteqr( char* compz, lapack_int* n, double* d, double* e, double* z, + lapack_int* ldz, double* work, lapack_int *info ); +void LAPACK_csteqr( char* compz, lapack_int* n, float* d, float* e, + lapack_complex_float* z, lapack_int* ldz, float* work, + lapack_int *info ); +void LAPACK_zsteqr( char* compz, lapack_int* n, double* d, double* e, + lapack_complex_double* z, lapack_int* ldz, double* work, + lapack_int *info ); +void LAPACK_sstemr( char* jobz, char* range, lapack_int* n, float* d, float* e, + float* vl, float* vu, lapack_int* il, lapack_int* iu, + lapack_int* m, float* w, float* z, lapack_int* ldz, + lapack_int* nzc, lapack_int* isuppz, lapack_logical* tryrac, + float* work, lapack_int* lwork, lapack_int* iwork, + lapack_int* liwork, lapack_int *info ); +void LAPACK_dstemr( char* jobz, char* range, lapack_int* n, double* d, + double* e, double* vl, double* vu, lapack_int* il, + lapack_int* iu, lapack_int* m, double* w, double* z, + lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz, + lapack_logical* tryrac, double* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_cstemr( char* jobz, char* range, lapack_int* n, float* d, float* e, + float* vl, float* vu, lapack_int* il, lapack_int* iu, + lapack_int* m, float* w, lapack_complex_float* z, + lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz, + lapack_logical* tryrac, float* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_zstemr( char* jobz, char* range, lapack_int* n, double* d, + double* e, double* vl, double* vu, lapack_int* il, + lapack_int* iu, lapack_int* m, double* w, + lapack_complex_double* z, lapack_int* ldz, lapack_int* nzc, + lapack_int* isuppz, lapack_logical* tryrac, double* work, + lapack_int* lwork, lapack_int* iwork, lapack_int* liwork, + lapack_int *info ); +void LAPACK_sstedc( char* compz, lapack_int* n, float* d, float* e, float* z, + lapack_int* ldz, float* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_dstedc( char* compz, lapack_int* n, double* d, double* e, double* z, + lapack_int* ldz, double* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_cstedc( char* compz, lapack_int* n, float* d, float* e, + lapack_complex_float* z, lapack_int* ldz, + lapack_complex_float* work, lapack_int* lwork, float* rwork, + lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, + lapack_int *info ); +void LAPACK_zstedc( char* compz, lapack_int* n, double* d, double* e, + lapack_complex_double* z, lapack_int* ldz, + lapack_complex_double* work, lapack_int* lwork, + double* rwork, lapack_int* lrwork, lapack_int* iwork, + lapack_int* liwork, lapack_int *info ); +void LAPACK_sstegr( char* jobz, char* range, lapack_int* n, float* d, float* e, + float* vl, float* vu, lapack_int* il, lapack_int* iu, + float* abstol, lapack_int* m, float* w, float* z, + lapack_int* ldz, lapack_int* isuppz, float* work, + lapack_int* lwork, lapack_int* iwork, lapack_int* 
liwork, + lapack_int *info ); +void LAPACK_dstegr( char* jobz, char* range, lapack_int* n, double* d, + double* e, double* vl, double* vu, lapack_int* il, + lapack_int* iu, double* abstol, lapack_int* m, double* w, + double* z, lapack_int* ldz, lapack_int* isuppz, + double* work, lapack_int* lwork, lapack_int* iwork, + lapack_int* liwork, lapack_int *info ); +void LAPACK_cstegr( char* jobz, char* range, lapack_int* n, float* d, float* e, + float* vl, float* vu, lapack_int* il, lapack_int* iu, + float* abstol, lapack_int* m, float* w, + lapack_complex_float* z, lapack_int* ldz, + lapack_int* isuppz, float* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_zstegr( char* jobz, char* range, lapack_int* n, double* d, + double* e, double* vl, double* vu, lapack_int* il, + lapack_int* iu, double* abstol, lapack_int* m, double* w, + lapack_complex_double* z, lapack_int* ldz, + lapack_int* isuppz, double* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_spteqr( char* compz, lapack_int* n, float* d, float* e, float* z, + lapack_int* ldz, float* work, lapack_int *info ); +void LAPACK_dpteqr( char* compz, lapack_int* n, double* d, double* e, double* z, + lapack_int* ldz, double* work, lapack_int *info ); +void LAPACK_cpteqr( char* compz, lapack_int* n, float* d, float* e, + lapack_complex_float* z, lapack_int* ldz, float* work, + lapack_int *info ); +void LAPACK_zpteqr( char* compz, lapack_int* n, double* d, double* e, + lapack_complex_double* z, lapack_int* ldz, double* work, + lapack_int *info ); +void LAPACK_sstebz( char* range, char* order, lapack_int* n, float* vl, + float* vu, lapack_int* il, lapack_int* iu, float* abstol, + const float* d, const float* e, lapack_int* m, + lapack_int* nsplit, float* w, lapack_int* iblock, + lapack_int* isplit, float* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_dstebz( char* range, char* order, lapack_int* n, double* vl, + double* vu, lapack_int* il, lapack_int* iu, double* abstol, + const double* d, const double* e, lapack_int* m, + lapack_int* nsplit, double* w, lapack_int* iblock, + lapack_int* isplit, double* work, lapack_int* iwork, + lapack_int *info ); +void LAPACK_sstein( lapack_int* n, const float* d, const float* e, + lapack_int* m, const float* w, const lapack_int* iblock, + const lapack_int* isplit, float* z, lapack_int* ldz, + float* work, lapack_int* iwork, lapack_int* ifailv, + lapack_int *info ); +void LAPACK_dstein( lapack_int* n, const double* d, const double* e, + lapack_int* m, const double* w, const lapack_int* iblock, + const lapack_int* isplit, double* z, lapack_int* ldz, + double* work, lapack_int* iwork, lapack_int* ifailv, + lapack_int *info ); +void LAPACK_cstein( lapack_int* n, const float* d, const float* e, + lapack_int* m, const float* w, const lapack_int* iblock, + const lapack_int* isplit, lapack_complex_float* z, + lapack_int* ldz, float* work, lapack_int* iwork, + lapack_int* ifailv, lapack_int *info ); +void LAPACK_zstein( lapack_int* n, const double* d, const double* e, + lapack_int* m, const double* w, const lapack_int* iblock, + const lapack_int* isplit, lapack_complex_double* z, + lapack_int* ldz, double* work, lapack_int* iwork, + lapack_int* ifailv, lapack_int *info ); +void LAPACK_sdisna( char* job, lapack_int* m, lapack_int* n, const float* d, + float* sep, lapack_int *info ); +void LAPACK_ddisna( char* job, lapack_int* m, lapack_int* n, const double* d, + double* sep, lapack_int *info ); +void 
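/*
 * Hedged example for the tridiagonal eigensolvers declared above:
 * LAPACK_dsteqr with compz = 'I' computes all eigenvalues and
 * eigenvectors of a symmetric tridiagonal matrix. An editor's sketch
 * only; the matrix values here are made up for illustration.
 *
 *   char compz = 'I';
 *   lapack_int n = 3, ldz = 3, info;
 *   double d[3] = { 2.0, 2.0, 2.0 };   // diagonal; eigenvalues on exit
 *   double e[2] = { -1.0, -1.0 };      // off-diagonal, destroyed on exit
 *   double z[3*3], work[2*3 - 2];      // work is 2n-2 when compz != 'N'
 *   LAPACK_dsteqr(&compz, &n, d, e, z, &ldz, work, &info);
 *   // info == 0: d holds ascending eigenvalues, columns of z the vectors
 */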
LAPACK_ssygst( lapack_int* itype, char* uplo, lapack_int* n, float* a, + lapack_int* lda, const float* b, lapack_int* ldb, + lapack_int *info ); +void LAPACK_dsygst( lapack_int* itype, char* uplo, lapack_int* n, double* a, + lapack_int* lda, const double* b, lapack_int* ldb, + lapack_int *info ); +void LAPACK_chegst( lapack_int* itype, char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* b, lapack_int* ldb, + lapack_int *info ); +void LAPACK_zhegst( lapack_int* itype, char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* b, lapack_int* ldb, + lapack_int *info ); +void LAPACK_sspgst( lapack_int* itype, char* uplo, lapack_int* n, float* ap, + const float* bp, lapack_int *info ); +void LAPACK_dspgst( lapack_int* itype, char* uplo, lapack_int* n, double* ap, + const double* bp, lapack_int *info ); +void LAPACK_chpgst( lapack_int* itype, char* uplo, lapack_int* n, + lapack_complex_float* ap, const lapack_complex_float* bp, + lapack_int *info ); +void LAPACK_zhpgst( lapack_int* itype, char* uplo, lapack_int* n, + lapack_complex_double* ap, const lapack_complex_double* bp, + lapack_int *info ); +void LAPACK_ssbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, + lapack_int* kb, float* ab, lapack_int* ldab, + const float* bb, lapack_int* ldbb, float* x, + lapack_int* ldx, float* work, lapack_int *info ); +void LAPACK_dsbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, + lapack_int* kb, double* ab, lapack_int* ldab, + const double* bb, lapack_int* ldbb, double* x, + lapack_int* ldx, double* work, lapack_int *info ); +void LAPACK_chbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, + lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab, + const lapack_complex_float* bb, lapack_int* ldbb, + lapack_complex_float* x, lapack_int* ldx, + lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_zhbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, + lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab, + const lapack_complex_double* bb, lapack_int* ldbb, + lapack_complex_double* x, lapack_int* ldx, + lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_spbstf( char* uplo, lapack_int* n, lapack_int* kb, float* bb, + lapack_int* ldbb, lapack_int *info ); +void LAPACK_dpbstf( char* uplo, lapack_int* n, lapack_int* kb, double* bb, + lapack_int* ldbb, lapack_int *info ); +void LAPACK_cpbstf( char* uplo, lapack_int* n, lapack_int* kb, + lapack_complex_float* bb, lapack_int* ldbb, + lapack_int *info ); +void LAPACK_zpbstf( char* uplo, lapack_int* n, lapack_int* kb, + lapack_complex_double* bb, lapack_int* ldbb, + lapack_int *info ); +void LAPACK_sgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a, + lapack_int* lda, float* tau, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a, + lapack_int* lda, double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_cgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* tau, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sorghr( lapack_int* n, 
lapack_int* ilo, lapack_int* ihi, float* a, + lapack_int* lda, const float* tau, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a, + lapack_int* lda, const double* tau, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sormhr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* ilo, lapack_int* ihi, const float* a, + lapack_int* lda, const float* tau, float* c, + lapack_int* ldc, float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_dormhr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* ilo, lapack_int* ihi, const double* a, + lapack_int* lda, const double* tau, double* c, + lapack_int* ldc, double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, + lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, + lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_cunmhr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* ilo, lapack_int* ihi, + const lapack_complex_float* a, lapack_int* lda, + const lapack_complex_float* tau, lapack_complex_float* c, + lapack_int* ldc, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zunmhr( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* ilo, lapack_int* ihi, + const lapack_complex_double* a, lapack_int* lda, + const lapack_complex_double* tau, lapack_complex_double* c, + lapack_int* ldc, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_sgebal( char* job, lapack_int* n, float* a, lapack_int* lda, + lapack_int* ilo, lapack_int* ihi, float* scale, + lapack_int *info ); +void LAPACK_dgebal( char* job, lapack_int* n, double* a, lapack_int* lda, + lapack_int* ilo, lapack_int* ihi, double* scale, + lapack_int *info ); +void LAPACK_cgebal( char* job, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int* ilo, lapack_int* ihi, + float* scale, lapack_int *info ); +void LAPACK_zgebal( char* job, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int* ilo, lapack_int* ihi, + double* scale, lapack_int *info ); +void LAPACK_sgebak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const float* scale, lapack_int* m, + float* v, lapack_int* ldv, lapack_int *info ); +void LAPACK_dgebak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const double* scale, lapack_int* m, + double* v, lapack_int* ldv, lapack_int *info ); +void LAPACK_cgebak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const float* scale, lapack_int* m, + lapack_complex_float* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_zgebak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const double* scale, lapack_int* m, + lapack_complex_double* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_shseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, float* h, lapack_int* ldh, float* wr, + float* wi, float* z, lapack_int* ldz, float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_dhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo, + 
lapack_int* ihi, double* h, lapack_int* ldh, double* wr, + double* wi, double* z, lapack_int* ldz, double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_chseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, lapack_complex_float* h, lapack_int* ldh, + lapack_complex_float* w, lapack_complex_float* z, + lapack_int* ldz, lapack_complex_float* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_zhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, lapack_complex_double* h, lapack_int* ldh, + lapack_complex_double* w, lapack_complex_double* z, + lapack_int* ldz, lapack_complex_double* work, + lapack_int* lwork, lapack_int *info ); +void LAPACK_shsein( char* job, char* eigsrc, char* initv, + lapack_logical* select, lapack_int* n, const float* h, + lapack_int* ldh, float* wr, const float* wi, float* vl, + lapack_int* ldvl, float* vr, lapack_int* ldvr, + lapack_int* mm, lapack_int* m, float* work, + lapack_int* ifaill, lapack_int* ifailr, lapack_int *info ); +void LAPACK_dhsein( char* job, char* eigsrc, char* initv, + lapack_logical* select, lapack_int* n, const double* h, + lapack_int* ldh, double* wr, const double* wi, double* vl, + lapack_int* ldvl, double* vr, lapack_int* ldvr, + lapack_int* mm, lapack_int* m, double* work, + lapack_int* ifaill, lapack_int* ifailr, lapack_int *info ); +void LAPACK_chsein( char* job, char* eigsrc, char* initv, + const lapack_logical* select, lapack_int* n, + const lapack_complex_float* h, lapack_int* ldh, + lapack_complex_float* w, lapack_complex_float* vl, + lapack_int* ldvl, lapack_complex_float* vr, + lapack_int* ldvr, lapack_int* mm, lapack_int* m, + lapack_complex_float* work, float* rwork, + lapack_int* ifaill, lapack_int* ifailr, lapack_int *info ); +void LAPACK_zhsein( char* job, char* eigsrc, char* initv, + const lapack_logical* select, lapack_int* n, + const lapack_complex_double* h, lapack_int* ldh, + lapack_complex_double* w, lapack_complex_double* vl, + lapack_int* ldvl, lapack_complex_double* vr, + lapack_int* ldvr, lapack_int* mm, lapack_int* m, + lapack_complex_double* work, double* rwork, + lapack_int* ifaill, lapack_int* ifailr, lapack_int *info ); +void LAPACK_strevc( char* side, char* howmny, lapack_logical* select, + lapack_int* n, const float* t, lapack_int* ldt, float* vl, + lapack_int* ldvl, float* vr, lapack_int* ldvr, + lapack_int* mm, lapack_int* m, float* work, + lapack_int *info ); +void LAPACK_dtrevc( char* side, char* howmny, lapack_logical* select, + lapack_int* n, const double* t, lapack_int* ldt, double* vl, + lapack_int* ldvl, double* vr, lapack_int* ldvr, + lapack_int* mm, lapack_int* m, double* work, + lapack_int *info ); +void LAPACK_ctrevc( char* side, char* howmny, const lapack_logical* select, + lapack_int* n, lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* vl, lapack_int* ldvl, + lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm, + lapack_int* m, lapack_complex_float* work, float* rwork, + lapack_int *info ); +void LAPACK_ztrevc( char* side, char* howmny, const lapack_logical* select, + lapack_int* n, lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* vl, lapack_int* ldvl, + lapack_complex_double* vr, lapack_int* ldvr, lapack_int* mm, + lapack_int* m, lapack_complex_double* work, double* rwork, + lapack_int *info ); +void LAPACK_strsna( char* job, char* howmny, const lapack_logical* select, + lapack_int* n, const float* t, lapack_int* ldt, + const float* vl, lapack_int* ldvl, const float* vr, + 
lapack_int* ldvr, float* s, float* sep, lapack_int* mm, + lapack_int* m, float* work, lapack_int* ldwork, + lapack_int* iwork, lapack_int *info ); +void LAPACK_dtrsna( char* job, char* howmny, const lapack_logical* select, + lapack_int* n, const double* t, lapack_int* ldt, + const double* vl, lapack_int* ldvl, const double* vr, + lapack_int* ldvr, double* s, double* sep, lapack_int* mm, + lapack_int* m, double* work, lapack_int* ldwork, + lapack_int* iwork, lapack_int *info ); +void LAPACK_ctrsna( char* job, char* howmny, const lapack_logical* select, + lapack_int* n, const lapack_complex_float* t, + lapack_int* ldt, const lapack_complex_float* vl, + lapack_int* ldvl, const lapack_complex_float* vr, + lapack_int* ldvr, float* s, float* sep, lapack_int* mm, + lapack_int* m, lapack_complex_float* work, + lapack_int* ldwork, float* rwork, lapack_int *info ); +void LAPACK_ztrsna( char* job, char* howmny, const lapack_logical* select, + lapack_int* n, const lapack_complex_double* t, + lapack_int* ldt, const lapack_complex_double* vl, + lapack_int* ldvl, const lapack_complex_double* vr, + lapack_int* ldvr, double* s, double* sep, lapack_int* mm, + lapack_int* m, lapack_complex_double* work, + lapack_int* ldwork, double* rwork, lapack_int *info ); +void LAPACK_strexc( char* compq, lapack_int* n, float* t, lapack_int* ldt, + float* q, lapack_int* ldq, lapack_int* ifst, + lapack_int* ilst, float* work, lapack_int *info ); +void LAPACK_dtrexc( char* compq, lapack_int* n, double* t, lapack_int* ldt, + double* q, lapack_int* ldq, lapack_int* ifst, + lapack_int* ilst, double* work, lapack_int *info ); +void LAPACK_ctrexc( char* compq, lapack_int* n, lapack_complex_float* t, + lapack_int* ldt, lapack_complex_float* q, lapack_int* ldq, + lapack_int* ifst, lapack_int* ilst, lapack_int *info ); +void LAPACK_ztrexc( char* compq, lapack_int* n, lapack_complex_double* t, + lapack_int* ldt, lapack_complex_double* q, lapack_int* ldq, + lapack_int* ifst, lapack_int* ilst, lapack_int *info ); +void LAPACK_strsen( char* job, char* compq, const lapack_logical* select, + lapack_int* n, float* t, lapack_int* ldt, float* q, + lapack_int* ldq, float* wr, float* wi, lapack_int* m, + float* s, float* sep, float* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_dtrsen( char* job, char* compq, const lapack_logical* select, + lapack_int* n, double* t, lapack_int* ldt, double* q, + lapack_int* ldq, double* wr, double* wi, lapack_int* m, + double* s, double* sep, double* work, lapack_int* lwork, + lapack_int* iwork, lapack_int* liwork, lapack_int *info ); +void LAPACK_ctrsen( char* job, char* compq, const lapack_logical* select, + lapack_int* n, lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* q, lapack_int* ldq, + lapack_complex_float* w, lapack_int* m, float* s, + float* sep, lapack_complex_float* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_ztrsen( char* job, char* compq, const lapack_logical* select, + lapack_int* n, lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* q, lapack_int* ldq, + lapack_complex_double* w, lapack_int* m, double* s, + double* sep, lapack_complex_double* work, lapack_int* lwork, + lapack_int *info ); +void LAPACK_strsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m, + lapack_int* n, const float* a, lapack_int* lda, + const float* b, lapack_int* ldb, float* c, lapack_int* ldc, + float* scale, lapack_int *info ); +void LAPACK_dtrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m, 
+ lapack_int* n, const double* a, lapack_int* lda, + const double* b, lapack_int* ldb, double* c, + lapack_int* ldc, double* scale, lapack_int *info ); +void LAPACK_ctrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m, + lapack_int* n, const lapack_complex_float* a, + lapack_int* lda, const lapack_complex_float* b, + lapack_int* ldb, lapack_complex_float* c, lapack_int* ldc, + float* scale, lapack_int *info ); +void LAPACK_ztrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m, + lapack_int* n, const lapack_complex_double* a, + lapack_int* lda, const lapack_complex_double* b, + lapack_int* ldb, lapack_complex_double* c, lapack_int* ldc, + double* scale, lapack_int *info ); +void LAPACK_sgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, float* a, lapack_int* lda, float* b, + lapack_int* ldb, float* q, lapack_int* ldq, float* z, + lapack_int* ldz, lapack_int *info ); +void LAPACK_dgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, double* a, lapack_int* lda, double* b, + lapack_int* ldb, double* q, lapack_int* ldq, double* z, + lapack_int* ldz, lapack_int *info ); +void LAPACK_cgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* q, lapack_int* ldq, + lapack_complex_float* z, lapack_int* ldz, + lapack_int *info ); +void LAPACK_zgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* q, lapack_int* ldq, + lapack_complex_double* z, lapack_int* ldz, + lapack_int *info ); +void LAPACK_sggbal( char* job, lapack_int* n, float* a, lapack_int* lda, + float* b, lapack_int* ldb, lapack_int* ilo, lapack_int* ihi, + float* lscale, float* rscale, float* work, + lapack_int *info ); +void LAPACK_dggbal( char* job, lapack_int* n, double* a, lapack_int* lda, + double* b, lapack_int* ldb, lapack_int* ilo, + lapack_int* ihi, double* lscale, double* rscale, + double* work, lapack_int *info ); +void LAPACK_cggbal( char* job, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* b, lapack_int* ldb, + lapack_int* ilo, lapack_int* ihi, float* lscale, + float* rscale, float* work, lapack_int *info ); +void LAPACK_zggbal( char* job, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, + lapack_int* ilo, lapack_int* ihi, double* lscale, + double* rscale, double* work, lapack_int *info ); +void LAPACK_sggbak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const float* lscale, const float* rscale, + lapack_int* m, float* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_dggbak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const double* lscale, const double* rscale, + lapack_int* m, double* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_cggbak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const float* lscale, const float* rscale, + lapack_int* m, lapack_complex_float* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_zggbak( char* job, char* side, lapack_int* n, lapack_int* ilo, + lapack_int* ihi, const double* lscale, const double* rscale, + lapack_int* m, lapack_complex_double* v, lapack_int* ldv, + lapack_int *info ); +void LAPACK_shgeqz( char* job, char* compq, char* compz, 
lapack_int* n,
+                    lapack_int* ilo, lapack_int* ihi, float* h, lapack_int* ldh,
+                    float* t, lapack_int* ldt, float* alphar, float* alphai,
+                    float* beta, float* q, lapack_int* ldq, float* z,
+                    lapack_int* ldz, float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_dhgeqz( char* job, char* compq, char* compz, lapack_int* n,
+                    lapack_int* ilo, lapack_int* ihi, double* h,
+                    lapack_int* ldh, double* t, lapack_int* ldt, double* alphar,
+                    double* alphai, double* beta, double* q, lapack_int* ldq,
+                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_chgeqz( char* job, char* compq, char* compz, lapack_int* n,
+                    lapack_int* ilo, lapack_int* ihi, lapack_complex_float* h,
+                    lapack_int* ldh, lapack_complex_float* t, lapack_int* ldt,
+                    lapack_complex_float* alpha, lapack_complex_float* beta,
+                    lapack_complex_float* q, lapack_int* ldq,
+                    lapack_complex_float* z, lapack_int* ldz,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int *info );
+void LAPACK_zhgeqz( char* job, char* compq, char* compz, lapack_int* n,
+                    lapack_int* ilo, lapack_int* ihi, lapack_complex_double* h,
+                    lapack_int* ldh, lapack_complex_double* t, lapack_int* ldt,
+                    lapack_complex_double* alpha, lapack_complex_double* beta,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int *info );
+void LAPACK_stgevc( char* side, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const float* s, lapack_int* lds,
+                    const float* p, lapack_int* ldp, float* vl,
+                    lapack_int* ldvl, float* vr, lapack_int* ldvr,
+                    lapack_int* mm, lapack_int* m, float* work,
+                    lapack_int *info );
+void LAPACK_dtgevc( char* side, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const double* s, lapack_int* lds,
+                    const double* p, lapack_int* ldp, double* vl,
+                    lapack_int* ldvl, double* vr, lapack_int* ldvr,
+                    lapack_int* mm, lapack_int* m, double* work,
+                    lapack_int *info );
+void LAPACK_ctgevc( char* side, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const lapack_complex_float* s,
+                    lapack_int* lds, const lapack_complex_float* p,
+                    lapack_int* ldp, lapack_complex_float* vl, lapack_int* ldvl,
+                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,
+                    lapack_int* m, lapack_complex_float* work, float* rwork,
+                    lapack_int *info );
+void LAPACK_ztgevc( char* side, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const lapack_complex_double* s,
+                    lapack_int* lds, const lapack_complex_double* p,
+                    lapack_int* ldp, lapack_complex_double* vl,
+                    lapack_int* ldvl, lapack_complex_double* vr,
+                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int *info );
+void LAPACK_stgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
+                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,
+                    lapack_int* ifst, lapack_int* ilst, float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_dtgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
+                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                    double* q, lapack_int* ldq, double* z, lapack_int* ldz,
+                    lapack_int* ifst, lapack_int* ilst, double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_ctgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* q, lapack_int* ldq,
+                    lapack_complex_float* z, lapack_int* ldz, lapack_int* ifst,
+                    lapack_int* ilst, lapack_int *info );
+void LAPACK_ztgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_complex_double* z, lapack_int* ldz, lapack_int* ifst,
+                    lapack_int* ilst, lapack_int *info );
+void LAPACK_stgsen( lapack_int* ijob, lapack_logical* wantq,
+                    lapack_logical* wantz, const lapack_logical* select,
+                    lapack_int* n, float* a, lapack_int* lda, float* b,
+                    lapack_int* ldb, float* alphar, float* alphai, float* beta,
+                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,
+                    lapack_int* m, float* pl, float* pr, float* dif,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_dtgsen( lapack_int* ijob, lapack_logical* wantq,
+                    lapack_logical* wantz, const lapack_logical* select,
+                    lapack_int* n, double* a, lapack_int* lda, double* b,
+                    lapack_int* ldb, double* alphar, double* alphai,
+                    double* beta, double* q, lapack_int* ldq, double* z,
+                    lapack_int* ldz, lapack_int* m, double* pl, double* pr,
+                    double* dif, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_ctgsen( lapack_int* ijob, lapack_logical* wantq,
+                    lapack_logical* wantz, const lapack_logical* select,
+                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* alpha, lapack_complex_float* beta,
+                    lapack_complex_float* q, lapack_int* ldq,
+                    lapack_complex_float* z, lapack_int* ldz, lapack_int* m,
+                    float* pl, float* pr, float* dif,
+                    lapack_complex_float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_ztgsen( lapack_int* ijob, lapack_logical* wantq,
+                    lapack_logical* wantz, const lapack_logical* select,
+                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* alpha, lapack_complex_double* beta,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_complex_double* z, lapack_int* ldz, lapack_int* m,
+                    double* pl, double* pr, double* dif,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_stgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
+                    const float* a, lapack_int* lda, const float* b,
+                    lapack_int* ldb, float* c, lapack_int* ldc, const float* d,
+                    lapack_int* ldd, const float* e, lapack_int* lde, float* f,
+                    lapack_int* ldf, float* scale, float* dif, float* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_dtgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
+                    const double* a, lapack_int* lda, const double* b,
+                    lapack_int* ldb, double* c, lapack_int* ldc,
+                    const double* d, lapack_int* ldd, const double* e,
+                    lapack_int* lde, double* f, lapack_int* ldf, double* scale,
+                    double* dif, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_ctgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    const lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* c, lapack_int* ldc,
+                    const lapack_complex_float* d, lapack_int* ldd,
+                    const lapack_complex_float* e, lapack_int* lde,
+                    lapack_complex_float* f, lapack_int* ldf, float* scale,
+                    float* dif, lapack_complex_float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_ztgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    const lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* c, lapack_int* ldc,
+                    const lapack_complex_double* d, lapack_int* ldd,
+                    const lapack_complex_double* e, lapack_int* lde,
+                    lapack_complex_double* f, lapack_int* ldf, double* scale,
+                    double* dif, lapack_complex_double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_stgsna( char* job, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const float* a, lapack_int* lda,
+                    const float* b, lapack_int* ldb, const float* vl,
+                    lapack_int* ldvl, const float* vr, lapack_int* ldvr,
+                    float* s, float* dif, lapack_int* mm, lapack_int* m,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dtgsna( char* job, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const double* a, lapack_int* lda,
+                    const double* b, lapack_int* ldb, const double* vl,
+                    lapack_int* ldvl, const double* vr, lapack_int* ldvr,
+                    double* s, double* dif, lapack_int* mm, lapack_int* m,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_ctgsna( char* job, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, const lapack_complex_float* b,
+                    lapack_int* ldb, const lapack_complex_float* vl,
+                    lapack_int* ldvl, const lapack_complex_float* vr,
+                    lapack_int* ldvr, float* s, float* dif, lapack_int* mm,
+                    lapack_int* m, lapack_complex_float* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_ztgsna( char* job, char* howmny, const lapack_logical* select,
+                    lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, const lapack_complex_double* b,
+                    lapack_int* ldb, const lapack_complex_double* vl,
+                    lapack_int* ldvl, const lapack_complex_double* vr,
+                    lapack_int* ldvr, double* s, double* dif, lapack_int* mm,
+                    lapack_int* m, lapack_complex_double* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_sggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, float* a, lapack_int* lda,
+                    float* b, lapack_int* ldb, float* tola, float* tolb,
+                    lapack_int* k, lapack_int* l, float* u, lapack_int* ldu,
+                    float* v, lapack_int* ldv, float* q, lapack_int* ldq,
+                    lapack_int* iwork, float* tau, float* work,
+                    lapack_int *info );
+void LAPACK_dggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, double* a, lapack_int* lda,
+                    double* b, lapack_int* ldb, double* tola, double* tolb,
+                    lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,
+                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,
+                    lapack_int* iwork, double* tau, double* work,
+                    lapack_int *info );
+void LAPACK_cggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
+                    float* tola, float* tolb, lapack_int* k, lapack_int* l,
+                    lapack_complex_float* u, lapack_int* ldu,
+                    lapack_complex_float* v, lapack_int* ldv,
+                    lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,
+                    float* rwork, lapack_complex_float* tau,
+                    lapack_complex_float* work, lapack_int *info );
+void LAPACK_zggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
+                    double* tola, double* tolb, lapack_int* k, lapack_int* l,
+                    lapack_complex_double* u, lapack_int* ldu,
+                    lapack_complex_double* v, lapack_int* ldv,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_int* iwork, double* rwork,
+                    lapack_complex_double* tau, lapack_complex_double* work,
+                    lapack_int *info );
+void LAPACK_stgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
+                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                    float* tola, float* tolb, float* alpha, float* beta,
+                    float* u, lapack_int* ldu, float* v, lapack_int* ldv,
+                    float* q, lapack_int* ldq, float* work, lapack_int* ncycle,
+                    lapack_int *info );
+void LAPACK_dtgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
+                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                    double* tola, double* tolb, double* alpha, double* beta,
+                    double* u, lapack_int* ldu, double* v, lapack_int* ldv,
+                    double* q, lapack_int* ldq, double* work,
+                    lapack_int* ncycle, lapack_int *info );
+void LAPACK_ctgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* tola,
+                    float* tolb, float* alpha, float* beta,
+                    lapack_complex_float* u, lapack_int* ldu,
+                    lapack_complex_float* v, lapack_int* ldv,
+                    lapack_complex_float* q, lapack_int* ldq,
+                    lapack_complex_float* work, lapack_int* ncycle,
+                    lapack_int *info );
+void LAPACK_ztgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* tola,
+                    double* tolb, double* alpha, double* beta,
+                    lapack_complex_double* u, lapack_int* ldu,
+                    lapack_complex_double* v, lapack_int* ldv,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_complex_double* work, lapack_int* ncycle,
+                    lapack_int *info );
+void LAPACK_sgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                   float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_dgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                   double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_cgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                   lapack_complex_float* a, lapack_int* lda,
+                   lapack_complex_float* b, lapack_int* ldb,
+                   lapack_complex_float* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_zgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                   lapack_complex_double* a, lapack_int* lda,
+                   lapack_complex_double* b, lapack_int* ldb,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_sgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb,
+                    lapack_int* jpvt, float* rcond, lapack_int* rank,
+                    float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_dgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
+                    lapack_int* lda, double* b, lapack_int* ldb,
+                    lapack_int* jpvt, double* rcond, lapack_int* rank,
+                    double* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_cgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, lapack_int* jpvt,
+                    float* rcond, lapack_int* rank, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int *info );
+void LAPACK_zgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, lapack_int* jpvt,
+                    double* rcond, lapack_int* rank,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int *info );
+void LAPACK_sgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb, float* s,
+                    float* rcond, lapack_int* rank, float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_dgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
+                    lapack_int* lda, double* b, lapack_int* ldb, double* s,
+                    double* rcond, lapack_int* rank, double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_cgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* s,
+                    float* rcond, lapack_int* rank, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int *info );
+void LAPACK_zgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* s,
+                    double* rcond, lapack_int* rank,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int *info );
+void LAPACK_sgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb, float* s,
+                    float* rcond, lapack_int* rank, float* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_dgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
+                    lapack_int* lda, double* b, lapack_int* ldb, double* s,
+                    double* rcond, lapack_int* rank, double* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* s,
+                    float* rcond, lapack_int* rank, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_zgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* s,
+                    double* rcond, lapack_int* rank,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_sgglse( lapack_int* m, lapack_int* n, lapack_int* p, float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb, float* c,
+                    float* d, float* x, float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_dgglse( lapack_int* m, lapack_int* n, lapack_int* p, double* a,
+                    lapack_int* lda, double* b, lapack_int* ldb, double* c,
+                    double* d, double* x, double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_cgglse( lapack_int* m, lapack_int* n, lapack_int* p,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* c, lapack_complex_float* d,
+                    lapack_complex_float* x, lapack_complex_float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_zgglse( lapack_int* m, lapack_int* n, lapack_int* p,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* c, lapack_complex_double* d,
+                    lapack_complex_double* x, lapack_complex_double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_sggglm( lapack_int* n, lapack_int* m, lapack_int* p, float* a,
+                    lapack_int* lda, float* b, lapack_int* ldb, float* d,
+                    float* x, float* y, float* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_dggglm( lapack_int* n, lapack_int* m, lapack_int* p, double* a,
+                    lapack_int* lda, double* b, lapack_int* ldb, double* d,
+                    double* x, double* y, double* work, lapack_int* lwork,
+                    lapack_int *info );
+void LAPACK_cggglm( lapack_int* n, lapack_int* m, lapack_int* p,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* d, lapack_complex_float* x,
+                    lapack_complex_float* y, lapack_complex_float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_zggglm( lapack_int* n, lapack_int* m, lapack_int* p,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* d, lapack_complex_double* x,
+                    lapack_complex_double* y, lapack_complex_double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_ssyev( char* jobz, char* uplo, lapack_int* n, float* a,
+                   lapack_int* lda, float* w, float* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_dsyev( char* jobz, char* uplo, lapack_int* n, double* a,
+                   lapack_int* lda, double* w, double* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_cheev( char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_float* a, lapack_int* lda, float* w,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_int *info );
+void LAPACK_zheev( char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_double* a, lapack_int* lda, double* w,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   double* rwork, lapack_int *info );
+void LAPACK_ssyevd( char* jobz, char* uplo, lapack_int* n, float* a,
+                    lapack_int* lda, float* w, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_dsyevd( char* jobz, char* uplo, lapack_int* n, double* a,
+                    lapack_int* lda, double* w, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_cheevd( char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda, float* w,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zheevd( char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda, double* w,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_ssyevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    float* a, lapack_int* lda, float* vl, float* vu,
+                    lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, float* z, lapack_int* ldz,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_dsyevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    double* a, lapack_int* lda, double* vl, double* vu,
+                    lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, double* z, lapack_int* ldz,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_cheevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda, float* vl,
+                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_zheevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda, double* vl,
+                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_ssyevr( char* jobz, char* range, char* uplo, lapack_int* n,
+                    float* a, lapack_int* lda, float* vl, float* vu,
+                    lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, float* z, lapack_int* ldz,
+                    lapack_int* isuppz, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_dsyevr( char* jobz, char* range, char* uplo, lapack_int* n,
+                    double* a, lapack_int* lda, double* vl, double* vu,
+                    lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, double* z, lapack_int* ldz,
+                    lapack_int* isuppz, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_cheevr( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda, float* vl,
+                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_int* isuppz,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zheevr( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda, double* vl,
+                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_int* isuppz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_sspev( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
+                   float* z, lapack_int* ldz, float* work, lapack_int *info );
+void LAPACK_dspev( char* jobz, char* uplo, lapack_int* n, double* ap, double* w,
+                   double* z, lapack_int* ldz, double* work, lapack_int *info );
+void LAPACK_chpev( char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_float* ap, float* w, lapack_complex_float* z,
+                   lapack_int* ldz, lapack_complex_float* work, float* rwork,
+                   lapack_int *info );
+void LAPACK_zhpev( char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_double* ap, double* w,
+                   lapack_complex_double* z, lapack_int* ldz,
+                   lapack_complex_double* work, double* rwork,
+                   lapack_int *info );
+void LAPACK_sspevd( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
+                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_dspevd( char* jobz, char* uplo, lapack_int* n, double* ap,
+                    double* w, double* z, lapack_int* ldz, double* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_chpevd( char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_float* ap, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int* lrwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_zhpevd( char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_double* ap, double* w,
+                    lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_sspevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    float* ap, float* vl, float* vu, lapack_int* il,
+                    lapack_int* iu, float* abstol, lapack_int* m, float* w,
+                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_dspevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    double* ap, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_chpevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_float* ap, float* vl, float* vu,
+                    lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work, float* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_zhpevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_complex_double* ap, double* vl, double* vu,
+                    lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_complex_double* work, double* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_ssbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                   float* ab, lapack_int* ldab, float* w, float* z,
+                   lapack_int* ldz, float* work, lapack_int *info );
+void LAPACK_dsbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                   double* ab, lapack_int* ldab, double* w, double* z,
+                   lapack_int* ldz, double* work, lapack_int *info );
+void LAPACK_chbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                   lapack_complex_float* ab, lapack_int* ldab, float* w,
+                   lapack_complex_float* z, lapack_int* ldz,
+                   lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zhbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                   lapack_complex_double* ab, lapack_int* ldab, double* w,
+                   lapack_complex_double* z, lapack_int* ldz,
+                   lapack_complex_double* work, double* rwork,
+                   lapack_int *info );
+void LAPACK_ssbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                    float* ab, lapack_int* ldab, float* w, float* z,
+                    lapack_int* ldz, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_dsbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                    double* ab, lapack_int* ldab, double* w, double* z,
+                    lapack_int* ldz, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_chbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                    lapack_complex_float* ab, lapack_int* ldab, float* w,
+                    lapack_complex_float* z, lapack_int* ldz,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zhbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
+                    lapack_complex_double* ab, lapack_int* ldab, double* w,
+                    lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_ssbevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* kd, float* ab, lapack_int* ldab, float* q,
+                    lapack_int* ldq, float* vl, float* vu, lapack_int* il,
+                    lapack_int* iu, float* abstol, lapack_int* m, float* w,
+                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_dsbevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* kd, double* ab, lapack_int* ldab, double* q,
+                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_chbevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_complex_float* q, lapack_int* ldq, float* vl,
+                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work, float* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_zhbevx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_complex_double* q, lapack_int* ldq, double* vl,
+                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_complex_double* work, double* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_sstev( char* jobz, lapack_int* n, float* d, float* e, float* z,
+                   lapack_int* ldz, float* work, lapack_int *info );
+void LAPACK_dstev( char* jobz, lapack_int* n, double* d, double* e, double* z,
+                   lapack_int* ldz, double* work, lapack_int *info );
+void LAPACK_sstevd( char* jobz, lapack_int* n, float* d, float* e, float* z,
+                    lapack_int* ldz, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_dstevd( char* jobz, lapack_int* n, double* d, double* e, double* z,
+                    lapack_int* ldz, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_sstevx( char* jobz, char* range, lapack_int* n, float* d, float* e,
+                    float* vl, float* vu, lapack_int* il, lapack_int* iu,
+                    float* abstol, lapack_int* m, float* w, float* z,
+                    lapack_int* ldz, float* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_dstevx( char* jobz, char* range, lapack_int* n, double* d,
+                    double* e, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_sstevr( char* jobz, char* range, lapack_int* n, float* d, float* e,
+                    float* vl, float* vu, lapack_int* il, lapack_int* iu,
+                    float* abstol, lapack_int* m, float* w, float* z,
+                    lapack_int* ldz, lapack_int* isuppz, float* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_dstevr( char* jobz, char* range, lapack_int* n, double* d,
+                    double* e, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, lapack_int* isuppz,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_sgees( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
+                   lapack_int* n, float* a, lapack_int* lda, lapack_int* sdim,
+                   float* wr, float* wi, float* vs, lapack_int* ldvs,
+                   float* work, lapack_int* lwork, lapack_logical* bwork,
+                   lapack_int *info );
+void LAPACK_dgees( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
+                   lapack_int* n, double* a, lapack_int* lda, lapack_int* sdim,
+                   double* wr, double* wi, double* vs, lapack_int* ldvs,
+                   double* work, lapack_int* lwork, lapack_logical* bwork,
+                   lapack_int *info );
+void LAPACK_cgees( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
+                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,
+                   lapack_int* sdim, lapack_complex_float* w,
+                   lapack_complex_float* vs, lapack_int* ldvs,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_logical* bwork, lapack_int *info );
+void LAPACK_zgees( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
+                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,
+                   lapack_int* sdim, lapack_complex_double* w,
+                   lapack_complex_double* vs, lapack_int* ldvs,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   double* rwork, lapack_logical* bwork, lapack_int *info );
+void LAPACK_sgeesx( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
+                    char* sense, lapack_int* n, float* a, lapack_int* lda,
+                    lapack_int* sdim, float* wr, float* wi, float* vs,
+                    lapack_int* ldvs, float* rconde, float* rcondv, float* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_logical* bwork, lapack_int *info );
+void LAPACK_dgeesx( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
+                    char* sense, lapack_int* n, double* a, lapack_int* lda,
+                    lapack_int* sdim, double* wr, double* wi, double* vs,
+                    lapack_int* ldvs, double* rconde, double* rcondv,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_cgeesx( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
+                    char* sense, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int* sdim, lapack_complex_float* w,
+                    lapack_complex_float* vs, lapack_int* ldvs, float* rconde,
+                    float* rcondv, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_zgeesx( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
+                    char* sense, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int* sdim, lapack_complex_double* w,
+                    lapack_complex_double* vs, lapack_int* ldvs, double* rconde,
+                    double* rcondv, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_sgeev( char* jobvl, char* jobvr, lapack_int* n, float* a,
+                   lapack_int* lda, float* wr, float* wi, float* vl,
+                   lapack_int* ldvl, float* vr, lapack_int* ldvr, float* work,
+                   lapack_int* lwork, lapack_int *info );
+void LAPACK_dgeev( char* jobvl, char* jobvr, lapack_int* n, double* a,
+                   lapack_int* lda, double* wr, double* wi, double* vl,
+                   lapack_int* ldvl, double* vr, lapack_int* ldvr, double* work,
+                   lapack_int* lwork, lapack_int *info );
+void LAPACK_cgeev( char* jobvl, char* jobvr, lapack_int* n,
+                   lapack_complex_float* a, lapack_int* lda,
+                   lapack_complex_float* w, lapack_complex_float* vl,
+                   lapack_int* ldvl, lapack_complex_float* vr, lapack_int* ldvr,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_int *info );
+void LAPACK_zgeev( char* jobvl, char* jobvr, lapack_int* n,
+                   lapack_complex_double* a, lapack_int* lda,
+                   lapack_complex_double* w, lapack_complex_double* vl,
+                   lapack_int* ldvl, lapack_complex_double* vr,
+                   lapack_int* ldvr, lapack_complex_double* work,
+                   lapack_int* lwork, double* rwork, lapack_int *info );
+void LAPACK_sgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, float* a, lapack_int* lda, float* wr,
+                    float* wi, float* vl, lapack_int* ldvl, float* vr,
+                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
+                    float* scale, float* abnrm, float* rconde, float* rcondv,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_dgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, double* a, lapack_int* lda, double* wr,
+                    double* wi, double* vl, lapack_int* ldvl, double* vr,
+                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
+                    double* scale, double* abnrm, double* rconde,
+                    double* rcondv, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_cgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* w, lapack_complex_float* vl,
+                    lapack_int* ldvl, lapack_complex_float* vr,
+                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
+                    float* scale, float* abnrm, float* rconde, float* rcondv,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int *info );
+void LAPACK_zgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* w, lapack_complex_double* vl,
+                    lapack_int* ldvl, lapack_complex_double* vr,
+                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
+                    double* scale, double* abnrm, double* rconde,
+                    double* rcondv, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_int *info );
+void LAPACK_sgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
+                    float* a, lapack_int* lda, float* s, float* u,
+                    lapack_int* ldu, float* vt, lapack_int* ldvt, float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_dgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
+                    double* a, lapack_int* lda, double* s, double* u,
+                    lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_cgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda, float* s,
+                    lapack_complex_float* u, lapack_int* ldu,
+                    lapack_complex_float* vt, lapack_int* ldvt,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int *info );
+void LAPACK_zgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda, double* s,
+                    lapack_complex_double* u, lapack_int* ldu,
+                    lapack_complex_double* vt, lapack_int* ldvt,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int *info );
+void LAPACK_sgesdd( char* jobz, lapack_int* m, lapack_int* n, float* a,
+                    lapack_int* lda, float* s, float* u, lapack_int* ldu,
+                    float* vt, lapack_int* ldvt, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_dgesdd( char* jobz, lapack_int* m, lapack_int* n, double* a,
+                    lapack_int* lda, double* s, double* u, lapack_int* ldu,
+                    double* vt, lapack_int* ldvt, double* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_cgesdd( char* jobz, lapack_int* m, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda, float* s,
+                    lapack_complex_float* u, lapack_int* ldu,
+                    lapack_complex_float* vt, lapack_int* ldvt,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_zgesdd( char* jobz, lapack_int* m, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda, double* s,
+                    lapack_complex_double* u, lapack_int* ldu,
+                    lapack_complex_double* vt, lapack_int* ldvt,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* iwork, lapack_int *info );
+void LAPACK_dgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
+                    char* jobp, lapack_int* m, lapack_int* n, double* a,
+                    lapack_int* lda, double* sva, double* u, lapack_int* ldu,
+                    double* v, lapack_int* ldv, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_sgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
+                    char* jobp, lapack_int* m, lapack_int* n, float* a,
+                    lapack_int* lda, float* sva, float* u, lapack_int* ldu,
+                    float* v, lapack_int* ldv, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_dgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
+                    lapack_int* n, double* a, lapack_int* lda, double* sva,
+                    lapack_int* mv, double* v, lapack_int* ldv, double* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_sgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
+                    lapack_int* n, float* a, lapack_int* lda, float* sva,
+                    lapack_int* mv, float* v, lapack_int* ldv, float* work,
+                    lapack_int* lwork, lapack_int *info );
+void LAPACK_sggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
+                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                    float* alpha, float* beta, float* u, lapack_int* ldu,
+                    float* v, lapack_int* ldv, float* q, lapack_int* ldq,
+                    float* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_dggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
+                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                    double* alpha, double* beta, double* u, lapack_int* ldu,
+                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,
+                    double* work, lapack_int* iwork, lapack_int *info );
+void LAPACK_cggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* alpha,
+                    float* beta, lapack_complex_float* u, lapack_int* ldu,
+                    lapack_complex_float* v, lapack_int* ldv,
+                    lapack_complex_float* q, lapack_int* ldq,
+                    lapack_complex_float* work, float* rwork, lapack_int* iwork,
+                    lapack_int *info );
+void LAPACK_zggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
+                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* alpha,
+                    double* beta, lapack_complex_double* u, lapack_int* ldu,
+                    lapack_complex_double* v, lapack_int* ldv,
+                    lapack_complex_double* q, lapack_int* ldq,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int* iwork, lapack_int *info );
+void LAPACK_ssygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                   float* w, float* work, lapack_int* lwork, lapack_int *info );
+void LAPACK_dsygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                   double* w, double* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_chegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_float* a, lapack_int* lda,
+                   lapack_complex_float* b, lapack_int* ldb, float* w,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_int *info );
+void LAPACK_zhegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_double* a, lapack_int* lda,
+                   lapack_complex_double* b, lapack_int* ldb, double* w,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   double* rwork, lapack_int *info );
+void LAPACK_ssygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                    float* w, float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_dsygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                    double* w, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_chegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* w,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zhegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* w,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_ssygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, float* a, lapack_int* lda, float* b,
+                    lapack_int* ldb, float* vl, float* vu, lapack_int* il,
+                    lapack_int* iu, float* abstol, lapack_int* m, float* w,
+                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_dsygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, double* a, lapack_int* lda, double* b,
+                    lapack_int* ldb, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_chegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, float* vl,
+                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_zhegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, double* vl,
+                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_sspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   float* ap, float* bp, float* w, float* z, lapack_int* ldz,
+                   float* work, lapack_int *info );
+void LAPACK_dspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   double* ap, double* bp, double* w, double* z,
+                   lapack_int* ldz, double* work, lapack_int *info );
+void LAPACK_chpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_float* ap, lapack_complex_float* bp, float* w,
+                   lapack_complex_float* z, lapack_int* ldz,
+                   lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zhpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                   lapack_complex_double* ap, lapack_complex_double* bp,
+                   double* w, lapack_complex_double* z, lapack_int* ldz,
+                   lapack_complex_double* work, double* rwork,
+                   lapack_int *info );
+void LAPACK_sspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    float* ap, float* bp, float* w, float* z, lapack_int* ldz,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_dspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    double* ap, double* bp, double* w, double* z,
+                    lapack_int* ldz, double* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );
+void LAPACK_chpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_float* ap, lapack_complex_float* bp,
+                    float* w, lapack_complex_float* z, lapack_int* ldz,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zhpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
+                    lapack_complex_double* ap, lapack_complex_double* bp,
+                    double* w, lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_sspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, float* ap, float* bp, float* vl, float* vu,
+                    lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, float* z, lapack_int* ldz,
+                    float* work, lapack_int* iwork, lapack_int* ifail,
+                    lapack_int *info );
+void LAPACK_dspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, double* ap, double* bp, double* vl,
+                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, double* z, lapack_int* ldz,
+                    double* work, lapack_int* iwork, lapack_int* ifail,
+                    lapack_int *info );
+void LAPACK_chpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, lapack_complex_float* ap,
+                    lapack_complex_float* bp, float* vl, float* vu,
+                    lapack_int* il, lapack_int* iu, float* abstol,
+                    lapack_int* m, float* w, lapack_complex_float* z,
+                    lapack_int* ldz, lapack_complex_float* work, float* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_zhpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
+                    lapack_int* n, lapack_complex_double* ap,
+                    lapack_complex_double* bp, double* vl, double* vu,
+                    lapack_int* il, lapack_int* iu, double* abstol,
+                    lapack_int* m, double* w, lapack_complex_double* z,
+                    lapack_int* ldz, lapack_complex_double* work, double* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_ssbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                   lapack_int* kb, float* ab, lapack_int* ldab, float* bb,
+                   lapack_int* ldbb, float* w, float* z, lapack_int* ldz,
+                   float* work, lapack_int *info );
+void LAPACK_dsbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                   lapack_int* kb, double* ab, lapack_int* ldab, double* bb,
+                   lapack_int* ldbb, double* w, double* z, lapack_int* ldz,
+                   double* work, lapack_int *info );
+void LAPACK_chbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                   lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
+                   lapack_complex_float* bb, lapack_int* ldbb, float* w,
+                   lapack_complex_float* z, lapack_int* ldz,
+                   lapack_complex_float* work, float* rwork, lapack_int *info );
+void LAPACK_zhbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                   lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
+                   lapack_complex_double* bb, lapack_int* ldbb, double* w,
+                   lapack_complex_double* z, lapack_int* ldz,
+                   lapack_complex_double* work, double* rwork,
+                   lapack_int *info );
+void LAPACK_ssbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                    lapack_int* kb, float* ab, lapack_int* ldab, float* bb,
+                    lapack_int* ldbb, float* w, float* z, lapack_int* ldz,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_dsbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                    lapack_int* kb, double* ab, lapack_int* ldab, double* bb,
+                    lapack_int* ldbb, double* w, double* z, lapack_int* ldz,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_chbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                    lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
+                    lapack_complex_float* bb, lapack_int* ldbb, float* w,
+                    lapack_complex_float* z, lapack_int* ldz,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
+                    lapack_int *info );
+void LAPACK_zhbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
+                    lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
+                    lapack_complex_double* bb, lapack_int* ldbb, double* w,
+                    lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, lapack_int* lwork,
+                    double* rwork, lapack_int* lrwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_int *info );
+void LAPACK_ssbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* ka, lapack_int* kb, float* ab, lapack_int* ldab,
+                    float* bb, lapack_int* ldbb, float* q, lapack_int* ldq,
+                    float* vl, float* vu, lapack_int* il, lapack_int* iu,
+                    float* abstol, lapack_int* m, float* w, float* z,
+                    lapack_int* ldz, float* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_dsbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* ka, lapack_int* kb, double* ab,
+                    lapack_int* ldab, double* bb, lapack_int* ldbb, double* q,
+                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,
+                    lapack_int* iu, double* abstol, lapack_int* m, double* w,
+                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_chbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* ka, lapack_int* kb, lapack_complex_float* ab,
+                    lapack_int* ldab, lapack_complex_float* bb,
+                    lapack_int* ldbb, lapack_complex_float* q, lapack_int* ldq,
+                    float* vl, float* vu, lapack_int* il, lapack_int* iu,
+                    float* abstol, lapack_int* m, float* w,
+                    lapack_complex_float* z, lapack_int* ldz,
+                    lapack_complex_float* work, float* rwork, lapack_int* iwork,
+                    lapack_int* ifail, lapack_int *info );
+void LAPACK_zhbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
+                    lapack_int* ka, lapack_int* kb, lapack_complex_double* ab,
+                    lapack_int* ldab, lapack_complex_double* bb,
+                    lapack_int* ldbb, lapack_complex_double* q, lapack_int* ldq,
+                    double* vl, double* vu, lapack_int* il, lapack_int* iu,
+                    double* abstol, lapack_int* m, double* w,
+                    lapack_complex_double* z, lapack_int* ldz,
+                    lapack_complex_double* work, double* rwork,
+                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );
+void LAPACK_sgges( char* jobvsl, char* jobvsr, char* sort,
+                   LAPACK_S_SELECT3 selctg, lapack_int* n, float* a,
+                   lapack_int* lda, float* b, lapack_int* ldb, lapack_int* sdim,
+                   float* alphar, float* alphai, float* beta, float* vsl,
+                   lapack_int* ldvsl, float* vsr, lapack_int* ldvsr,
+                   float* work, lapack_int* lwork, lapack_logical* bwork,
+                   lapack_int *info );
+void LAPACK_dgges( char* jobvsl, char* jobvsr, char* sort,
+                   LAPACK_D_SELECT3 selctg, lapack_int* n, double* a,
+                   lapack_int* lda, double* b, lapack_int* ldb,
+                   lapack_int* sdim, double* alphar, double* alphai,
+                   double* beta, double* vsl, lapack_int* ldvsl, double* vsr,
+                   lapack_int* ldvsr, double* work, lapack_int* lwork,
+                   lapack_logical* bwork, lapack_int *info );
+void LAPACK_cgges( char* jobvsl, char* jobvsr, char* sort,
+                   LAPACK_C_SELECT2 selctg, lapack_int* n,
+                   lapack_complex_float* a, lapack_int* lda,
+                   lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,
+                   lapack_complex_float* alpha, lapack_complex_float* beta,
+                   lapack_complex_float* vsl, lapack_int* ldvsl,
+                   lapack_complex_float* vsr, lapack_int* ldvsr,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_logical* bwork, lapack_int *info );
+void LAPACK_zgges( char* jobvsl, char* jobvsr, char* sort,
+                   LAPACK_Z_SELECT2 selctg, lapack_int* n,
+                   lapack_complex_double* a, lapack_int* lda,
+                   lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,
+                   lapack_complex_double* alpha, lapack_complex_double* beta,
+                   lapack_complex_double* vsl, lapack_int* ldvsl,
+                   lapack_complex_double* vsr, lapack_int* ldvsr,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   double* rwork, lapack_logical* bwork, lapack_int *info );
+void LAPACK_sggesx( char* jobvsl, char* jobvsr, char* sort,
+                    LAPACK_S_SELECT3 selctg, char* sense, lapack_int* n,
+                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
+                    lapack_int* sdim, float* alphar, float* alphai, float* beta,
+                    float* vsl, lapack_int* ldvsl, float* vsr,
+                    lapack_int* ldvsr, float* rconde, float* rcondv,
+                    float* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_dggesx( char* jobvsl, char* jobvsr, char* sort,
+                    LAPACK_D_SELECT3 selctg, char* sense, lapack_int* n,
+                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
+                    lapack_int* sdim, double* alphar, double* alphai,
+                    double* beta, double* vsl, lapack_int* ldvsl, double* vsr,
+                    lapack_int* ldvsr, double* rconde, double* rcondv,
+                    double* work, lapack_int* lwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_cggesx( char* jobvsl, char* jobvsr, char* sort,
+                    LAPACK_C_SELECT2 selctg, char* sense, lapack_int* n,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,
+                    lapack_complex_float* alpha, lapack_complex_float* beta,
+                    lapack_complex_float* vsl, lapack_int* ldvsl,
+                    lapack_complex_float* vsr, lapack_int* ldvsr, float* rconde,
+                    float* rcondv, lapack_complex_float* work,
+                    lapack_int* lwork, float* rwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_zggesx( char* jobvsl, char* jobvsr, char* sort,
+                    LAPACK_Z_SELECT2 selctg, char* sense, lapack_int* n,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,
+                    lapack_complex_double* alpha, lapack_complex_double* beta,
+                    lapack_complex_double* vsl, lapack_int* ldvsl,
+                    lapack_complex_double* vsr, lapack_int* ldvsr,
+                    double* rconde, double* rcondv, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_int* iwork,
+                    lapack_int* liwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_sggev( char* jobvl, char* jobvr, lapack_int* n, float* a,
+                   lapack_int* lda, float* b, lapack_int* ldb, float* alphar,
+                   float* alphai, float* beta, float* vl, lapack_int* ldvl,
+                   float* vr, lapack_int* ldvr, float* work, lapack_int* lwork,
+                   lapack_int *info );
+void LAPACK_dggev( char* jobvl, char* jobvr, lapack_int* n, double* a,
+                   lapack_int* lda, double* b, lapack_int* ldb, double* alphar,
+                   double* alphai, double* beta, double* vl, lapack_int* ldvl,
+                   double* vr, lapack_int* ldvr, double* work,
+                   lapack_int* lwork, lapack_int *info );
+void LAPACK_cggev( char* jobvl, char* jobvr, lapack_int* n,
+                   lapack_complex_float* a, lapack_int* lda,
+                   lapack_complex_float* b, lapack_int* ldb,
+                   lapack_complex_float* alpha, lapack_complex_float* beta,
+                   lapack_complex_float* vl, lapack_int* ldvl,
+                   lapack_complex_float* vr, lapack_int* ldvr,
+                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                   lapack_int *info );
+void LAPACK_zggev( char* jobvl, char* jobvr, lapack_int* n,
+                   lapack_complex_double* a, lapack_int* lda,
+                   lapack_complex_double* b, lapack_int* ldb,
+                   lapack_complex_double* alpha, lapack_complex_double* beta,
+                   lapack_complex_double* vl, lapack_int* ldvl,
+                   lapack_complex_double* vr, lapack_int* ldvr,
+                   lapack_complex_double* work, lapack_int* lwork,
+                   double* rwork, lapack_int *info );
+void LAPACK_sggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, float* a, lapack_int* lda, float* b,
+                    lapack_int* ldb, float* alphar, float* alphai, float* beta,
+                    float* vl, lapack_int* ldvl, float* vr, lapack_int* ldvr,
+                    lapack_int* ilo, lapack_int* ihi, float* lscale,
+                    float* rscale, float* abnrm, float* bbnrm, float* rconde,
+                    float* rcondv, float* work, lapack_int* lwork,
+                    lapack_int* iwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_dggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, double* a, lapack_int* lda, double* b,
+                    lapack_int* ldb, double* alphar, double* alphai,
+                    double* beta, double* vl, lapack_int* ldvl, double* vr,
+                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
+                    double* lscale, double* rscale, double* abnrm,
+                    double* bbnrm, double* rconde, double* rcondv, double* work,
+                    lapack_int* lwork, lapack_int* iwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_cggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* b, lapack_int* ldb,
+                    lapack_complex_float* alpha, lapack_complex_float* beta,
+                    lapack_complex_float* vl, lapack_int* ldvl,
+                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* ilo,
+                    lapack_int* ihi, float* lscale, float* rscale, float* abnrm,
+                    float* bbnrm, float* rconde, float* rcondv,
+                    lapack_complex_float* work, lapack_int* lwork, float* rwork,
+                    lapack_int* iwork, lapack_logical* bwork,
+                    lapack_int *info );
+void LAPACK_zggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
+                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* b, lapack_int* ldb,
+                    lapack_complex_double* alpha, lapack_complex_double* beta,
+                    lapack_complex_double* vl, lapack_int* ldvl,
+                    lapack_complex_double* vr, lapack_int* ldvr,
+                    lapack_int* ilo, lapack_int* ihi, double* lscale,
+                    double* rscale, double* abnrm, double* bbnrm,
+                    double* rconde, double* rcondv, lapack_complex_double* work,
+                    lapack_int* lwork, double* rwork, lapack_int* iwork,
+                    lapack_logical* bwork, lapack_int *info );
+void LAPACK_dsfrk( char* transr, char* uplo, char* trans, lapack_int* n,
+                   lapack_int* k, double* alpha, const double* a,
+                   lapack_int* lda, double* beta, double* c );
+void LAPACK_ssfrk( char* transr, char* uplo, char* trans, lapack_int* n,
+                   lapack_int* k, float* alpha, const float* a, lapack_int* lda,
+                   float* beta, float* c );
+void LAPACK_zhfrk( char* transr, char* uplo, char* trans, lapack_int* n,
+                   lapack_int* k, double* alpha, const lapack_complex_double* a,
+                   lapack_int* lda, double* beta, lapack_complex_double* c );
+void LAPACK_chfrk( char* transr, char* uplo, char* trans, lapack_int* n,
+                   lapack_int* k, float* alpha, const lapack_complex_float* a,
+                   lapack_int* lda, float* beta, lapack_complex_float* c );
+void LAPACK_dtfsm( char* transr, char* side, char* uplo, char* trans,
+                   char* diag, lapack_int* m, lapack_int* n, double* alpha,
+                   const double* a, double* b, lapack_int* ldb );
+void LAPACK_stfsm( char* transr, char* side, char* uplo, char* trans,
+                   char* diag, lapack_int* m, lapack_int* n, float* alpha,
+                   const float* a, float* b, lapack_int* ldb );
+void LAPACK_ztfsm( char* transr, char* side, char* uplo, char* trans,
+                   char* diag, lapack_int* m, lapack_int* n,
+                   lapack_complex_double* alpha, const lapack_complex_double* a,
+                   lapack_complex_double* b, lapack_int* ldb );
+void LAPACK_ctfsm( char* transr, char* side, char* uplo, char* trans,
+                   char* diag, lapack_int* m, lapack_int* n,
+                   lapack_complex_float* alpha, const lapack_complex_float* a,
+                   lapack_complex_float* b, lapack_int* ldb );
+void LAPACK_dtfttp( char* transr, char* uplo, lapack_int* n, const double* arf,
+                    double* ap, lapack_int *info );
+void LAPACK_stfttp( char* transr, char* uplo, lapack_int* n, const float* arf,
+                    float* ap, lapack_int *info );
+void LAPACK_ztfttp( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_double* arf, lapack_complex_double* ap,
+                    lapack_int *info );
+void LAPACK_ctfttp( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_float* arf, lapack_complex_float* ap,
+                    lapack_int *info );
+void LAPACK_dtfttr( char* transr, char* uplo, lapack_int* n, const double* arf,
+                    double* a, lapack_int* lda, lapack_int *info );
+void LAPACK_stfttr( char* transr, char* uplo, lapack_int* n, const float* arf,
+                    float* a, lapack_int* lda, lapack_int *info );
+void LAPACK_ztfttr( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_double* arf, lapack_complex_double* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_ctfttr( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_float* arf, lapack_complex_float* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_dtpttf( char* transr, char* uplo, lapack_int* n, const double* ap,
+                    double* arf, lapack_int *info );
+void LAPACK_stpttf( char* transr, char* uplo, lapack_int* n, const float* ap,
+                    float* arf, lapack_int *info );
+void LAPACK_ztpttf( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_double* ap, lapack_complex_double* arf,
+                    lapack_int *info );
+void LAPACK_ctpttf( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_float* ap, lapack_complex_float* arf,
+                    lapack_int *info );
+void LAPACK_dtpttr( char* uplo, lapack_int* n, const double* ap, double* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_stpttr( char* uplo, lapack_int* n, const float* ap, float* a,
+                    lapack_int* lda, lapack_int *info );
+void LAPACK_ztpttr( char* uplo, lapack_int* n, const lapack_complex_double* ap,
+                    lapack_complex_double* a, lapack_int* lda,
+                    lapack_int *info );
+void LAPACK_ctpttr( char* uplo, lapack_int* n, const lapack_complex_float* ap,
+                    lapack_complex_float* a, lapack_int* lda,
+                    lapack_int *info );
+void LAPACK_dtrttf( char* transr, char* uplo, lapack_int* n, const double* a,
+                    lapack_int* lda, double* arf, lapack_int *info );
+void LAPACK_strttf( char* transr, char* uplo, lapack_int* n, const float* a,
+                    lapack_int* lda, float* arf, lapack_int *info );
+void LAPACK_ztrttf( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_double* a, lapack_int* lda,
+                    lapack_complex_double* arf, lapack_int *info );
+void LAPACK_ctrttf( char* transr, char* uplo, lapack_int* n,
+                    const lapack_complex_float* a, lapack_int* lda,
+                    lapack_complex_float* arf, lapack_int *info );
+void LAPACK_dtrttp( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
+                    double* ap, lapack_int *info );
+void LAPACK_strttp( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
+                    float* ap, lapack_int *info );
+void LAPACK_ztrttp( char* uplo, lapack_int* n, const lapack_complex_double* a,
+                    lapack_int* lda, lapack_complex_double* ap,
+                    lapack_int *info );
+void LAPACK_ctrttp( char* uplo, lapack_int* n, const lapack_complex_float* a,
+                    lapack_int* lda, lapack_complex_float* ap,
+                    lapack_int *info );
+void LAPACK_sgeqrfp( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
+                     float* tau, float* work, lapack_int* lwork,
+                     lapack_int *info );
+void LAPACK_dgeqrfp( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
+                     double* tau, double* work, lapack_int* lwork,
+                     lapack_int *info );
+void LAPACK_cgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_float* a,
+                     lapack_int* lda, lapack_complex_float* tau,
+                     lapack_complex_float* work, lapack_int* lwork,
+                     lapack_int *info );
+void LAPACK_zgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_double* a,
+                     lapack_int* lda, lapack_complex_double* tau,
+                     lapack_complex_double* work, lapack_int* lwork,
+                     lapack_int *info );
+void LAPACK_clacgv( lapack_int* n, lapack_complex_float* x, lapack_int* incx );
+void LAPACK_zlacgv( lapack_int* n, lapack_complex_double* x, lapack_int* incx );
+void LAPACK_slarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
+                    float* x );
+void LAPACK_dlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
+                    double* x );
+void LAPACK_clarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
+                    lapack_complex_float* x );
+void LAPACK_zlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
+                    lapack_complex_double* x );
+void LAPACK_sgeqr2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
+                    float* tau, float* work, lapack_int *info );
+void LAPACK_dgeqr2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
+                    double* tau, double* work, lapack_int *info );
+void LAPACK_cgeqr2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
+                    lapack_int* lda, lapack_complex_float* tau,
+                    lapack_complex_float* work, lapack_int *info );
+void LAPACK_zgeqr2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
+                    lapack_int* lda, lapack_complex_double* tau,
+                    lapack_complex_double* work, lapack_int *info );
+void LAPACK_slacpy( char* uplo, lapack_int* m, lapack_int* n, const float* a,
+                    lapack_int* lda, float* b, lapack_int*
ldb ); +void LAPACK_dlacpy( char* uplo, lapack_int* m, lapack_int* n, const double* a, + lapack_int* lda, double* b, lapack_int* ldb ); +void LAPACK_clacpy( char* uplo, lapack_int* m, lapack_int* n, + const lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* b, lapack_int* ldb ); +void LAPACK_zlacpy( char* uplo, lapack_int* m, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* b, lapack_int* ldb ); +void LAPACK_sgetf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + lapack_int* ipiv, lapack_int *info ); +void LAPACK_dgetf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + lapack_int* ipiv, lapack_int *info ); +void LAPACK_cgetf2( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int* ipiv, lapack_int *info ); +void LAPACK_zgetf2( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int* ipiv, lapack_int *info ); +void LAPACK_slaswp( lapack_int* n, float* a, lapack_int* lda, lapack_int* k1, + lapack_int* k2, const lapack_int* ipiv, lapack_int* incx ); +void LAPACK_dlaswp( lapack_int* n, double* a, lapack_int* lda, lapack_int* k1, + lapack_int* k2, const lapack_int* ipiv, lapack_int* incx ); +void LAPACK_claswp( lapack_int* n, lapack_complex_float* a, lapack_int* lda, + lapack_int* k1, lapack_int* k2, const lapack_int* ipiv, + lapack_int* incx ); +void LAPACK_zlaswp( lapack_int* n, lapack_complex_double* a, lapack_int* lda, + lapack_int* k1, lapack_int* k2, const lapack_int* ipiv, + lapack_int* incx ); +float LAPACK_slange( char* norm, lapack_int* m, lapack_int* n, const float* a, + lapack_int* lda, float* work ); +double LAPACK_dlange( char* norm, lapack_int* m, lapack_int* n, const double* a, + lapack_int* lda, double* work ); +float LAPACK_clange( char* norm, lapack_int* m, lapack_int* n, + const lapack_complex_float* a, lapack_int* lda, float* work ); +double LAPACK_zlange( char* norm, lapack_int* m, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, double* work ); +float LAPACK_clanhe( char* norm, char* uplo, lapack_int* n, + const lapack_complex_float* a, lapack_int* lda, float* work ); +double LAPACK_zlanhe( char* norm, char* uplo, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, double* work ); +float LAPACK_slansy( char* norm, char* uplo, lapack_int* n, const float* a, + lapack_int* lda, float* work ); +double LAPACK_dlansy( char* norm, char* uplo, lapack_int* n, const double* a, + lapack_int* lda, double* work ); +float LAPACK_clansy( char* norm, char* uplo, lapack_int* n, + const lapack_complex_float* a, lapack_int* lda, float* work ); +double LAPACK_zlansy( char* norm, char* uplo, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, double* work ); +float LAPACK_slantr( char* norm, char* uplo, char* diag, lapack_int* m, + lapack_int* n, const float* a, lapack_int* lda, float* work ); +double LAPACK_dlantr( char* norm, char* uplo, char* diag, lapack_int* m, + lapack_int* n, const double* a, lapack_int* lda, double* work ); +float LAPACK_clantr( char* norm, char* uplo, char* diag, lapack_int* m, + lapack_int* n, const lapack_complex_float* a, lapack_int* lda, + float* work ); +double LAPACK_zlantr( char* norm, char* uplo, char* diag, lapack_int* m, + lapack_int* n, const lapack_complex_double* a, lapack_int* lda, + double* work ); +float LAPACK_slamch( char* cmach ); +double LAPACK_dlamch( char* cmach ); +void LAPACK_sgelq2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* tau, float* 
work, lapack_int *info ); +void LAPACK_dgelq2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* tau, double* work, lapack_int *info ); +void LAPACK_cgelq2( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zgelq2( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_slarfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, const float* v, + lapack_int* ldv, const float* t, lapack_int* ldt, float* c, + lapack_int* ldc, float* work, lapack_int* ldwork ); +void LAPACK_dlarfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, + const double* v, lapack_int* ldv, const double* t, + lapack_int* ldt, double* c, lapack_int* ldc, double* work, + lapack_int* ldwork ); +void LAPACK_clarfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, + const lapack_complex_float* v, lapack_int* ldv, + const lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int* ldwork ); +void LAPACK_zlarfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, + const lapack_complex_double* v, lapack_int* ldv, + const lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work, lapack_int* ldwork ); +void LAPACK_slarfg( lapack_int* n, float* alpha, float* x, lapack_int* incx, + float* tau ); +void LAPACK_dlarfg( lapack_int* n, double* alpha, double* x, lapack_int* incx, + double* tau ); +void LAPACK_clarfg( lapack_int* n, lapack_complex_float* alpha, + lapack_complex_float* x, lapack_int* incx, + lapack_complex_float* tau ); +void LAPACK_zlarfg( lapack_int* n, lapack_complex_double* alpha, + lapack_complex_double* x, lapack_int* incx, + lapack_complex_double* tau ); +void LAPACK_slarft( char* direct, char* storev, lapack_int* n, lapack_int* k, + const float* v, lapack_int* ldv, const float* tau, float* t, + lapack_int* ldt ); +void LAPACK_dlarft( char* direct, char* storev, lapack_int* n, lapack_int* k, + const double* v, lapack_int* ldv, const double* tau, + double* t, lapack_int* ldt ); +void LAPACK_clarft( char* direct, char* storev, lapack_int* n, lapack_int* k, + const lapack_complex_float* v, lapack_int* ldv, + const lapack_complex_float* tau, lapack_complex_float* t, + lapack_int* ldt ); +void LAPACK_zlarft( char* direct, char* storev, lapack_int* n, lapack_int* k, + const lapack_complex_double* v, lapack_int* ldv, + const lapack_complex_double* tau, lapack_complex_double* t, + lapack_int* ldt ); +void LAPACK_slarfx( char* side, lapack_int* m, lapack_int* n, const float* v, + float* tau, float* c, lapack_int* ldc, float* work ); +void LAPACK_dlarfx( char* side, lapack_int* m, lapack_int* n, const double* v, + double* tau, double* c, lapack_int* ldc, double* work ); +void LAPACK_clarfx( char* side, lapack_int* m, lapack_int* n, + const lapack_complex_float* v, lapack_complex_float* tau, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work ); +void LAPACK_zlarfx( char* side, lapack_int* m, lapack_int* n, + const lapack_complex_double* v, lapack_complex_double* tau, + lapack_complex_double* c, lapack_int* ldc, + lapack_complex_double* work ); 
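+/* Usage sketch (not part of the original header): every LAPACK_* prototype in
+ * this file follows the Fortran calling convention, so each scalar argument,
+ * including dimensions and the return status, is passed by pointer, and
+ * matrices are expected in column-major storage. A minimal unblocked QR
+ * factorization of a 3x2 matrix via LAPACK_dgeqr2, declared above, could look
+ * like this (array contents illustrative only):
+ *
+ *   lapack_int m = 3, n = 2, lda = 3, info = 0;
+ *   double a[6] = { 1, 2, 3, 4, 5, 6 };   // 3x2 matrix, column-major
+ *   double tau[2];                        // min(m,n) Householder scalars
+ *   double work[2];                       // workspace of size n
+ *   LAPACK_dgeqr2( &m, &n, a, &lda, tau, work, &info );
+ *   // info == 0 on success; info == -i flags an invalid i-th argument
+ */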
+void LAPACK_slatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, + char* sym, float* d, lapack_int* mode, float* cond, + float* dmax, lapack_int* kl, lapack_int* ku, char* pack, + float* a, lapack_int* lda, float* work, lapack_int *info ); +void LAPACK_dlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, + char* sym, double* d, lapack_int* mode, double* cond, + double* dmax, lapack_int* kl, lapack_int* ku, char* pack, + double* a, lapack_int* lda, double* work, + lapack_int *info ); +void LAPACK_clatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, + char* sym, float* d, lapack_int* mode, float* cond, + float* dmax, lapack_int* kl, lapack_int* ku, char* pack, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, + char* sym, double* d, lapack_int* mode, double* cond, + double* dmax, lapack_int* kl, lapack_int* ku, char* pack, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_slag2d( lapack_int* m, lapack_int* n, const float* sa, + lapack_int* ldsa, double* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_dlag2s( lapack_int* m, lapack_int* n, const double* a, + lapack_int* lda, float* sa, lapack_int* ldsa, + lapack_int *info ); +void LAPACK_clag2z( lapack_int* m, lapack_int* n, + const lapack_complex_float* sa, lapack_int* ldsa, + lapack_complex_double* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_zlag2c( lapack_int* m, lapack_int* n, + const lapack_complex_double* a, lapack_int* lda, + lapack_complex_float* sa, lapack_int* ldsa, + lapack_int *info ); +void LAPACK_slauum( char* uplo, lapack_int* n, float* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_dlauum( char* uplo, lapack_int* n, double* a, lapack_int* lda, + lapack_int *info ); +void LAPACK_clauum( char* uplo, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_zlauum( char* uplo, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int *info ); +void LAPACK_slagge( lapack_int* m, lapack_int* n, lapack_int* kl, + lapack_int* ku, const float* d, float* a, lapack_int* lda, + lapack_int* iseed, float* work, lapack_int *info ); +void LAPACK_dlagge( lapack_int* m, lapack_int* n, lapack_int* kl, + lapack_int* ku, const double* d, double* a, lapack_int* lda, + lapack_int* iseed, double* work, lapack_int *info ); +void LAPACK_clagge( lapack_int* m, lapack_int* n, lapack_int* kl, + lapack_int* ku, const float* d, lapack_complex_float* a, + lapack_int* lda, lapack_int* iseed, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zlagge( lapack_int* m, lapack_int* n, lapack_int* kl, + lapack_int* ku, const double* d, lapack_complex_double* a, + lapack_int* lda, lapack_int* iseed, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_slaset( char* uplo, lapack_int* m, lapack_int* n, float* alpha, + float* beta, float* a, lapack_int* lda ); +void LAPACK_dlaset( char* uplo, lapack_int* m, lapack_int* n, double* alpha, + double* beta, double* a, lapack_int* lda ); +void LAPACK_claset( char* uplo, lapack_int* m, lapack_int* n, + lapack_complex_float* alpha, lapack_complex_float* beta, + lapack_complex_float* a, lapack_int* lda ); +void LAPACK_zlaset( char* uplo, lapack_int* m, lapack_int* n, + lapack_complex_double* alpha, lapack_complex_double* beta, + lapack_complex_double* a, lapack_int* lda ); +void LAPACK_slasrt( char* id, 
lapack_int* n, float* d, lapack_int *info ); +void LAPACK_dlasrt( char* id, lapack_int* n, double* d, lapack_int *info ); +void LAPACK_claghe( lapack_int* n, lapack_int* k, const float* d, + lapack_complex_float* a, lapack_int* lda, lapack_int* iseed, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zlaghe( lapack_int* n, lapack_int* k, const double* d, + lapack_complex_double* a, lapack_int* lda, + lapack_int* iseed, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_slagsy( lapack_int* n, lapack_int* k, const float* d, float* a, + lapack_int* lda, lapack_int* iseed, float* work, + lapack_int *info ); +void LAPACK_dlagsy( lapack_int* n, lapack_int* k, const double* d, double* a, + lapack_int* lda, lapack_int* iseed, double* work, + lapack_int *info ); +void LAPACK_clagsy( lapack_int* n, lapack_int* k, const float* d, + lapack_complex_float* a, lapack_int* lda, lapack_int* iseed, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zlagsy( lapack_int* n, lapack_int* k, const double* d, + lapack_complex_double* a, lapack_int* lda, + lapack_int* iseed, lapack_complex_double* work, + lapack_int *info ); +void LAPACK_slapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, + float* x, lapack_int* ldx, lapack_int* k ); +void LAPACK_dlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, + double* x, lapack_int* ldx, lapack_int* k ); +void LAPACK_clapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, + lapack_complex_float* x, lapack_int* ldx, lapack_int* k ); +void LAPACK_zlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, + lapack_complex_double* x, lapack_int* ldx, lapack_int* k ); +float LAPACK_slapy2( float* x, float* y ); +double LAPACK_dlapy2( double* x, double* y ); +float LAPACK_slapy3( float* x, float* y, float* z ); +double LAPACK_dlapy3( double* x, double* y, double* z ); +void LAPACK_slartgp( float* f, float* g, float* cs, float* sn, float* r ); +void LAPACK_dlartgp( double* f, double* g, double* cs, double* sn, double* r ); +void LAPACK_slartgs( float* x, float* y, float* sigma, float* cs, float* sn ); +void LAPACK_dlartgs( double* x, double* y, double* sigma, double* cs, + double* sn ); +// LAPACK 3.3.0 +void LAPACK_cbbcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + lapack_int* m, lapack_int* p, lapack_int* q, + float* theta, float* phi, + lapack_complex_float* u1, lapack_int* ldu1, + lapack_complex_float* u2, lapack_int* ldu2, + lapack_complex_float* v1t, lapack_int* ldv1t, + lapack_complex_float* v2t, lapack_int* ldv2t, + float* b11d, float* b11e, float* b12d, + float* b12e, float* b21d, float* b21e, + float* b22d, float* b22e, float* rwork, + lapack_int* lrwork , lapack_int *info ); +void LAPACK_cheswapr( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* i1, + lapack_int* i2 ); +void LAPACK_chetri2( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_chetri2x( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int* nb , lapack_int *info ); +void LAPACK_chetrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, const lapack_complex_float* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* work , lapack_int *info ); +void LAPACK_csyconv( char* uplo, char* way, + lapack_int* n, lapack_complex_float* a, + 
lapack_int* lda, const lapack_int* ipiv, + lapack_complex_float* work , lapack_int *info ); +void LAPACK_csyswapr( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* i1, + lapack_int* i2 ); +void LAPACK_csytri2( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_csytri2x( char* uplo, lapack_int* n, + lapack_complex_float* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int* nb , lapack_int *info ); +void LAPACK_csytrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, const lapack_complex_float* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* work , lapack_int *info ); +void LAPACK_cunbdb( char* trans, char* signs, + lapack_int* m, lapack_int* p, lapack_int* q, + lapack_complex_float* x11, lapack_int* ldx11, + lapack_complex_float* x12, lapack_int* ldx12, + lapack_complex_float* x21, lapack_int* ldx21, + lapack_complex_float* x22, lapack_int* ldx22, + float* theta, float* phi, + lapack_complex_float* taup1, + lapack_complex_float* taup2, + lapack_complex_float* tauq1, + lapack_complex_float* tauq2, + lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_cuncsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + char* signs, lapack_int* m, lapack_int* p, + lapack_int* q, lapack_complex_float* x11, + lapack_int* ldx11, lapack_complex_float* x12, + lapack_int* ldx12, lapack_complex_float* x21, + lapack_int* ldx21, lapack_complex_float* x22, + lapack_int* ldx22, float* theta, + lapack_complex_float* u1, lapack_int* ldu1, + lapack_complex_float* u2, lapack_int* ldu2, + lapack_complex_float* v1t, lapack_int* ldv1t, + lapack_complex_float* v2t, lapack_int* ldv2t, + lapack_complex_float* work, lapack_int* lwork, + float* rwork, lapack_int* lrwork, + lapack_int* iwork , lapack_int *info ); +void LAPACK_dbbcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + lapack_int* m, lapack_int* p, lapack_int* q, + double* theta, double* phi, double* u1, + lapack_int* ldu1, double* u2, lapack_int* ldu2, + double* v1t, lapack_int* ldv1t, double* v2t, + lapack_int* ldv2t, double* b11d, double* b11e, + double* b12d, double* b12e, double* b21d, + double* b21e, double* b22d, double* b22e, + double* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_dorbdb( char* trans, char* signs, + lapack_int* m, lapack_int* p, lapack_int* q, + double* x11, lapack_int* ldx11, double* x12, + lapack_int* ldx12, double* x21, lapack_int* ldx21, + double* x22, lapack_int* ldx22, double* theta, + double* phi, double* taup1, double* taup2, + double* tauq1, double* tauq2, double* work, + lapack_int* lwork , lapack_int *info ); +void LAPACK_dorcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + char* signs, lapack_int* m, lapack_int* p, + lapack_int* q, double* x11, lapack_int* ldx11, + double* x12, lapack_int* ldx12, double* x21, + lapack_int* ldx21, double* x22, lapack_int* ldx22, + double* theta, double* u1, lapack_int* ldu1, + double* u2, lapack_int* ldu2, double* v1t, + lapack_int* ldv1t, double* v2t, lapack_int* ldv2t, + double* work, lapack_int* lwork, + lapack_int* iwork , lapack_int *info ); +void LAPACK_dsyconv( char* uplo, char* way, + lapack_int* n, double* a, lapack_int* lda, + const lapack_int* ipiv, double* work , lapack_int *info ); +void LAPACK_dsyswapr( char* uplo, lapack_int* n, + 
double* a, lapack_int* i1, lapack_int* i2 ); +void LAPACK_dsytri2( char* uplo, lapack_int* n, + double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_dsytri2x( char* uplo, lapack_int* n, + double* a, lapack_int* lda, + const lapack_int* ipiv, double* work, + lapack_int* nb , lapack_int *info ); +void LAPACK_dsytrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, const double* a, + lapack_int* lda, const lapack_int* ipiv, + double* b, lapack_int* ldb, double* work , lapack_int *info ); +void LAPACK_sbbcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + lapack_int* m, lapack_int* p, lapack_int* q, + float* theta, float* phi, float* u1, + lapack_int* ldu1, float* u2, lapack_int* ldu2, + float* v1t, lapack_int* ldv1t, float* v2t, + lapack_int* ldv2t, float* b11d, float* b11e, + float* b12d, float* b12e, float* b21d, + float* b21e, float* b22d, float* b22e, + float* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_sorbdb( char* trans, char* signs, + lapack_int* m, lapack_int* p, lapack_int* q, + float* x11, lapack_int* ldx11, float* x12, + lapack_int* ldx12, float* x21, lapack_int* ldx21, + float* x22, lapack_int* ldx22, float* theta, + float* phi, float* taup1, float* taup2, + float* tauq1, float* tauq2, float* work, + lapack_int* lwork , lapack_int *info ); +void LAPACK_sorcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + char* signs, lapack_int* m, lapack_int* p, + lapack_int* q, float* x11, lapack_int* ldx11, + float* x12, lapack_int* ldx12, float* x21, + lapack_int* ldx21, float* x22, lapack_int* ldx22, + float* theta, float* u1, lapack_int* ldu1, + float* u2, lapack_int* ldu2, float* v1t, + lapack_int* ldv1t, float* v2t, lapack_int* ldv2t, + float* work, lapack_int* lwork, + lapack_int* iwork , lapack_int *info ); +void LAPACK_ssyconv( char* uplo, char* way, + lapack_int* n, float* a, lapack_int* lda, + const lapack_int* ipiv, float* work , lapack_int *info ); +void LAPACK_ssyswapr( char* uplo, lapack_int* n, + float* a, lapack_int* i1, lapack_int* i2 ); +void LAPACK_ssytri2( char* uplo, lapack_int* n, + float* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_ssytri2x( char* uplo, lapack_int* n, + float* a, lapack_int* lda, + const lapack_int* ipiv, float* work, + lapack_int* nb , lapack_int *info ); +void LAPACK_ssytrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, const float* a, + lapack_int* lda, const lapack_int* ipiv, + float* b, lapack_int* ldb, float* work , lapack_int *info ); +void LAPACK_zbbcsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + lapack_int* m, lapack_int* p, lapack_int* q, + double* theta, double* phi, + lapack_complex_double* u1, lapack_int* ldu1, + lapack_complex_double* u2, lapack_int* ldu2, + lapack_complex_double* v1t, lapack_int* ldv1t, + lapack_complex_double* v2t, lapack_int* ldv2t, + double* b11d, double* b11e, double* b12d, + double* b12e, double* b21d, double* b21e, + double* b22d, double* b22e, double* rwork, + lapack_int* lrwork , lapack_int *info ); +void LAPACK_zheswapr( char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* i1, + lapack_int* i2 ); +void LAPACK_zhetri2( char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_zhetri2x( char* uplo, lapack_int* n, + 
lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int* nb , lapack_int *info ); +void LAPACK_zhetrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* work , lapack_int *info ); +void LAPACK_zsyconv( char* uplo, char* way, + lapack_int* n, lapack_complex_double* a, + lapack_int* lda, const lapack_int* ipiv, + lapack_complex_double* work , lapack_int *info ); +void LAPACK_zsyswapr( char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* i1, + lapack_int* i2 ); +void LAPACK_zsytri2( char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_zsytri2x( char* uplo, lapack_int* n, + lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* work, lapack_int* nb , lapack_int *info ); +void LAPACK_zsytrs2( char* uplo, lapack_int* n, + lapack_int* nrhs, + const lapack_complex_double* a, lapack_int* lda, + const lapack_int* ipiv, + lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* work , lapack_int *info ); +void LAPACK_zunbdb( char* trans, char* signs, + lapack_int* m, lapack_int* p, lapack_int* q, + lapack_complex_double* x11, lapack_int* ldx11, + lapack_complex_double* x12, lapack_int* ldx12, + lapack_complex_double* x21, lapack_int* ldx21, + lapack_complex_double* x22, lapack_int* ldx22, + double* theta, double* phi, + lapack_complex_double* taup1, + lapack_complex_double* taup2, + lapack_complex_double* tauq1, + lapack_complex_double* tauq2, + lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); +void LAPACK_zuncsd( char* jobu1, char* jobu2, + char* jobv1t, char* jobv2t, char* trans, + char* signs, lapack_int* m, lapack_int* p, + lapack_int* q, lapack_complex_double* x11, + lapack_int* ldx11, lapack_complex_double* x12, + lapack_int* ldx12, lapack_complex_double* x21, + lapack_int* ldx21, lapack_complex_double* x22, + lapack_int* ldx22, double* theta, + lapack_complex_double* u1, lapack_int* ldu1, + lapack_complex_double* u2, lapack_int* ldu2, + lapack_complex_double* v1t, lapack_int* ldv1t, + lapack_complex_double* v2t, lapack_int* ldv2t, + lapack_complex_double* work, lapack_int* lwork, + double* rwork, lapack_int* lrwork, + lapack_int* iwork , lapack_int *info ); +// LAPACK 3.4.0 +void LAPACK_sgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* nb, const float* v, + lapack_int* ldv, const float* t, lapack_int* ldt, float* c, + lapack_int* ldc, float* work, lapack_int *info ); +void LAPACK_dgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* nb, const double* v, + lapack_int* ldv, const double* t, lapack_int* ldt, + double* c, lapack_int* ldc, double* work, + lapack_int *info ); +void LAPACK_cgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* nb, + const lapack_complex_float* v, lapack_int* ldv, + const lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* c, lapack_int* ldc, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* nb, + const lapack_complex_double* v, lapack_int* ldv, + const lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* c, 
lapack_int* ldc, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_sgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, float* a, + lapack_int* lda, float* t, lapack_int* ldt, float* work, + lapack_int *info ); +void LAPACK_dgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, double* a, + lapack_int* lda, double* t, lapack_int* ldt, double* work, + lapack_int *info ); +void LAPACK_cgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_zgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_sgeqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* t, lapack_int* ldt, lapack_int *info ); +void LAPACK_dgeqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* t, lapack_int* ldt, lapack_int *info ); +void LAPACK_cgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_zgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_sgeqrt3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* t, lapack_int* ldt, lapack_int *info ); +void LAPACK_dgeqrt3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* t, lapack_int* ldt, lapack_int *info ); +void LAPACK_cgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_zgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_stpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, lapack_int* nb, + const float* v, lapack_int* ldv, const float* t, + lapack_int* ldt, float* a, lapack_int* lda, float* b, + lapack_int* ldb, float* work, lapack_int *info ); +void LAPACK_dtpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, lapack_int* nb, + const double* v, lapack_int* ldv, const double* t, + lapack_int* ldt, double* a, lapack_int* lda, double* b, + lapack_int* ldb, double* work, lapack_int *info ); +void LAPACK_ctpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, lapack_int* nb, + const lapack_complex_float* v, lapack_int* ldv, + const lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_ztpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, + lapack_int* k, lapack_int* l, lapack_int* nb, + const lapack_complex_double* v, lapack_int* ldv, + const lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_dtpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, + double* a, lapack_int* lda, double* b, lapack_int* ldb, + double* t, lapack_int* ldt, double* work, + lapack_int *info ); +void LAPACK_ctpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, + 
lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* t, lapack_complex_float* b, + lapack_int* ldb, lapack_int* ldt, + lapack_complex_float* work, lapack_int *info ); +void LAPACK_ztpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* work, lapack_int *info ); +void LAPACK_stpqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + float* b, lapack_int* ldb, float* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_dtpqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + double* b, lapack_int* ldb, double* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_ctpqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_complex_float* b, lapack_int* ldb, + lapack_complex_float* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_ztpqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, + lapack_complex_double* t, lapack_int* ldt, + lapack_int *info ); +void LAPACK_stprfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, + const float* v, lapack_int* ldv, const float* t, + lapack_int* ldt, float* a, lapack_int* lda, float* b, + lapack_int* ldb, const float* mywork, + lapack_int* myldwork ); +void LAPACK_dtprfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, + const double* v, lapack_int* ldv, const double* t, + lapack_int* ldt, double* a, lapack_int* lda, double* b, + lapack_int* ldb, const double* mywork, + lapack_int* myldwork ); +void LAPACK_ctprfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, + const lapack_complex_float* v, lapack_int* ldv, + const lapack_complex_float* t, lapack_int* ldt, + lapack_complex_float* a, lapack_int* lda, + lapack_complex_float* b, lapack_int* ldb, + const float* mywork, lapack_int* myldwork ); +void LAPACK_ztprfb( char* side, char* trans, char* direct, char* storev, + lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, + const lapack_complex_double* v, lapack_int* ldv, + const lapack_complex_double* t, lapack_int* ldt, + lapack_complex_double* a, lapack_int* lda, + lapack_complex_double* b, lapack_int* ldb, + const double* mywork, lapack_int* myldwork ); +// LAPACK 3.X.X +void LAPACK_csyr( char* uplo, lapack_int* n, lapack_complex_float* alpha, + const lapack_complex_float* x, lapack_int* incx, + lapack_complex_float* a, lapack_int* lda ); +void LAPACK_zsyr( char* uplo, lapack_int* n, lapack_complex_double* alpha, + const lapack_complex_double* x, lapack_int* incx, + lapack_complex_double* a, lapack_int* lda ); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _LAPACKE_H_ */ + +#endif /* _MKL_LAPACKE_H_ */ diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke_mangling.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke_mangling.h new file mode 100644 index 000000000..6211fd144 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/misc/lapacke_mangling.h @@ -0,0 +1,17 @@ +#ifndef LAPACK_HEADER_INCLUDED +#define 
LAPACK_HEADER_INCLUDED
+
+#ifndef LAPACK_GLOBAL
+#if defined(LAPACK_GLOBAL_PATTERN_LC) || defined(ADD_)
+#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_
+#elif defined(LAPACK_GLOBAL_PATTERN_UC) || defined(UPPER)
+#define LAPACK_GLOBAL(lcname,UCNAME) UCNAME
+#elif defined(LAPACK_GLOBAL_PATTERN_MC) || defined(NOCHANGE)
+#define LAPACK_GLOBAL(lcname,UCNAME) lcname
+#else
+#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_
+#endif
+#endif
+
+#endif
+
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
new file mode 100644
index 000000000..1f8a531af
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -0,0 +1,332 @@
+
+/** \returns an expression of the coefficient-wise product of \c *this and \a other
+ *
+ * \sa MatrixBase::cwiseProduct
+ */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
+operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise quotient of \c *this and \a other
+ *
+ * \sa MatrixBase::cwiseQuotient
+ */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>
+operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of \c *this and \a other
+ *
+ * Example: \include Cwise_min.cpp
+ * Output: \verbinclude Cwise_min.out
+ *
+ * \sa max()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(min,min)
+
+/** \returns an expression of the coefficient-wise min of \c *this and scalar \a other
+ *
+ * \sa max()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const Scalar &other) const
+{
+  return (min)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
+/** \returns an expression of the coefficient-wise max of \c *this and \a other
+ *
+ * Example: \include Cwise_max.cpp
+ * Output: \verbinclude Cwise_max.out
+ *
+ * \sa min()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(max,max)
+
+/** \returns an expression of the coefficient-wise max of \c *this and scalar \a other
+ *
+ * \sa min()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const Scalar &other) const
+{
+  return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
+/** \returns an expression of the coefficient-wise power of \c *this to the given array of \a exponents.
+ *
+ * This function computes the coefficient-wise power.
+ *
+ * Example: \include Cwise_array_power_array.cpp
+ * Output: \verbinclude Cwise_array_power_array.out
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(pow,pow)
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(pow,pow)
+#else
+/** \returns an expression of the coefficients of \c *this raised to the constant power \a exponent
+ *
+ * \tparam T is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression.
+ *
+ * This function computes the coefficient-wise power. The function MatrixBase::pow() in the
+ * unsupported module MatrixFunctions computes the matrix power.
+ *
+ * Example: \include Cwise_pow.cpp
+ * Output: \verbinclude Cwise_pow.out
+ *
+ * \sa ArrayBase::pow(ArrayBase), square(), cube(), exp(), log()
+ */
+template<typename T>
+const CwiseBinaryOp<internal::scalar_pow_op<Scalar,T>,Derived,Constant<T> > pow(const T& exponent) const;
+#endif
+
+
+// TODO code generating macros could be moved to Macros.h and could include generation of documentation
+#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
+}\
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
+} \
+EIGEN_DEVICE_FUNC friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
+}
+
+#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
+} \
+EIGEN_DEVICE_FUNC \
+inline const RCmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
+} \
+friend inline const Cmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
+}
+
+
+/** \returns an expression of the coefficient-wise \< operator of *this and \a other
+ *
+ * Example: \include Cwise_less.cpp
+ * Output: \verbinclude Cwise_less.out
+ *
+ * \sa all(), any(), operator>(), operator<=()
+ */
+EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)
+
+/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
+ *
+ * Example: \include Cwise_less_equal.cpp
+ * Output: \verbinclude Cwise_less_equal.out
+ *
+ * \sa all(), any(), operator>=(), operator<()
+ */
+EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)
+
+/** \returns an expression of the coefficient-wise \> operator of *this and \a other
+ *
+ * Example: \include Cwise_greater.cpp
+ * Output: \verbinclude Cwise_greater.out
+ *
+ * \sa all(), any(), operator>=(), operator<()
+ */
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)
+
+/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
+ *
+ * Example: \include Cwise_greater_equal.cpp
+ * Output: \verbinclude Cwise_greater_equal.out
+ *
+ * \sa all(), any(), operator>(), operator<=()
+ */
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include Cwise_equal_equal.cpp
+ * Output: \verbinclude Cwise_equal_equal.out
+ *
+ * \sa all(), any(), isApprox(), isMuchSmallerThan()
+ */
+EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include Cwise_not_equal.cpp
+ * Output: \verbinclude Cwise_not_equal.out
+ *
+ * \sa all(), any(), isApprox(), isMuchSmallerThan()
+ */
+EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
+
+
+#undef EIGEN_MAKE_CWISE_COMP_OP
+#undef EIGEN_MAKE_CWISE_COMP_R_OP
+
+// scalar addition
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+EIGEN_MAKE_SCALAR_BINARY_OP(operator+,sum)
+#else
+/** \returns an expression of \c *this with each coeff incremented by the constant \a scalar
+ *
+ * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
+ *
+ * Example: \include Cwise_plus.cpp
+ * Output: \verbinclude Cwise_plus.out
+ *
+ * \sa operator+=(), operator-()
+ */
+template<typename T>
+const CwiseBinaryOp<internal::scalar_sum_op<Scalar,T>,Derived,Constant<T> > operator+(const T& scalar) const;
+/** \returns an expression of \a expr with each coeff incremented by the constant \a scalar
+ *
+ * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
+ */
+template<typename T> friend
+const CwiseBinaryOp<internal::scalar_sum_op<T,Scalar>,Constant<T>,Derived> operator+(const T& scalar, const StorageBaseType& expr);
+#endif
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+EIGEN_MAKE_SCALAR_BINARY_OP(operator-,difference)
+#else
+/** \returns an expression of \c *this with each coeff decremented by the constant \a scalar
+ *
+ * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
+ *
+ * Example: \include Cwise_minus.cpp
+ * Output: \verbinclude Cwise_minus.out
+ *
+ * \sa operator+=(), operator-()
+ */
+template<typename T>
+const CwiseBinaryOp<internal::scalar_difference_op<Scalar,T>,Derived,Constant<T> > operator-(const T& scalar) const;
+/** \returns an expression of the constant matrix of value \a scalar decremented by the coefficients of \a expr
+ *
+ * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
+ */
+template<typename T> friend
+const CwiseBinaryOp<internal::scalar_difference_op<T,Scalar>,Constant<T>,Derived> operator-(const T& scalar, const StorageBaseType& expr);
+#endif
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(operator/,quotient)
+#else
+  /**
+    * \brief Component-wise division of the scalar \a s by array elements of \a a.
+    *
+    * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression (\c Derived::Scalar).
+    */
+  template<typename T> friend
+  inline const CwiseBinaryOp<internal::scalar_quotient_op<T,Scalar>,Constant<T>,Derived>
+  operator/(const T& s,const StorageBaseType& a);
+#endif
+
+/** \returns an expression of the coefficient-wise ^ operator of *this and \a other
+ *
+ * \warning this operator is for expressions of bool only.
+ *
+ * Example: \include Cwise_boolean_xor.cpp
+ * Output: \verbinclude Cwise_boolean_xor.out
+ *
+ * \sa operator&&(), select()
+ */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+inline const CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>
+operator^(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
+                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
+  return CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>(derived(),other.derived());
+}
+
+// NOTE disabled until we agree on argument order
+#if 0
+/** \cpp11 \returns an expression of the coefficient-wise polygamma function.
+ *
+ * \specialfunctions_module
+ *
+ * It returns the \a n -th derivative of the digamma(psi) evaluated at \c *this.
+ *
+ * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x)
+ *
+ * \sa Eigen::polygamma()
+ */
+template<typename DerivedN>
+inline const CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>
+polygamma(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedN> &n) const
+{
+  return CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>(n.derived(), this->derived());
+}
+#endif
+
+/** \returns an expression of the coefficient-wise zeta function.
+ *
+ * \specialfunctions_module
+ *
+ * It returns the Riemann zeta function of two arguments \c *this and \a q:
+ *
+ * \param *this is the exponent, it must be > 1
+ * \param q is the shift, it must be > 0
+ *
+ * \note This function supports only float and double scalar types. To support other scalar types, the user has
+ * to provide implementations of zeta(T,T) for any scalar type T to be supported.
+ *
+ * This method is an alias for zeta(*this,q);
+ *
+ * \sa Eigen::zeta()
+ */
+template<typename DerivedQ>
+inline const CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>
+zeta(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedQ> &q) const
+{
+  return CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>(this->derived(), q.derived());
+}
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
new file mode 100644
index 000000000..ebaa3f192
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -0,0 +1,552 @@
+
+
+typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> AbsReturnType;
+typedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> ArgReturnType;
+typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> Abs2ReturnType;
+typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> SqrtReturnType;
+typedef CwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived> RsqrtReturnType;
+typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> SignReturnType;
+typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> InverseReturnType;
+typedef CwiseUnaryOp<internal::scalar_boolean_not_op<Scalar>, const Derived> BooleanNotReturnType;
+
+typedef CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived> ExpReturnType;
+typedef CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived> LogReturnType;
+typedef CwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived> Log1pReturnType;
+typedef CwiseUnaryOp<internal::scalar_log10_op<Scalar>, const Derived> Log10ReturnType;
+typedef CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived> CosReturnType;
+typedef CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived> SinReturnType;
+typedef CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived> TanReturnType;
+typedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturnType;
+typedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType;
+typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType;
+typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;
+typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;
+typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;
+typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;
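+// Usage sketch (not from the upstream file): each ReturnType typedef above and
+// below names the lazy expression returned by the matching coefficient-wise
+// method, e.g. x.square() yields a SquareReturnType that is only evaluated on
+// assignment:
+//   Eigen::ArrayXf x(3); x << 1.f, 2.f, 3.f;
+//   Eigen::ArrayXf y = x.square() + x.abs();  // evaluated coefficient-wise here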
+typedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType;
+typedef CwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived> RoundReturnType;
+typedef CwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived> FloorReturnType;
+typedef CwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived> CeilReturnType;
+typedef CwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived> IsNaNReturnType;
+typedef CwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived> IsInfReturnType;
+typedef CwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived> IsFiniteReturnType;
+
+/** \returns an expression of the coefficient-wise absolute value of \c *this
+ *
+ * Example: \include Cwise_abs.cpp
+ * Output: \verbinclude Cwise_abs.out
+ *
+ * \sa Math functions, abs2()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const AbsReturnType
+abs() const
+{
+  return AbsReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise phase angle of \c *this
+ *
+ * Example: \include Cwise_arg.cpp
+ * Output: \verbinclude Cwise_arg.out
+ *
+ * \sa abs()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const ArgReturnType
+arg() const
+{
+  return ArgReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise squared absolute value of \c *this
+ *
+ * Example: \include Cwise_abs2.cpp
+ * Output: \verbinclude Cwise_abs2.out
+ *
+ * \sa Math functions, abs(), square()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const Abs2ReturnType
+abs2() const
+{
+  return Abs2ReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise exponential of *this.
+ *
+ * This function computes the coefficient-wise exponential. The function MatrixBase::exp() in the
+ * unsupported module MatrixFunctions computes the matrix exponential.
+ *
+ * Example: \include Cwise_exp.cpp
+ * Output: \verbinclude Cwise_exp.out
+ *
+ * \sa Math functions, pow(), log(), sin(), cos()
+ */
+EIGEN_DEVICE_FUNC
+inline const ExpReturnType
+exp() const
+{
+  return ExpReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise logarithm of *this.
+ *
+ * This function computes the coefficient-wise logarithm. The function MatrixBase::log() in the
+ * unsupported module MatrixFunctions computes the matrix logarithm.
+ *
+ * Example: \include Cwise_log.cpp
+ * Output: \verbinclude Cwise_log.out
+ *
+ * \sa Math functions, exp()
+ */
+EIGEN_DEVICE_FUNC
+inline const LogReturnType
+log() const
+{
+  return LogReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise logarithm of 1 plus \c *this.
+ *
+ * In exact arithmetic, \c x.log1p() is equivalent to \c (x+1).log(),
+ * however, with finite precision, this function is much more accurate when \c x is close to zero.
+ *
+ * \sa Math functions, log()
+ */
+EIGEN_DEVICE_FUNC
+inline const Log1pReturnType
+log1p() const
+{
+  return Log1pReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise base-10 logarithm of *this.
+ *
+ * This function computes the coefficient-wise base-10 logarithm.
+ *
+ * Example: \include Cwise_log10.cpp
+ * Output: \verbinclude Cwise_log10.out
+ *
+ * \sa Math functions, log()
+ */
+EIGEN_DEVICE_FUNC
+inline const Log10ReturnType
+log10() const
+{
+  return Log10ReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise square root of *this.
+ *
+ * This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the
+ * unsupported module MatrixFunctions computes the matrix square root.
+ * + * Example: \include Cwise_sqrt.cpp + * Output: \verbinclude Cwise_sqrt.out + * + * \sa Math functions, pow(), square() + */ +EIGEN_DEVICE_FUNC +inline const SqrtReturnType +sqrt() const +{ + return SqrtReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise inverse square root of *this. + * + * This function computes the coefficient-wise inverse square root. + * + * Example: \include Cwise_sqrt.cpp + * Output: \verbinclude Cwise_sqrt.out + * + * \sa pow(), square() + */ +EIGEN_DEVICE_FUNC +inline const RsqrtReturnType +rsqrt() const +{ + return RsqrtReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise signum of *this. + * + * This function computes the coefficient-wise signum. + * + * Example: \include Cwise_sign.cpp + * Output: \verbinclude Cwise_sign.out + * + * \sa pow(), square() + */ +EIGEN_DEVICE_FUNC +inline const SignReturnType +sign() const +{ + return SignReturnType(derived()); +} + + +/** \returns an expression of the coefficient-wise cosine of *this. + * + * This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the + * unsupported module MatrixFunctions computes the matrix cosine. + * + * Example: \include Cwise_cos.cpp + * Output: \verbinclude Cwise_cos.out + * + * \sa Math functions, sin(), acos() + */ +EIGEN_DEVICE_FUNC +inline const CosReturnType +cos() const +{ + return CosReturnType(derived()); +} + + +/** \returns an expression of the coefficient-wise sine of *this. + * + * This function computes the coefficient-wise sine. The function MatrixBase::sin() in the + * unsupported module MatrixFunctions computes the matrix sine. + * + * Example: \include Cwise_sin.cpp + * Output: \verbinclude Cwise_sin.out + * + * \sa Math functions, cos(), asin() + */ +EIGEN_DEVICE_FUNC +inline const SinReturnType +sin() const +{ + return SinReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise tan of *this. + * + * Example: \include Cwise_tan.cpp + * Output: \verbinclude Cwise_tan.out + * + * \sa Math functions, cos(), sin() + */ +EIGEN_DEVICE_FUNC +inline const TanReturnType +tan() const +{ + return TanReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise arc tan of *this. + * + * Example: \include Cwise_atan.cpp + * Output: \verbinclude Cwise_atan.out + * + * \sa Math functions, tan(), asin(), acos() + */ +EIGEN_DEVICE_FUNC +inline const AtanReturnType +atan() const +{ + return AtanReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise arc cosine of *this. + * + * Example: \include Cwise_acos.cpp + * Output: \verbinclude Cwise_acos.out + * + * \sa Math functions, cos(), asin() + */ +EIGEN_DEVICE_FUNC +inline const AcosReturnType +acos() const +{ + return AcosReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise arc sine of *this. + * + * Example: \include Cwise_asin.cpp + * Output: \verbinclude Cwise_asin.out + * + * \sa Math functions, sin(), acos() + */ +EIGEN_DEVICE_FUNC +inline const AsinReturnType +asin() const +{ + return AsinReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise hyperbolic tan of *this. + * + * Example: \include Cwise_tanh.cpp + * Output: \verbinclude Cwise_tanh.out + * + * \sa Math functions, tan(), sinh(), cosh() + */ +EIGEN_DEVICE_FUNC +inline const TanhReturnType +tanh() const +{ + return TanhReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise hyperbolic sin of *this. 
+ * + * Example: \include Cwise_sinh.cpp + * Output: \verbinclude Cwise_sinh.out + * + * \sa Math functions, sin(), tanh(), cosh() + */ +EIGEN_DEVICE_FUNC +inline const SinhReturnType +sinh() const +{ + return SinhReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise hyperbolic cos of *this. + * + * Example: \include Cwise_cosh.cpp + * Output: \verbinclude Cwise_cosh.out + * + * \sa Math functions, tan(), sinh(), cosh() + */ +EIGEN_DEVICE_FUNC +inline const CoshReturnType +cosh() const +{ + return CoshReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise inverse of *this. + * + * Example: \include Cwise_inverse.cpp + * Output: \verbinclude Cwise_inverse.out + * + * \sa operator/(), operator*() + */ +EIGEN_DEVICE_FUNC +inline const InverseReturnType +inverse() const +{ + return InverseReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise square of *this. + * + * Example: \include Cwise_square.cpp + * Output: \verbinclude Cwise_square.out + * + * \sa Math functions, abs2(), cube(), pow() + */ +EIGEN_DEVICE_FUNC +inline const SquareReturnType +square() const +{ + return SquareReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise cube of *this. + * + * Example: \include Cwise_cube.cpp + * Output: \verbinclude Cwise_cube.out + * + * \sa Math functions, square(), pow() + */ +EIGEN_DEVICE_FUNC +inline const CubeReturnType +cube() const +{ + return CubeReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise round of *this. + * + * Example: \include Cwise_round.cpp + * Output: \verbinclude Cwise_round.out + * + * \sa Math functions, ceil(), floor() + */ +EIGEN_DEVICE_FUNC +inline const RoundReturnType +round() const +{ + return RoundReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise floor of *this. + * + * Example: \include Cwise_floor.cpp + * Output: \verbinclude Cwise_floor.out + * + * \sa Math functions, ceil(), round() + */ +EIGEN_DEVICE_FUNC +inline const FloorReturnType +floor() const +{ + return FloorReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise ceil of *this. + * + * Example: \include Cwise_ceil.cpp + * Output: \verbinclude Cwise_ceil.out + * + * \sa Math functions, floor(), round() + */ +EIGEN_DEVICE_FUNC +inline const CeilReturnType +ceil() const +{ + return CeilReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise isnan of *this. + * + * Example: \include Cwise_isNaN.cpp + * Output: \verbinclude Cwise_isNaN.out + * + * \sa isfinite(), isinf() + */ +EIGEN_DEVICE_FUNC +inline const IsNaNReturnType +isNaN() const +{ + return IsNaNReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise isinf of *this. + * + * Example: \include Cwise_isInf.cpp + * Output: \verbinclude Cwise_isInf.out + * + * \sa isnan(), isfinite() + */ +EIGEN_DEVICE_FUNC +inline const IsInfReturnType +isInf() const +{ + return IsInfReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise isfinite of *this. + * + * Example: \include Cwise_isFinite.cpp + * Output: \verbinclude Cwise_isFinite.out + * + * \sa isnan(), isinf() + */ +EIGEN_DEVICE_FUNC +inline const IsFiniteReturnType +isFinite() const +{ + return IsFiniteReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise ! operator of *this + * + * \warning this operator is for expression of bool only. 
+ * + * Example: \include Cwise_boolean_not.cpp + * Output: \verbinclude Cwise_boolean_not.out + * + * \sa operator!=() + */ +EIGEN_DEVICE_FUNC +inline const BooleanNotReturnType +operator!() const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return BooleanNotReturnType(derived()); +} + + +// --- SpecialFunctions module --- + +typedef CwiseUnaryOp, const Derived> LgammaReturnType; +typedef CwiseUnaryOp, const Derived> DigammaReturnType; +typedef CwiseUnaryOp, const Derived> ErfReturnType; +typedef CwiseUnaryOp, const Derived> ErfcReturnType; + +/** \cpp11 \returns an expression of the coefficient-wise ln(|gamma(*this)|). + * + * \specialfunctions_module + * + * Example: \include Cwise_lgamma.cpp + * Output: \verbinclude Cwise_lgamma.out + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of lgamma(T) for any scalar + * type T to be supported. + * + * \sa Math functions, digamma() + */ +EIGEN_DEVICE_FUNC +inline const LgammaReturnType +lgamma() const +{ + return LgammaReturnType(derived()); +} + +/** \returns an expression of the coefficient-wise digamma (psi, derivative of lgamma). + * + * \specialfunctions_module + * + * \note This function supports only float and double scalar types. To support other scalar types, + * the user has to provide implementations of digamma(T) for any scalar + * type T to be supported. + * + * \sa Math functions, Eigen::digamma(), Eigen::polygamma(), lgamma() + */ +EIGEN_DEVICE_FUNC +inline const DigammaReturnType +digamma() const +{ + return DigammaReturnType(derived()); +} + +/** \cpp11 \returns an expression of the coefficient-wise Gauss error + * function of *this. + * + * \specialfunctions_module + * + * Example: \include Cwise_erf.cpp + * Output: \verbinclude Cwise_erf.out + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of erf(T) for any scalar + * type T to be supported. + * + * \sa Math functions, erfc() + */ +EIGEN_DEVICE_FUNC +inline const ErfReturnType +erf() const +{ + return ErfReturnType(derived()); +} + +/** \cpp11 \returns an expression of the coefficient-wise Complementary error + * function of *this. + * + * \specialfunctions_module + * + * Example: \include Cwise_erfc.cpp + * Output: \verbinclude Cwise_erfc.out + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of erfc(T) for any scalar + * type T to be supported. + * + * \sa Math functions, erf() + */ +EIGEN_DEVICE_FUNC +inline const ErfcReturnType +erfc() const +{ + return ErfcReturnType(derived()); +} diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/BlockMethods.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/BlockMethods.h new file mode 100644 index 000000000..ac35a0086 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/BlockMethods.h @@ -0,0 +1,1058 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/// \internal expression type of a column */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr;
+/// \internal expression type of a row */
+typedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr;
+typedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr;
+/// \internal expression type of a block of whole columns */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr;
+/// \internal expression type of a block of whole rows */
+typedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr;
+typedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr;
+/// \internal expression type of a block of whole columns */
+template<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+/// \internal expression type of a block of whole rows */
+template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+/// \internal expression of a block */
+typedef Block<Derived> BlockXpr;
+typedef const Block<const Derived> ConstBlockXpr;
+/// \internal expression of a block of fixed sizes */
+template<int Rows, int Cols> struct FixedBlockXpr { typedef Block<Derived,Rows,Cols> Type; };
+template<int Rows, int Cols> struct ConstFixedBlockXpr { typedef Block<const Derived,Rows,Cols> Type; };
+
+typedef VectorBlock<Derived> SegmentReturnType;
+typedef const VectorBlock<const Derived> ConstSegmentReturnType;
+template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
+template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/// \returns a dynamic-size expression of a block in *this.
+///
+/// \param startRow the first row in the block
+/// \param startCol the first column in the block
+/// \param blockRows the number of rows in the block
+/// \param blockCols the number of columns in the block
+///
+/// Example: \include MatrixBase_block_int_int_int_int.cpp
+/// Output: \verbinclude MatrixBase_block_int_int_int_int.out
+///
+/// \note Even though the returned expression has dynamic size, in the case
+/// when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
+/// which means that evaluating it does not cause a dynamic memory allocation.
+///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
+///
+/// \sa class Block, block(Index,Index)
+///
+EIGEN_DEVICE_FUNC
+inline BlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols)
+{
+  return BlockXpr(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+/// This is the const version of block(Index,Index,Index,Index). */
+EIGEN_DEVICE_FUNC
+inline const ConstBlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
+{
+  return ConstBlockXpr(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+
+
+
+/// \returns a dynamic-size expression of a top-right corner of *this.
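+///
+/// A minimal sketch, assuming a 4x4 integer matrix:
+/// \code
+/// Eigen::Matrix4i m = Eigen::Matrix4i::Zero();
+/// m.topRightCorner(2, 3).setConstant(7); // rows 0-1, the 3 rightmost columns
+/// \endcode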
+/// +/// \param cRows the number of rows in the corner +/// \param cCols the number of columns in the corner +/// +/// Example: \include MatrixBase_topRightCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_topRightCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline BlockXpr topRightCorner(Index cRows, Index cCols) +{ + return BlockXpr(derived(), 0, cols() - cCols, cRows, cCols); +} + +/// This is the const version of topRightCorner(Index, Index). +EIGEN_DEVICE_FUNC +inline const ConstBlockXpr topRightCorner(Index cRows, Index cCols) const +{ + return ConstBlockXpr(derived(), 0, cols() - cCols, cRows, cCols); +} + +/// \returns an expression of a fixed-size top-right corner of *this. +/// +/// \tparam CRows the number of rows in the corner +/// \tparam CCols the number of columns in the corner +/// +/// Example: \include MatrixBase_template_int_int_topRightCorner.cpp +/// Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedBlockXpr::Type topRightCorner() +{ + return typename FixedBlockXpr::Type(derived(), 0, cols() - CCols); +} + +/// This is the const version of topRightCorner(). +template +EIGEN_DEVICE_FUNC +inline const typename ConstFixedBlockXpr::Type topRightCorner() const +{ + return typename ConstFixedBlockXpr::Type(derived(), 0, cols() - CCols); +} + +/// \returns an expression of a top-right corner of *this. +/// +/// \tparam CRows number of rows in corner as specified at compile-time +/// \tparam CCols number of columns in corner as specified at compile-time +/// \param cRows number of rows in corner as specified at run-time +/// \param cCols number of columns in corner as specified at run-time +/// +/// This function is mainly useful for corners where the number of rows is specified at compile-time +/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time +/// information should not contradict. In other words, \a cRows should equal \a CRows unless +/// \a CRows is \a Dynamic, and the same for the number of columns. +/// +/// Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block +/// +template +inline typename FixedBlockXpr::Type topRightCorner(Index cRows, Index cCols) +{ + return typename FixedBlockXpr::Type(derived(), 0, cols() - cCols, cRows, cCols); +} + +/// This is the const version of topRightCorner(Index, Index). +template +inline const typename ConstFixedBlockXpr::Type topRightCorner(Index cRows, Index cCols) const +{ + return typename ConstFixedBlockXpr::Type(derived(), 0, cols() - cCols, cRows, cCols); +} + + + +/// \returns a dynamic-size expression of a top-left corner of *this. 
+/// +/// \param cRows the number of rows in the corner +/// \param cCols the number of columns in the corner +/// +/// Example: \include MatrixBase_topLeftCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_topLeftCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline BlockXpr topLeftCorner(Index cRows, Index cCols) +{ + return BlockXpr(derived(), 0, 0, cRows, cCols); +} + +/// This is the const version of topLeftCorner(Index, Index). +EIGEN_DEVICE_FUNC +inline const ConstBlockXpr topLeftCorner(Index cRows, Index cCols) const +{ + return ConstBlockXpr(derived(), 0, 0, cRows, cCols); +} + +/// \returns an expression of a fixed-size top-left corner of *this. +/// +/// The template parameters CRows and CCols are the number of rows and columns in the corner. +/// +/// Example: \include MatrixBase_template_int_int_topLeftCorner.cpp +/// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedBlockXpr::Type topLeftCorner() +{ + return typename FixedBlockXpr::Type(derived(), 0, 0); +} + +/// This is the const version of topLeftCorner(). +template +EIGEN_DEVICE_FUNC +inline const typename ConstFixedBlockXpr::Type topLeftCorner() const +{ + return typename ConstFixedBlockXpr::Type(derived(), 0, 0); +} + +/// \returns an expression of a top-left corner of *this. +/// +/// \tparam CRows number of rows in corner as specified at compile-time +/// \tparam CCols number of columns in corner as specified at compile-time +/// \param cRows number of rows in corner as specified at run-time +/// \param cCols number of columns in corner as specified at run-time +/// +/// This function is mainly useful for corners where the number of rows is specified at compile-time +/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time +/// information should not contradict. In other words, \a cRows should equal \a CRows unless +/// \a CRows is \a Dynamic, and the same for the number of columns. +/// +/// Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block +/// +template +inline typename FixedBlockXpr::Type topLeftCorner(Index cRows, Index cCols) +{ + return typename FixedBlockXpr::Type(derived(), 0, 0, cRows, cCols); +} + +/// This is the const version of topLeftCorner(Index, Index). +template +inline const typename ConstFixedBlockXpr::Type topLeftCorner(Index cRows, Index cCols) const +{ + return typename ConstFixedBlockXpr::Type(derived(), 0, 0, cRows, cCols); +} + + + +/// \returns a dynamic-size expression of a bottom-right corner of *this. +/// +/// \param cRows the number of rows in the corner +/// \param cCols the number of columns in the corner +/// +/// Example: \include MatrixBase_bottomRightCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline BlockXpr bottomRightCorner(Index cRows, Index cCols) +{ + return BlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols); +} + +/// This is the const version of bottomRightCorner(Index, Index). 
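+///
+/// An illustrative sketch with run-time sizes:
+/// \code
+/// Eigen::MatrixXd m = Eigen::MatrixXd::Ones(5, 5);
+/// Eigen::MatrixXd br = m.bottomRightCorner(2, 2); // trailing 2x2 block
+/// \endcode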
+EIGEN_DEVICE_FUNC +inline const ConstBlockXpr bottomRightCorner(Index cRows, Index cCols) const +{ + return ConstBlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols); +} + +/// \returns an expression of a fixed-size bottom-right corner of *this. +/// +/// The template parameters CRows and CCols are the number of rows and columns in the corner. +/// +/// Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp +/// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedBlockXpr::Type bottomRightCorner() +{ + return typename FixedBlockXpr::Type(derived(), rows() - CRows, cols() - CCols); +} + +/// This is the const version of bottomRightCorner(). +template +EIGEN_DEVICE_FUNC +inline const typename ConstFixedBlockXpr::Type bottomRightCorner() const +{ + return typename ConstFixedBlockXpr::Type(derived(), rows() - CRows, cols() - CCols); +} + +/// \returns an expression of a bottom-right corner of *this. +/// +/// \tparam CRows number of rows in corner as specified at compile-time +/// \tparam CCols number of columns in corner as specified at compile-time +/// \param cRows number of rows in corner as specified at run-time +/// \param cCols number of columns in corner as specified at run-time +/// +/// This function is mainly useful for corners where the number of rows is specified at compile-time +/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time +/// information should not contradict. In other words, \a cRows should equal \a CRows unless +/// \a CRows is \a Dynamic, and the same for the number of columns. +/// +/// Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block +/// +template +inline typename FixedBlockXpr::Type bottomRightCorner(Index cRows, Index cCols) +{ + return typename FixedBlockXpr::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); +} + +/// This is the const version of bottomRightCorner(Index, Index). +template +inline const typename ConstFixedBlockXpr::Type bottomRightCorner(Index cRows, Index cCols) const +{ + return typename ConstFixedBlockXpr::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); +} + + + +/// \returns a dynamic-size expression of a bottom-left corner of *this. +/// +/// \param cRows the number of rows in the corner +/// \param cCols the number of columns in the corner +/// +/// Example: \include MatrixBase_bottomLeftCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline BlockXpr bottomLeftCorner(Index cRows, Index cCols) +{ + return BlockXpr(derived(), rows() - cRows, 0, cRows, cCols); +} + +/// This is the const version of bottomLeftCorner(Index, Index). +EIGEN_DEVICE_FUNC +inline const ConstBlockXpr bottomLeftCorner(Index cRows, Index cCols) const +{ + return ConstBlockXpr(derived(), rows() - cRows, 0, cRows, cCols); +} + +/// \returns an expression of a fixed-size bottom-left corner of *this. +/// +/// The template parameters CRows and CCols are the number of rows and columns in the corner. 
+/// +/// Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp +/// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedBlockXpr::Type bottomLeftCorner() +{ + return typename FixedBlockXpr::Type(derived(), rows() - CRows, 0); +} + +/// This is the const version of bottomLeftCorner(). +template +EIGEN_DEVICE_FUNC +inline const typename ConstFixedBlockXpr::Type bottomLeftCorner() const +{ + return typename ConstFixedBlockXpr::Type(derived(), rows() - CRows, 0); +} + +/// \returns an expression of a bottom-left corner of *this. +/// +/// \tparam CRows number of rows in corner as specified at compile-time +/// \tparam CCols number of columns in corner as specified at compile-time +/// \param cRows number of rows in corner as specified at run-time +/// \param cCols number of columns in corner as specified at run-time +/// +/// This function is mainly useful for corners where the number of rows is specified at compile-time +/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time +/// information should not contradict. In other words, \a cRows should equal \a CRows unless +/// \a CRows is \a Dynamic, and the same for the number of columns. +/// +/// Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp +/// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block +/// +template +inline typename FixedBlockXpr::Type bottomLeftCorner(Index cRows, Index cCols) +{ + return typename FixedBlockXpr::Type(derived(), rows() - cRows, 0, cRows, cCols); +} + +/// This is the const version of bottomLeftCorner(Index, Index). +template +inline const typename ConstFixedBlockXpr::Type bottomLeftCorner(Index cRows, Index cCols) const +{ + return typename ConstFixedBlockXpr::Type(derived(), rows() - cRows, 0, cRows, cCols); +} + + + +/// \returns a block consisting of the top rows of *this. +/// +/// \param n the number of rows in the block +/// +/// Example: \include MatrixBase_topRows_int.cpp +/// Output: \verbinclude MatrixBase_topRows_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline RowsBlockXpr topRows(Index n) +{ + return RowsBlockXpr(derived(), 0, 0, n, cols()); +} + +/// This is the const version of topRows(Index). +EIGEN_DEVICE_FUNC +inline ConstRowsBlockXpr topRows(Index n) const +{ + return ConstRowsBlockXpr(derived(), 0, 0, n, cols()); +} + +/// \returns a block consisting of the top rows of *this. +/// +/// \tparam N the number of rows in the block as specified at compile-time +/// \param n the number of rows in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_topRows.cpp +/// Output: \verbinclude MatrixBase_template_int_topRows.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NRowsBlockXpr::Type topRows(Index n = N) +{ + return typename NRowsBlockXpr::Type(derived(), 0, 0, n, cols()); +} + +/// This is the const version of topRows(). 
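+///
+/// A brief sketch of the template form (row count fixed at compile time):
+/// \code
+/// Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 3);
+/// Eigen::MatrixXf t = m.topRows<2>(); // first two rows
+/// \endcode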
+template +EIGEN_DEVICE_FUNC +inline typename ConstNRowsBlockXpr::Type topRows(Index n = N) const +{ + return typename ConstNRowsBlockXpr::Type(derived(), 0, 0, n, cols()); +} + + + +/// \returns a block consisting of the bottom rows of *this. +/// +/// \param n the number of rows in the block +/// +/// Example: \include MatrixBase_bottomRows_int.cpp +/// Output: \verbinclude MatrixBase_bottomRows_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline RowsBlockXpr bottomRows(Index n) +{ + return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); +} + +/// This is the const version of bottomRows(Index). +EIGEN_DEVICE_FUNC +inline ConstRowsBlockXpr bottomRows(Index n) const +{ + return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols()); +} + +/// \returns a block consisting of the bottom rows of *this. +/// +/// \tparam N the number of rows in the block as specified at compile-time +/// \param n the number of rows in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_bottomRows.cpp +/// Output: \verbinclude MatrixBase_template_int_bottomRows.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NRowsBlockXpr::Type bottomRows(Index n = N) +{ + return typename NRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); +} + +/// This is the const version of bottomRows(). +template +EIGEN_DEVICE_FUNC +inline typename ConstNRowsBlockXpr::Type bottomRows(Index n = N) const +{ + return typename ConstNRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); +} + + + +/// \returns a block consisting of a range of rows of *this. +/// +/// \param startRow the index of the first row in the block +/// \param n the number of rows in the block +/// +/// Example: \include DenseBase_middleRows_int.cpp +/// Output: \verbinclude DenseBase_middleRows_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline RowsBlockXpr middleRows(Index startRow, Index n) +{ + return RowsBlockXpr(derived(), startRow, 0, n, cols()); +} + +/// This is the const version of middleRows(Index,Index). +EIGEN_DEVICE_FUNC +inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const +{ + return ConstRowsBlockXpr(derived(), startRow, 0, n, cols()); +} + +/// \returns a block consisting of a range of rows of *this. +/// +/// \tparam N the number of rows in the block as specified at compile-time +/// \param startRow the index of the first row in the block +/// \param n the number of rows in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include DenseBase_template_int_middleRows.cpp +/// Output: \verbinclude DenseBase_template_int_middleRows.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NRowsBlockXpr::Type middleRows(Index startRow, Index n = N) +{ + return typename NRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); +} + +/// This is the const version of middleRows(). 
+template +EIGEN_DEVICE_FUNC +inline typename ConstNRowsBlockXpr::Type middleRows(Index startRow, Index n = N) const +{ + return typename ConstNRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); +} + + + +/// \returns a block consisting of the left columns of *this. +/// +/// \param n the number of columns in the block +/// +/// Example: \include MatrixBase_leftCols_int.cpp +/// Output: \verbinclude MatrixBase_leftCols_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline ColsBlockXpr leftCols(Index n) +{ + return ColsBlockXpr(derived(), 0, 0, rows(), n); +} + +/// This is the const version of leftCols(Index). +EIGEN_DEVICE_FUNC +inline ConstColsBlockXpr leftCols(Index n) const +{ + return ConstColsBlockXpr(derived(), 0, 0, rows(), n); +} + +/// \returns a block consisting of the left columns of *this. +/// +/// \tparam N the number of columns in the block as specified at compile-time +/// \param n the number of columns in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_leftCols.cpp +/// Output: \verbinclude MatrixBase_template_int_leftCols.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NColsBlockXpr::Type leftCols(Index n = N) +{ + return typename NColsBlockXpr::Type(derived(), 0, 0, rows(), n); +} + +/// This is the const version of leftCols(). +template +EIGEN_DEVICE_FUNC +inline typename ConstNColsBlockXpr::Type leftCols(Index n = N) const +{ + return typename ConstNColsBlockXpr::Type(derived(), 0, 0, rows(), n); +} + + + +/// \returns a block consisting of the right columns of *this. +/// +/// \param n the number of columns in the block +/// +/// Example: \include MatrixBase_rightCols_int.cpp +/// Output: \verbinclude MatrixBase_rightCols_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline ColsBlockXpr rightCols(Index n) +{ + return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); +} + +/// This is the const version of rightCols(Index). +EIGEN_DEVICE_FUNC +inline ConstColsBlockXpr rightCols(Index n) const +{ + return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n); +} + +/// \returns a block consisting of the right columns of *this. +/// +/// \tparam N the number of columns in the block as specified at compile-time +/// \param n the number of columns in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_rightCols.cpp +/// Output: \verbinclude MatrixBase_template_int_rightCols.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NColsBlockXpr::Type rightCols(Index n = N) +{ + return typename NColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); +} + +/// This is the const version of rightCols(). 
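+///
+/// A short sketch, assuming a 3x4 matrix:
+/// \code
+/// Eigen::MatrixXd m = Eigen::MatrixXd::Zero(3, 4);
+/// m.rightCols(2).setOnes(); // last two columns
+/// \endcode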
+template +EIGEN_DEVICE_FUNC +inline typename ConstNColsBlockXpr::Type rightCols(Index n = N) const +{ + return typename ConstNColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); +} + + + +/// \returns a block consisting of a range of columns of *this. +/// +/// \param startCol the index of the first column in the block +/// \param numCols the number of columns in the block +/// +/// Example: \include DenseBase_middleCols_int.cpp +/// Output: \verbinclude DenseBase_middleCols_int.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +EIGEN_DEVICE_FUNC +inline ColsBlockXpr middleCols(Index startCol, Index numCols) +{ + return ColsBlockXpr(derived(), 0, startCol, rows(), numCols); +} + +/// This is the const version of middleCols(Index,Index). +EIGEN_DEVICE_FUNC +inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const +{ + return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols); +} + +/// \returns a block consisting of a range of columns of *this. +/// +/// \tparam N the number of columns in the block as specified at compile-time +/// \param startCol the index of the first column in the block +/// \param n the number of columns in the block as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include DenseBase_template_int_middleCols.cpp +/// Output: \verbinclude DenseBase_template_int_middleCols.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename NColsBlockXpr::Type middleCols(Index startCol, Index n = N) +{ + return typename NColsBlockXpr::Type(derived(), 0, startCol, rows(), n); +} + +/// This is the const version of middleCols(). +template +EIGEN_DEVICE_FUNC +inline typename ConstNColsBlockXpr::Type middleCols(Index startCol, Index n = N) const +{ + return typename ConstNColsBlockXpr::Type(derived(), 0, startCol, rows(), n); +} + + + +/// \returns a fixed-size expression of a block in *this. +/// +/// The template parameters \a NRows and \a NCols are the number of +/// rows and columns in the block. +/// +/// \param startRow the first row in the block +/// \param startCol the first column in the block +/// +/// Example: \include MatrixBase_block_int_int.cpp +/// Output: \verbinclude MatrixBase_block_int_int.out +/// +/// \note since block is a templated member, the keyword template has to be used +/// if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedBlockXpr::Type block(Index startRow, Index startCol) +{ + return typename FixedBlockXpr::Type(derived(), startRow, startCol); +} + +/// This is the const version of block<>(Index, Index). */ +template +EIGEN_DEVICE_FUNC +inline const typename ConstFixedBlockXpr::Type block(Index startRow, Index startCol) const +{ + return typename ConstFixedBlockXpr::Type(derived(), startRow, startCol); +} + +/// \returns an expression of a block in *this. 
+/// +/// \tparam NRows number of rows in block as specified at compile-time +/// \tparam NCols number of columns in block as specified at compile-time +/// \param startRow the first row in the block +/// \param startCol the first column in the block +/// \param blockRows number of rows in block as specified at run-time +/// \param blockCols number of columns in block as specified at run-time +/// +/// This function is mainly useful for blocks where the number of rows is specified at compile-time +/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time +/// information should not contradict. In other words, \a blockRows should equal \a NRows unless +/// \a NRows is \a Dynamic, and the same for the number of columns. +/// +/// Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp +/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp +/// +EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +/// +/// \sa class Block, block(Index,Index,Index,Index) +/// +template +inline typename FixedBlockXpr::Type block(Index startRow, Index startCol, + Index blockRows, Index blockCols) +{ + return typename FixedBlockXpr::Type(derived(), startRow, startCol, blockRows, blockCols); +} + +/// This is the const version of block<>(Index, Index, Index, Index). +template +inline const typename ConstFixedBlockXpr::Type block(Index startRow, Index startCol, + Index blockRows, Index blockCols) const +{ + return typename ConstFixedBlockXpr::Type(derived(), startRow, startCol, blockRows, blockCols); +} + +/// \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0. +/// +/// Example: \include MatrixBase_col.cpp +/// Output: \verbinclude MatrixBase_col.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) +/** + * \sa row(), class Block */ +EIGEN_DEVICE_FUNC +inline ColXpr col(Index i) +{ + return ColXpr(derived(), i); +} + +/// This is the const version of col(). +EIGEN_DEVICE_FUNC +inline ConstColXpr col(Index i) const +{ + return ConstColXpr(derived(), i); +} + +/// \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0. +/// +/// Example: \include MatrixBase_row.cpp +/// Output: \verbinclude MatrixBase_row.out +/// +EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) +/** + * \sa col(), class Block */ +EIGEN_DEVICE_FUNC +inline RowXpr row(Index i) +{ + return RowXpr(derived(), i); +} + +/// This is the const version of row(). */ +EIGEN_DEVICE_FUNC +inline ConstRowXpr row(Index i) const +{ + return ConstRowXpr(derived(), i); +} + +/// \returns a dynamic-size expression of a segment (i.e. a vector block) in *this. +/// +/// \only_for_vectors +/// +/// \param start the first coefficient in the segment +/// \param n the number of coefficients in the segment +/// +/// Example: \include MatrixBase_segment_int_int.cpp +/// Output: \verbinclude MatrixBase_segment_int_int.out +/// +/// \note Even though the returned expression has dynamic size, in the case +/// when it is applied to a fixed-size vector, it inherits a fixed maximal size, +/// which means that evaluating it does not cause a dynamic memory allocation. +/// +/// \sa class Block, segment(Index) +/// +EIGEN_DEVICE_FUNC +inline SegmentReturnType segment(Index start, Index n) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return SegmentReturnType(derived(), start, n); +} + + +/// This is the const version of segment(Index,Index). 
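+///
+/// A minimal sketch on a dynamic vector:
+/// \code
+/// Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0.0, 5.0);
+/// Eigen::VectorXd s = v.segment(1, 3); // coefficients at indices 1, 2, 3
+/// \endcode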
+EIGEN_DEVICE_FUNC +inline ConstSegmentReturnType segment(Index start, Index n) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return ConstSegmentReturnType(derived(), start, n); +} + +/// \returns a dynamic-size expression of the first coefficients of *this. +/// +/// \only_for_vectors +/// +/// \param n the number of coefficients in the segment +/// +/// Example: \include MatrixBase_start_int.cpp +/// Output: \verbinclude MatrixBase_start_int.out +/// +/// \note Even though the returned expression has dynamic size, in the case +/// when it is applied to a fixed-size vector, it inherits a fixed maximal size, +/// which means that evaluating it does not cause a dynamic memory allocation. +/// +/// \sa class Block, block(Index,Index) +/// +EIGEN_DEVICE_FUNC +inline SegmentReturnType head(Index n) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return SegmentReturnType(derived(), 0, n); +} + +/// This is the const version of head(Index). +EIGEN_DEVICE_FUNC +inline ConstSegmentReturnType head(Index n) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return ConstSegmentReturnType(derived(), 0, n); +} + +/// \returns a dynamic-size expression of the last coefficients of *this. +/// +/// \only_for_vectors +/// +/// \param n the number of coefficients in the segment +/// +/// Example: \include MatrixBase_end_int.cpp +/// Output: \verbinclude MatrixBase_end_int.out +/// +/// \note Even though the returned expression has dynamic size, in the case +/// when it is applied to a fixed-size vector, it inherits a fixed maximal size, +/// which means that evaluating it does not cause a dynamic memory allocation. +/// +/// \sa class Block, block(Index,Index) +/// +EIGEN_DEVICE_FUNC +inline SegmentReturnType tail(Index n) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return SegmentReturnType(derived(), this->size() - n, n); +} + +/// This is the const version of tail(Index). +EIGEN_DEVICE_FUNC +inline ConstSegmentReturnType tail(Index n) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return ConstSegmentReturnType(derived(), this->size() - n, n); +} + +/// \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this +/// +/// \only_for_vectors +/// +/// \tparam N the number of coefficients in the segment as specified at compile-time +/// \param start the index of the first element in the segment +/// \param n the number of coefficients in the segment as specified at compile-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_segment.cpp +/// Output: \verbinclude MatrixBase_template_int_segment.out +/// +/// \sa class Block +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedSegmentReturnType::Type segment(Index start, Index n = N) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename FixedSegmentReturnType::Type(derived(), start, n); +} + +/// This is the const version of segment(Index). +template +EIGEN_DEVICE_FUNC +inline typename ConstFixedSegmentReturnType::Type segment(Index start, Index n = N) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename ConstFixedSegmentReturnType::Type(derived(), start, n); +} + +/// \returns a fixed-size expression of the first coefficients of *this. 
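+///
+/// For example, a sketch of the fixed-size form:
+/// \code
+/// Eigen::VectorXf v(5);
+/// v << 1, 2, 3, 4, 5;
+/// Eigen::Vector3f h = v.head<3>(); // first three coefficients
+/// \endcode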
+/// +/// \only_for_vectors +/// +/// \tparam N the number of coefficients in the segment as specified at compile-time +/// \param n the number of coefficients in the segment as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_start.cpp +/// Output: \verbinclude MatrixBase_template_int_start.out +/// +/// \sa class Block +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedSegmentReturnType::Type head(Index n = N) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename FixedSegmentReturnType::Type(derived(), 0, n); +} + +/// This is the const version of head(). +template +EIGEN_DEVICE_FUNC +inline typename ConstFixedSegmentReturnType::Type head(Index n = N) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename ConstFixedSegmentReturnType::Type(derived(), 0, n); +} + +/// \returns a fixed-size expression of the last coefficients of *this. +/// +/// \only_for_vectors +/// +/// \tparam N the number of coefficients in the segment as specified at compile-time +/// \param n the number of coefficients in the segment as specified at run-time +/// +/// The compile-time and run-time information should not contradict. In other words, +/// \a n should equal \a N unless \a N is \a Dynamic. +/// +/// Example: \include MatrixBase_template_int_end.cpp +/// Output: \verbinclude MatrixBase_template_int_end.out +/// +/// \sa class Block +/// +template +EIGEN_DEVICE_FUNC +inline typename FixedSegmentReturnType::Type tail(Index n = N) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename FixedSegmentReturnType::Type(derived(), size() - n); +} + +/// This is the const version of tail. +template +EIGEN_DEVICE_FUNC +inline typename ConstFixedSegmentReturnType::Type tail(Index n = N) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename ConstFixedSegmentReturnType::Type(derived(), size() - n); +} diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h new file mode 100644 index 000000000..8b6730ede --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h @@ -0,0 +1,115 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This file is a base class plugin containing common coefficient wise functions. + +/** \returns an expression of the difference of \c *this and \a other + * + * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). + * + * \sa class CwiseBinaryOp, operator-=() + */ +EIGEN_MAKE_CWISE_BINARY_OP(operator-,difference) + +/** \returns an expression of the sum of \c *this and \a other + * + * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). 
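+ *
+ * A one-line usage sketch:
+ * \code
+ * Eigen::Vector3d c = Eigen::Vector3d(1, 2, 3) + Eigen::Vector3d(4, 5, 6); // 5, 7, 9
+ * \endcode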
+ * + * \sa class CwiseBinaryOp, operator+=() + */ +EIGEN_MAKE_CWISE_BINARY_OP(operator+,sum) + +/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other + * + * The template parameter \a CustomBinaryOp is the type of the functor + * of the custom operator (see class CwiseBinaryOp for an example) + * + * Here is an example illustrating the use of custom functors: + * \include class_CwiseBinaryOp.cpp + * Output: \verbinclude class_CwiseBinaryOp.out + * + * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct() + */ +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE const CwiseBinaryOp +binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other, const CustomBinaryOp& func = CustomBinaryOp()) const +{ + return CwiseBinaryOp(derived(), other.derived(), func); +} + + +#ifndef EIGEN_PARSED_BY_DOXYGEN +EIGEN_MAKE_SCALAR_BINARY_OP(operator*,product) +#else +/** \returns an expression of \c *this scaled by the scalar factor \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. + */ +template +const CwiseBinaryOp,Derived,Constant > operator*(const T& scalar) const; +/** \returns an expression of \a expr scaled by the scalar factor \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. + */ +template friend +const CwiseBinaryOp,Constant,Derived> operator*(const T& scalar, const StorageBaseType& expr); +#endif + + + +#ifndef EIGEN_PARSED_BY_DOXYGEN +EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient) +#else +/** \returns an expression of \c *this divided by the scalar value \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. + */ +template +const CwiseBinaryOp,Derived,Constant > operator/(const T& scalar) const; +#endif + +/** \returns an expression of the coefficient-wise boolean \b and operator of \c *this and \a other + * + * \warning this operator is for expression of bool only. + * + * Example: \include Cwise_boolean_and.cpp + * Output: \verbinclude Cwise_boolean_and.out + * + * \sa operator||(), select() + */ +template +EIGEN_DEVICE_FUNC +inline const CwiseBinaryOp +operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return CwiseBinaryOp(derived(),other.derived()); +} + +/** \returns an expression of the coefficient-wise boolean \b or operator of \c *this and \a other + * + * \warning this operator is for expression of bool only. 
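+ *
+ * A hypothetical sketch with arrays of bool:
+ * \code
+ * Eigen::Array<bool, 1, 3> u, v;
+ * u << true, false, false;
+ * v << false, false, true;
+ * Eigen::Array<bool, 1, 3> w = u || v; // true, false, true
+ * \endcode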
+ *
+ * Example: \include Cwise_boolean_or.cpp
+ * Output: \verbinclude Cwise_boolean_or.out
+ *
+ * \sa operator&&(), select()
+ */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+inline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
+operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
+                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
+  return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
+}
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h
new file mode 100644
index 000000000..89f4faaac
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h
@@ -0,0 +1,163 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is a base class plugin containing common coefficient wise functions.
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal the return type of conjugate() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+            const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,
+            const Derived&
+          >::type ConjugateReturnType;
+/** \internal the return type of real() const */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+            const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>,
+            const Derived&
+          >::type RealReturnType;
+/** \internal the return type of real() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+            CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>,
+            Derived&
+          >::type NonConstRealReturnType;
+/** \internal the return type of imag() const */
+typedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType;
+/** \internal the return type of imag() */
+typedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType;
+
+typedef CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> NegativeReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/// \returns an expression of the opposite of \c *this
+///
EIGEN_DOC_UNARY_ADDONS(operator-,opposite)
+///
+EIGEN_DEVICE_FUNC
+inline const NegativeReturnType
+operator-() const { return NegativeReturnType(derived()); }
+
+
+template<typename NewType> struct CastXpr { typedef typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<Scalar, NewType>, const Derived> >::type Type; };
+
+/// \returns an expression of \c *this with the \a Scalar type casted to
+/// \a NewScalar.
+///
+/// The template parameter \a NewScalar is the type we are casting the scalars to.
+///
EIGEN_DOC_UNARY_ADDONS(cast,conversion function)
+///
+/// \sa class CwiseUnaryOp
+///
+template<typename NewType>
+EIGEN_DEVICE_FUNC
+typename CastXpr<NewType>::Type
+cast() const
+{
+  return typename CastXpr<NewType>::Type(derived());
+}
+
+/// \returns an expression of the complex conjugate of \c *this.
+///
EIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate)
+///
+/// \sa Math functions, MatrixBase::adjoint()
+EIGEN_DEVICE_FUNC
+inline ConjugateReturnType
+conjugate() const
+{
+  return ConjugateReturnType(derived());
+}
+
+/// \returns a read-only expression of the real part of \c *this.
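+///
+/// A small sketch, assuming a complex-valued vector:
+/// \code
+/// Eigen::VectorXcd z(2);
+/// z << std::complex<double>(1, 2), std::complex<double>(3, -4);
+/// Eigen::VectorXd re = z.real(); // 1, 3
+/// \endcode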
+/// +EIGEN_DOC_UNARY_ADDONS(real,real part function) +/// +/// \sa imag() +EIGEN_DEVICE_FUNC +inline RealReturnType +real() const { return RealReturnType(derived()); } + +/// \returns an read-only expression of the imaginary part of \c *this. +/// +EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function) +/// +/// \sa real() +EIGEN_DEVICE_FUNC +inline const ImagReturnType +imag() const { return ImagReturnType(derived()); } + +/// \brief Apply a unary operator coefficient-wise +/// \param[in] func Functor implementing the unary operator +/// \tparam CustomUnaryOp Type of \a func +/// \returns An expression of a custom coefficient-wise unary operator \a func of *this +/// +/// The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions. +/// +/// Example: +/// \include class_CwiseUnaryOp_ptrfun.cpp +/// Output: \verbinclude class_CwiseUnaryOp_ptrfun.out +/// +/// Genuine functors allow for more possibilities, for instance it may contain a state. +/// +/// Example: +/// \include class_CwiseUnaryOp.cpp +/// Output: \verbinclude class_CwiseUnaryOp.out +/// +EIGEN_DOC_UNARY_ADDONS(unaryExpr,unary function) +/// +/// \sa unaryViewExpr, binaryExpr, class CwiseUnaryOp +/// +template +EIGEN_DEVICE_FUNC +inline const CwiseUnaryOp +unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const +{ + return CwiseUnaryOp(derived(), func); +} + +/// \returns an expression of a custom coefficient-wise unary operator \a func of *this +/// +/// The template parameter \a CustomUnaryOp is the type of the functor +/// of the custom unary operator. +/// +/// Example: +/// \include class_CwiseUnaryOp.cpp +/// Output: \verbinclude class_CwiseUnaryOp.out +/// +EIGEN_DOC_UNARY_ADDONS(unaryViewExpr,unary function) +/// +/// \sa unaryExpr, binaryExpr class CwiseUnaryOp +/// +template +EIGEN_DEVICE_FUNC +inline const CwiseUnaryView +unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const +{ + return CwiseUnaryView(derived(), func); +} + +/// \returns a non const expression of the real part of \c *this. +/// +EIGEN_DOC_UNARY_ADDONS(real,real part function) +/// +/// \sa imag() +EIGEN_DEVICE_FUNC +inline NonConstRealReturnType +real() { return NonConstRealReturnType(derived()); } + +/// \returns a non const expression of the imaginary part of \c *this. +/// +EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function) +/// +/// \sa real() +EIGEN_DEVICE_FUNC +inline NonConstImagReturnType +imag() { return NonConstImagReturnType(derived()); } diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h new file mode 100644 index 000000000..f1084abef --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h @@ -0,0 +1,152 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This file is a base class plugin containing matrix specifics coefficient wise functions. 
+
+/** \returns an expression of the Schur product (coefficient wise product) of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseProduct.cpp
+  * Output: \verbinclude MatrixBase_cwiseProduct.out
+  *
+  * \sa class CwiseBinaryOp, cwiseAbs2
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
+cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * Example: \include MatrixBase_cwiseEqual.cpp
+  * Output: \verbinclude MatrixBase_cwiseEqual.out
+  *
+  * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan()
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
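+  *
+  * A cautionary sketch contrasting exact and fuzzy comparison:
+  * \code
+  * Eigen::Vector2d a(1.0, 2.0), b = a;
+  * b(1) += 1e-15;
+  * bool anyDiff = a.cwiseNotEqual(b).any(); // exact test: true
+  * bool close   = a.isApprox(b);            // fuzzy test: also true
+  * \endcode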
+  *
+  * Example: \include MatrixBase_cwiseNotEqual.cpp
+  * Output: \verbinclude MatrixBase_cwiseNotEqual.out
+  *
+  * \sa cwiseEqual(), isApprox(), isMuchSmallerThan()
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseMin.cpp
+  * Output: \verbinclude MatrixBase_cwiseMin.out
+  *
+  * \sa class CwiseBinaryOp, max()
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>
+cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of *this and scalar \a other
+  *
+  * \sa class CwiseBinaryOp, min()
+  */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
+cwiseMin(const Scalar &other) const
+{
+  return cwiseMin(Derived::Constant(rows(), cols(), other));
+}
+
+/** \returns an expression of the coefficient-wise max of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseMax.cpp
+  * Output: \verbinclude MatrixBase_cwiseMax.out
+  *
+  * \sa class CwiseBinaryOp, min()
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>
+cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise max of *this and scalar \a other
+  *
+  * \sa class CwiseBinaryOp, min()
+  */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
+cwiseMax(const Scalar &other) const
+{
+  return cwiseMax(Derived::Constant(rows(), cols(), other));
+}
+
+
+/** \returns an expression of the coefficient-wise quotient of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseQuotient.cpp
+  * Output: \verbinclude MatrixBase_cwiseQuotient.out
+  *
+  * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse()
+  */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
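+  *
+  * A minimal sketch:
+  * \code
+  * Eigen::Vector3i v(0, 1, 0);
+  * Eigen::Index zeros = v.cwiseEqual(0).count(); // 2
+  * \endcode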
+  *
+  * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+  */
+EIGEN_DEVICE_FUNC
+inline const CwiseScalarEqualReturnType
+cwiseEqual(const Scalar& s) const
+{
+  return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>());
+}
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
new file mode 100644
index 000000000..b1be3d566
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -0,0 +1,85 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is included into the body of the base classes supporting matrix specific coefficient-wise functions.
+// This includes MatrixBase and SparseMatrixBase.
+
+
+typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;
+typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;
+typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;
+typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;
+typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;
+
+/// \returns an expression of the coefficient-wise absolute value of \c *this
+///
+/// Example: \include MatrixBase_cwiseAbs.cpp
+/// Output: \verbinclude MatrixBase_cwiseAbs.out
+///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value)
+///
+/// \sa cwiseAbs2()
+///
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseAbsReturnType
+cwiseAbs() const { return CwiseAbsReturnType(derived()); }
+
+/// \returns an expression of the coefficient-wise squared absolute value of \c *this
+///
+/// Example: \include MatrixBase_cwiseAbs2.cpp
+/// Output: \verbinclude MatrixBase_cwiseAbs2.out
+///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value)
+///
+/// \sa cwiseAbs()
+///
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseAbs2ReturnType
+cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); }
+
+/// \returns an expression of the coefficient-wise square root of *this.
+///
+/// Example: \include MatrixBase_cwiseSqrt.cpp
+/// Output: \verbinclude MatrixBase_cwiseSqrt.out
+///
EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root)
+///
+/// \sa cwisePow(), cwiseSquare()
+///
+EIGEN_DEVICE_FUNC
+inline const CwiseSqrtReturnType
+cwiseSqrt() const { return CwiseSqrtReturnType(derived()); }
+
+/// \returns an expression of the coefficient-wise signum of *this.
+///
+/// Example: \include MatrixBase_cwiseSign.cpp
+/// Output: \verbinclude MatrixBase_cwiseSign.out
+///
EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function)
+///
+EIGEN_DEVICE_FUNC
+inline const CwiseSignReturnType
+cwiseSign() const { return CwiseSignReturnType(derived()); }
+
+
+/// \returns an expression of the coefficient-wise inverse of *this.
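+///
+/// A one-line sketch:
+/// \code
+/// Eigen::Vector3d w = Eigen::Vector3d(1.0, 2.0, 4.0).cwiseInverse(); // 1, 0.5, 0.25
+/// \endcode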
+/// +/// Example: \include MatrixBase_cwiseInverse.cpp +/// Output: \verbinclude MatrixBase_cwiseInverse.out +/// +EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse) +/// +/// \sa cwiseProduct() +/// +EIGEN_DEVICE_FUNC +inline const CwiseInverseReturnType +cwiseInverse() const { return CwiseInverseReturnType(derived()); } + + diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/signature_of_eigen3_matrix_library b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/signature_of_eigen3_matrix_library new file mode 100644 index 000000000..80aaf4621 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/signature_of_eigen3_matrix_library @@ -0,0 +1 @@ +This file is just there as a signature to help identify directories containing Eigen3. When writing a script looking for Eigen3, just look for this file. This is especially useful to help disambiguate with Eigen2... diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AdolcForward b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AdolcForward new file mode 100644 index 000000000..15f5f0731 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AdolcForward @@ -0,0 +1,156 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ADLOC_FORWARD +#define EIGEN_ADLOC_FORWARD + +//-------------------------------------------------------------------------------- +// +// This file provides support for adolc's adouble type in forward mode. +// ADOL-C is a C++ automatic differentiation library, +// see https://projects.coin-or.org/ADOL-C for more information. +// +// Note that the maximal number of directions is controlled by +// the preprocessor token NUMBER_DIRECTIONS. The default is 2. +// +//-------------------------------------------------------------------------------- + +#define ADOLC_TAPELESS +#ifndef NUMBER_DIRECTIONS +# define NUMBER_DIRECTIONS 2 +#endif +#include + +// adolc defines some very stupid macros: +#if defined(malloc) +# undef malloc +#endif + +#if defined(calloc) +# undef calloc +#endif + +#if defined(realloc) +# undef realloc +#endif + +#include + +namespace Eigen { + +/** + * \defgroup AdolcForward_Module Adolc forward module + * This module provides support for adolc's adouble type in forward mode. + * ADOL-C is a C++ automatic differentiation library, + * see https://projects.coin-or.org/ADOL-C for more information. + * It mainly consists in: + * - a struct Eigen::NumTraits specialization + * - overloads of internal::* math function for adtl::adouble type. + * + * Note that the maximal number of directions is controlled by + * the preprocessor token NUMBER_DIRECTIONS. The default is 2. 
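Before the ADOL-C module continues, a matching sketch for the unary coefficient-wise operators in MatrixCwiseUnaryOps.h above (values invented; cwiseSign assumes an Eigen 3.3-era tree like this one):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d v(-4.0, 9.0, 0.25);

  std::cout << v.cwiseAbs().transpose()     << "\n"   //  4     9  0.25
            << v.cwiseAbs2().transpose()    << "\n"   // 16    81  0.0625
            << v.cwiseSign().transpose()    << "\n"   // -1     1  1
            << v.cwiseInverse().transpose() << "\n"   // -0.25  0.111...  4
            // Unary ops compose lazily before evaluation:
            << v.cwiseAbs().cwiseSqrt().transpose() << "\n";  // 2  3  0.5
}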
+ * + * \code + * #include + * \endcode + */ + //@{ + +} // namespace Eigen + +// Eigen's require a few additional functions which must be defined in the same namespace +// than the custom scalar type own namespace +namespace adtl { + +inline const adouble& conj(const adouble& x) { return x; } +inline const adouble& real(const adouble& x) { return x; } +inline adouble imag(const adouble&) { return 0.; } +inline adouble abs(const adouble& x) { return fabs(x); } +inline adouble abs2(const adouble& x) { return x*x; } + +} + +namespace Eigen { + +template<> struct NumTraits + : NumTraits +{ + typedef adtl::adouble Real; + typedef adtl::adouble NonInteger; + typedef adtl::adouble Nested; + enum { + IsComplex = 0, + IsInteger = 0, + IsSigned = 1, + RequireInitialization = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template class AdolcForwardJacobian : public Functor +{ + typedef adtl::adouble ActiveScalar; +public: + + AdolcForwardJacobian() : Functor() {} + AdolcForwardJacobian(const Functor& f) : Functor(f) {} + + // forward constructors + template + AdolcForwardJacobian(const T0& a0) : Functor(a0) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} + + typedef typename Functor::InputType InputType; + typedef typename Functor::ValueType ValueType; + typedef typename Functor::JacobianType JacobianType; + + typedef Matrix ActiveInput; + typedef Matrix ActiveValue; + + void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const + { + eigen_assert(v!=0); + if (!_jac) + { + Functor::operator()(x, v); + return; + } + + JacobianType& jac = *_jac; + + ActiveInput ax = x.template cast(); + ActiveValue av(jac.rows()); + + for (int j=0; j +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ALIGNED_VECTOR3 +#define EIGEN_ALIGNED_VECTOR3 + +#include + +namespace Eigen { + +/** + * \defgroup AlignedVector3_Module Aligned vector3 module + * + * \code + * #include + * \endcode + */ + //@{ + + +/** \class AlignedVector3 + * + * \brief A vectorization friendly 3D vector + * + * This class represents a 3D vector internally using a 4D vector + * such that vectorization can be seamlessly enabled. Of course, + * the same result can be achieved by directly using a 4D vector. + * This class makes this process simpler. 
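A usage sketch for AlignedVector3 as documented here (minimal; assumes the unsupported headers are on the include path, values invented):

#include <unsupported/Eigen/AlignedVector3>
#include <iostream>

int main() {
  Eigen::AlignedVector3<float> a(1.f, 2.f, 3.f);
  Eigen::AlignedVector3<float> b(4.f, 5.f, 6.f);

  // Behaves like a Vector3f, but is stored as a 4-float block (w kept at 0)
  // so the arithmetic below can use full SIMD registers.
  Eigen::AlignedVector3<float> c = a + 0.5f * b;
  std::cout << c.dot(b) << "\n" << c.cross(b).sum() << "\n";
}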
+ * + */ +// TODO specialize Cwise +template class AlignedVector3; + +namespace internal { +template struct traits > + : traits > +{ +}; +} + +template class AlignedVector3 + : public MatrixBase > +{ + typedef Matrix<_Scalar,4,1> CoeffType; + CoeffType m_coeffs; + public: + + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3) + using Base::operator*; + + inline Index rows() const { return 3; } + inline Index cols() const { return 1; } + + Scalar* data() { return m_coeffs.data(); } + const Scalar* data() const { return m_coeffs.data(); } + Index innerStride() const { return 1; } + Index outerStride() const { return 3; } + + inline const Scalar& coeff(Index row, Index col) const + { return m_coeffs.coeff(row, col); } + + inline Scalar& coeffRef(Index row, Index col) + { return m_coeffs.coeffRef(row, col); } + + inline const Scalar& coeff(Index index) const + { return m_coeffs.coeff(index); } + + inline Scalar& coeffRef(Index index) + { return m_coeffs.coeffRef(index);} + + + inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z) + : m_coeffs(x, y, z, Scalar(0)) + {} + + inline AlignedVector3(const AlignedVector3& other) + : Base(), m_coeffs(other.m_coeffs) + {} + + template + struct generic_assign_selector {}; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs = src; + } + }; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs.template head<3>() = src; + dest.m_coeffs.w() = Scalar(0); + } + }; + + template + inline AlignedVector3(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + } + + inline AlignedVector3& operator=(const AlignedVector3& other) + { m_coeffs = other.m_coeffs; return *this; } + + template + inline AlignedVector3& operator=(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + return *this; + } + + inline AlignedVector3 operator+(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs + other.m_coeffs); } + + inline AlignedVector3& operator+=(const AlignedVector3& other) + { m_coeffs += other.m_coeffs; return *this; } + + inline AlignedVector3 operator-(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs - other.m_coeffs); } + + inline AlignedVector3 operator-=(const AlignedVector3& other) + { m_coeffs -= other.m_coeffs; return *this; } + + inline AlignedVector3 operator*(const Scalar& s) const + { return AlignedVector3(m_coeffs * s); } + + inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec) + { return AlignedVector3(s * vec.m_coeffs); } + + inline AlignedVector3& operator*=(const Scalar& s) + { m_coeffs *= s; return *this; } + + inline AlignedVector3 operator/(const Scalar& s) const + { return AlignedVector3(m_coeffs / s); } + + inline AlignedVector3& operator/=(const Scalar& s) + { m_coeffs /= s; return *this; } + + inline Scalar dot(const AlignedVector3& other) const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + eigen_assert(other.m_coeffs.w()==Scalar(0)); + return m_coeffs.dot(other.m_coeffs); + } + + inline void normalize() + { + m_coeffs /= norm(); + } + + inline AlignedVector3 normalized() const + { + return AlignedVector3(m_coeffs / norm()); + } + + inline Scalar sum() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + return m_coeffs.sum(); + } + + inline Scalar squaredNorm() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + 
return m_coeffs.squaredNorm(); + } + + inline Scalar norm() const + { + using std::sqrt; + return sqrt(squaredNorm()); + } + + inline AlignedVector3 cross(const AlignedVector3& other) const + { + return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); + } + + template + inline bool isApprox(const MatrixBase& other, const RealScalar& eps=NumTraits::dummy_precision()) const + { + return m_coeffs.template head<3>().isApprox(other,eps); + } + + CoeffType& coeffs() { return m_coeffs; } + const CoeffType& coeffs() const { return m_coeffs; } +}; + +namespace internal { + +template +struct eval, Dense> +{ + typedef const AlignedVector3<_Scalar>& type; +}; + +template +struct evaluator > + : evaluator > +{ + typedef AlignedVector3 XprType; + typedef evaluator > Base; + + evaluator(const XprType &m) : Base(m.coeffs()) {} +}; + +} + +//@} + +} + +#endif // EIGEN_ALIGNED_VECTOR3 diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/ArpackSupport b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/ArpackSupport new file mode 100644 index 000000000..37a2799ef --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/ArpackSupport @@ -0,0 +1,31 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARPACKSUPPORT_MODULE_H +#define EIGEN_ARPACKSUPPORT_MODULE_H + +#include + +#include + +/** \defgroup ArpackSupport_Module Arpack support module + * + * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition. + * + * \code + * #include + * \endcode + */ + +#include +#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h" + +#include + +#endif // EIGEN_ARPACKSUPPORT_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AutoDiff b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AutoDiff new file mode 100644 index 000000000..abf5b7d67 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/AutoDiff @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_AUTODIFF_MODULE +#define EIGEN_AUTODIFF_MODULE + +namespace Eigen { + +/** + * \defgroup AutoDiff_Module Auto Diff module + * + * This module features forward automatic differentation via a simple + * templated scalar type wrapper AutoDiffScalar. + * + * Warning : this should NOT be confused with numerical differentiation, which + * is a different method and has its own module in Eigen : \ref NumericalDiff_Module. 
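To make the contrast with numerical differentiation concrete, a minimal forward-mode sketch with AutoDiffScalar (editor's example; the function f(x) = x^2 + 3x is invented):

#include <unsupported/Eigen/AutoDiff>
#include <iostream>

int main() {
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
  // Seed x = 2 with a single derivative slot and dx/dx = 1.
  AD x(2.0, 1, 0);
  // The derivative propagates through the arithmetic automatically.
  AD y = x * x + 3.0 * x;
  std::cout << "f(2)  = " << y.value()          << "\n"   // 10
            << "f'(2) = " << y.derivatives()[0] << "\n";  // 2*2 + 3 = 7
}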
+ * + * \code + * #include + * \endcode + */ +//@{ + +} + +#include "src/AutoDiff/AutoDiffScalar.h" +// #include "src/AutoDiff/AutoDiffVector.h" +#include "src/AutoDiff/AutoDiffJacobian.h" + +namespace Eigen { +//@} +} + +#endif // EIGEN_AUTODIFF_MODULE diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/BVH b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/BVH new file mode 100644 index 000000000..0161a5402 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/BVH @@ -0,0 +1,95 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Ilya Baran +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BVH_MODULE_H +#define EIGEN_BVH_MODULE_H + +#include +#include +#include +#include +#include + +namespace Eigen { + +/** + * \defgroup BVH_Module BVH module + * \brief This module provides generic bounding volume hierarchy algorithms + * and reference tree implementations. + * + * + * \code + * #include + * \endcode + * + * A bounding volume hierarchy (BVH) can accelerate many geometric queries. This module provides a generic implementation + * of the two basic algorithms over a BVH: intersection of a query object against all objects in the hierarchy and minimization + * of a function over the objects in the hierarchy. It also provides intersection and minimization over a cartesian product of + * two BVH's. A BVH accelerates intersection by using the fact that if a query object does not intersect a volume, then it cannot + * intersect any object contained in that volume. Similarly, a BVH accelerates minimization because the minimum of a function + * over a volume is no greater than the minimum of a function over any object contained in it. + * + * Some sample queries that can be written in terms of intersection are: + * - Determine all points where a ray intersects a triangle mesh + * - Given a set of points, determine which are contained in a query sphere + * - Given a set of spheres, determine which contain the query point + * - Given a set of disks, determine if any is completely contained in a query rectangle (represent each 2D disk as a point \f$(x,y,r)\f$ + * in 3D and represent the rectangle as a pyramid based on the original rectangle and shrinking in the \f$r\f$ direction) + * - Given a set of points, count how many pairs are \f$d\pm\epsilon\f$ apart (done by looking at the cartesian product of the set + * of points with itself) + * + * Some sample queries that can be written in terms of function minimization over a set of objects are: + * - Find the intersection between a ray and a triangle mesh closest to the ray origin (function is infinite off the ray) + * - Given a polyline and a query point, determine the closest point on the polyline to the query + * - Find the diameter of a point cloud (done by looking at the cartesian product and using negative distance as the function) + * - Determine how far two meshes are from colliding (this is also a cartesian product query) + * + * This implementation decouples the basic algorithms both from the type of hierarchy (and the types of the bounding volumes) and + * from the particulars of the query. 
To enable abstraction from the BVH, the BVH is required to implement a generic mechanism + * for traversal. To abstract from the query, the query is responsible for keeping track of results. + * + * To be used in the algorithms, a hierarchy must implement the following traversal mechanism (see KdBVH for a sample implementation): \code + typedef Volume //the type of bounding volume + typedef Object //the type of object in the hierarchy + typedef Index //a reference to a node in the hierarchy--typically an int or a pointer + typedef VolumeIterator //an iterator type over node children--returns Index + typedef ObjectIterator //an iterator over object (leaf) children--returns const Object & + Index getRootIndex() const //returns the index of the hierarchy root + const Volume &getVolume(Index index) const //returns the bounding volume of the node at given index + void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, + ObjectIterator &outOBegin, ObjectIterator &outOEnd) const + //getChildren takes a node index and makes [outVBegin, outVEnd) range over its node children + //and [outOBegin, outOEnd) range over its object children + \endcode + * + * To use the hierarchy, call BVIntersect or BVMinimize, passing it a BVH (or two, for cartesian product) and a minimizer or intersector. + * For an intersection query on a single BVH, the intersector encapsulates the query and must provide two functions: + * \code + bool intersectVolume(const Volume &volume) //returns true if the query intersects the volume + bool intersectObject(const Object &object) //returns true if the intersection search should terminate immediately + \endcode + * The guarantee that BVIntersect provides is that intersectObject will be called on every object whose bounding volume + * intersects the query (but possibly on other objects too) unless the search is terminated prematurely. It is the + * responsibility of the intersectObject function to keep track of the results in whatever manner is appropriate. + * The cartesian product intersection and the BVMinimize queries are similar--see their individual documentation. + * + * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair: + * \include BVH_Example.cpp + * Output: \verbinclude BVH_Example.out + */ +} + +//@{ + +#include "src/BVH/BVAlgorithms.h" +#include "src/BVH/KdBVH.h" + +//@} + +#endif // EIGEN_BVH_MODULE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/Tensor b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/Tensor new file mode 100644 index 000000000..bb6523d15 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/Tensor @@ -0,0 +1,154 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
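Stepping back to the BVH module just closed above, a minimal intersector in the shape its docs require (intersectVolume/intersectObject); the point data and radius are invented, and bounding_box is supplied the same way the module's own BVH_Example does:

#include <unsupported/Eigen/BVH>
#include <iostream>
#include <vector>

using Eigen::Vector3d;
typedef Eigen::AlignedBox<double, 3> Box3;

namespace Eigen {
// KdBVH finds object bounds through this hook; a point's box is the point itself.
inline AlignedBox3d bounding_box(const Vector3d& v) { return AlignedBox3d(v, v); }
}

// Collects every stored point within `radius` of `center`.
struct SphereIntersector {
  Vector3d center;
  double radius;
  std::vector<Vector3d> hits;

  bool intersectVolume(const Box3& box) {
    // Recurse into a node only if the query sphere touches its box.
    return box.squaredExteriorDistance(center) <= radius * radius;
  }
  bool intersectObject(const Vector3d& p) {
    if ((p - center).squaredNorm() <= radius * radius) hits.push_back(p);
    return false;  // false: keep searching instead of terminating early
  }
};

int main() {
  std::vector<Vector3d> pts;
  pts.push_back(Vector3d(0, 0, 0));
  pts.push_back(Vector3d(1, 0, 0));
  pts.push_back(Vector3d(5, 5, 5));

  Eigen::KdBVH<double, 3, Vector3d> tree(pts.begin(), pts.end());
  SphereIntersector q = {Vector3d(0.2, 0, 0), 1.5, std::vector<Vector3d>()};
  Eigen::BVIntersect(tree, q);
  std::cout << q.hits.size() << " point(s) in range\n";  // expect 2
}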
+ +//#ifndef EIGEN_CXX11_TENSOR_MODULE +//#define EIGEN_CXX11_TENSOR_MODULE + +#include "../../../Eigen/Core" + +#ifdef EIGEN_USE_SYCL +#undef min +#undef max +#undef isnan +#undef isinf +#undef isfinite +#include +#include +#include +#include +#endif + +#include + +#include "../SpecialFunctions" +#include "src/util/CXX11Meta.h" +#include "src/util/MaxSizeVector.h" + +/** \defgroup CXX11_Tensor_Module Tensor Module + * + * This module provides a Tensor class for storing arbitrarily indexed + * objects. + * + * \code + * #include + * \endcode + * + * Much of the documentation can be found \ref eigen_tensors "here". + */ + +#include +#include +#include + +#ifdef _WIN32 +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +#if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900 +#include +#endif + +#ifdef _WIN32 +#include +#elif defined(__APPLE__) +#include +#else +#include +#endif + +#ifdef EIGEN_USE_THREADS +#include "ThreadPool" +#endif + +#ifdef EIGEN_USE_GPU +#include +#include +#if __cplusplus >= 201103L +#include +#include +#endif +#endif + +#include "src/Tensor/TensorMacros.h" +#include "src/Tensor/TensorForwardDeclarations.h" +#include "src/Tensor/TensorMeta.h" +#include "src/Tensor/TensorFunctors.h" +#include "src/Tensor/TensorCostModel.h" +#include "src/Tensor/TensorDeviceDefault.h" +#include "src/Tensor/TensorDeviceThreadPool.h" +#include "src/Tensor/TensorDeviceCuda.h" +#include "src/Tensor/TensorDeviceSycl.h" +#include "src/Tensor/TensorIndexList.h" +#include "src/Tensor/TensorDimensionList.h" +#include "src/Tensor/TensorDimensions.h" +#include "src/Tensor/TensorInitializer.h" +#include "src/Tensor/TensorTraits.h" +#include "src/Tensor/TensorRandom.h" +#include "src/Tensor/TensorUInt128.h" +#include "src/Tensor/TensorIntDiv.h" +#include "src/Tensor/TensorGlobalFunctions.h" + +#include "src/Tensor/TensorBase.h" + +#include "src/Tensor/TensorEvaluator.h" +#include "src/Tensor/TensorExpr.h" +#include "src/Tensor/TensorReduction.h" +#include "src/Tensor/TensorReductionCuda.h" +#include "src/Tensor/TensorArgMax.h" +#include "src/Tensor/TensorConcatenation.h" +#include "src/Tensor/TensorContractionMapper.h" +#include "src/Tensor/TensorContractionBlocking.h" +#include "src/Tensor/TensorContraction.h" +#include "src/Tensor/TensorContractionThreadPool.h" +#include "src/Tensor/TensorContractionCuda.h" +#include "src/Tensor/TensorConversion.h" +#include "src/Tensor/TensorConvolution.h" +#include "src/Tensor/TensorFFT.h" +#include "src/Tensor/TensorPatch.h" +#include "src/Tensor/TensorImagePatch.h" +#include "src/Tensor/TensorVolumePatch.h" +#include "src/Tensor/TensorBroadcasting.h" +#include "src/Tensor/TensorChipping.h" +#include "src/Tensor/TensorInflation.h" +#include "src/Tensor/TensorLayoutSwap.h" +#include "src/Tensor/TensorMorphing.h" +#include "src/Tensor/TensorPadding.h" +#include "src/Tensor/TensorReverse.h" +#include "src/Tensor/TensorShuffling.h" +#include "src/Tensor/TensorStriding.h" +#include "src/Tensor/TensorCustomOp.h" +#include "src/Tensor/TensorEvalTo.h" +#include "src/Tensor/TensorForcedEval.h" +#include "src/Tensor/TensorGenerator.h" +#include "src/Tensor/TensorAssign.h" +#include "src/Tensor/TensorScan.h" + +#include "src/Tensor/TensorSycl.h" +#include "src/Tensor/TensorExecutor.h" +#include "src/Tensor/TensorDevice.h" + +#include "src/Tensor/TensorStorage.h" +#include "src/Tensor/Tensor.h" +#include 
"src/Tensor/TensorFixedSize.h" +#include "src/Tensor/TensorMap.h" +#include "src/Tensor/TensorRef.h" + +#include "src/Tensor/TensorIO.h" + +#include + +//#endif // EIGEN_CXX11_TENSOR_MODULE diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/TensorSymmetry b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/TensorSymmetry new file mode 100644 index 000000000..fb1b0c0fb --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/TensorSymmetry @@ -0,0 +1,42 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE +#define EIGEN_CXX11_TENSORSYMMETRY_MODULE + +#include + +#include + +#include "src/util/CXX11Meta.h" + +/** \defgroup CXX11_TensorSymmetry_Module Tensor Symmetry Module + * + * This module provides a classes that allow for the definition of + * symmetries w.r.t. tensor indices. + * + * Including this module will implicitly include the Tensor module. + * + * \code + * #include + * \endcode + */ + +#include "src/TensorSymmetry/util/TemplateGroupTheory.h" +#include "src/TensorSymmetry/Symmetry.h" +#include "src/TensorSymmetry/StaticSymmetry.h" +#include "src/TensorSymmetry/DynamicSymmetry.h" + +#include + +#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/ThreadPool b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/ThreadPool new file mode 100644 index 000000000..09d637e9a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/ThreadPool @@ -0,0 +1,65 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_MODULE +#define EIGEN_CXX11_THREADPOOL_MODULE + +#include "../../../Eigen/Core" + +#include + +/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module + * + * This module provides 2 threadpool implementations + * - a simple reference implementation + * - a faster non blocking implementation + * + * This module requires C++11. + * + * \code + * #include + * \endcode + */ + + +// The code depends on CXX11, so only include the module if the +// compiler supports it. 
+#if __cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900 +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "src/util/CXX11Meta.h" +#include "src/util/MaxSizeVector.h" + +#include "src/ThreadPool/ThreadLocal.h" +#include "src/ThreadPool/ThreadYield.h" +#include "src/ThreadPool/EventCount.h" +#include "src/ThreadPool/RunQueue.h" +#include "src/ThreadPool/ThreadPoolInterface.h" +#include "src/ThreadPool/ThreadEnvironment.h" +#include "src/ThreadPool/SimpleThreadPool.h" +#include "src/ThreadPool/NonBlockingThreadPool.h" + +#endif + +#include + +#endif // EIGEN_CXX11_THREADPOOL_MODULE + diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h new file mode 100644 index 000000000..00295a255 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h @@ -0,0 +1,527 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_H + +namespace Eigen { + +/** \class Tensor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor class. + * + * The %Tensor class is the work-horse for all \em dense tensors within Eigen. + * + * The %Tensor class encompasses only dynamic-size objects so far. + * + * The first two template parameters are required: + * \tparam Scalar_ Numeric type, e.g. float, double, int or `std::complex`. + * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). + * \tparam NumIndices_ Number of indices (i.e. rank of the tensor) + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization. + * Support for such operations (i.e. adding two tensors etc.) is planned. + * + * You can access elements of tensors using normal subscripting: + * + * \code + * Eigen::Tensor t(10, 10, 10, 10); + * t(0, 1, 2, 3) = 42.0; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN. + * + * Some notes: + * + *
+ * <dl>
+ * <dt><b>Relation to other parts of Eigen:</b></dt>
+ * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that + * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code + * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor + * class does not provide any of these features and is only available as a stand-alone class that just allows for + * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to + * change dramatically.</dd>
+ * </dl>
+ * + * \ref TopicStorageOrders + */ + +template +class Tensor : public TensorBase > +{ + public: + typedef Tensor Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign), + Layout = Options_ & RowMajor ? RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + static const int Options = Options_; + static const int NumIndices = NumIndices_; + typedef DSizes Dimensions; + + protected: + TensorStorage m_storage; + +#ifdef EIGEN_HAS_SFINAE + template + struct isOfNormalIndex{ + static const bool is_array = internal::is_base_of, CustomIndices>::value; + static const bool is_int = NumTraits::IsInteger; + static const bool value = is_array | is_int; + }; +#endif + + public: + // Metadata + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + return coeff(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + return coeff(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + return coeff(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + return coeff(array(i0, i1, i2, i3, i4)); + } +#endif + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + return coeff(indices); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + return coeffRef(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + return coeffRef(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + return coeffRef(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + return coeffRef(array(i0, i1, i2, i3, i4)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + return coeffRef(indices); + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const Self& other) + : m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions) + : m_storage(firstDimension, otherDimensions...) + { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1) + : m_storage(dim1, array(dim1)) + { + EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2) + : m_storage(dim1*dim2, array(dim1, dim2)) + { + EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3) + : m_storage(dim1*dim2*dim3, array(dim1, dim2, dim3)) + { + EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4) + : m_storage(dim1*dim2*dim3*dim4, array(dim1, dim2, dim3, dim4)) + { + EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) + : m_storage(dim1*dim2*dim3*dim4*dim5, array(dim1, dim2, dim3, dim4, dim5)) + { + EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array& dimensions) + : m_storage(internal::array_prod(dimensions), dimensions) + { + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + void resize(Index firstDimension, IndexTypes... otherDimensions) + { + // The number of dimensions used to resize a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + resize(array{{firstDimension, otherDimensions...}}); + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC void resize(const array& dimensions) + { + int i; + Index size = Index(1); + for (i = 0; i < NumIndices; i++) { + internal::check_rows_cols_for_overflow::run(size, dimensions[i]); + size *= dimensions[i]; + } + #ifdef EIGEN_INITIALIZE_COEFFS + bool size_changed = size != this->size(); + m_storage.resize(size, dimensions); + if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + #else + m_storage.resize(size, dimensions); + #endif + } + + // Why this overload, DSizes is derived from array ??? // + EIGEN_DEVICE_FUNC void resize(const DSizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = dimensions[i]; + } + resize(dims); + } + + EIGEN_DEVICE_FUNC + void resize() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + // Nothing to do: rank 0 tensors have fixed size + } + + /** Custom Dimension */ +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions) + { + resize(internal::customIndices2Array(dimensions)); + } +#endif + +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#else + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#endif + + protected: + + bool checkIndexRange(const array& indices) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return + // check whether the indices are all >= 0 + array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h new file mode 100644 index 000000000..d06f40cd8 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h @@ -0,0 +1,299 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
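Before the argmax machinery below, a small sketch of the Tensor class completed above — construction, coefficient access, metadata, and resize (sizes and values invented):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Rank-3 tensor, column-major by default; sizes are runtime values.
  Eigen::Tensor<double, 3> t(4, 3, 2);
  t.setZero();
  t(1, 2, 0) = 42.0;                       // coefficient access by indices

  std::cout << t.rank() << " " << t.size() << "\n";      // 3 24
  std::cout << t.dimension(0) << "x" << t.dimension(1)
            << "x" << t.dimension(2) << "\n";            // 4x3x2

  t.resize(2, 2, 2);                       // reallocates; contents not kept
  std::cout << t.size() << "\n";           // 8
}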
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H +#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H + +namespace Eigen { +namespace internal { + +/** \class TensorIndexTuple + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor + Index Tuple class. + * + * + */ +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Tuple Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorIndexTupleOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorIndexTupleOp type; +}; + +} // end namespace internal + +template +class TensorIndexTupleOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Tuple CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorIndexTupleOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return CoeffReturnType(index, m_impl.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + protected: + TensorEvaluator m_impl; +}; + +namespace internal { + +/** \class TensorTupleIndex + * \ingroup CXX11_Tensor_Module + * + * \brief Converts to Tensor > and reduces to Tensor. 
+ * + */ +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Index Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorTupleReducerOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorTupleReducerOp type; +}; + +} // end namespace internal + +template +class TensorTupleReducerOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Index CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr, + const ReduceOp& reduce_op, + const int return_dim, + const Dims& reduce_dims) + : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const ReduceOp& reduce_op() const { return m_reduce_op; } + + EIGEN_DEVICE_FUNC + const Dims& reduce_dims() const { return m_reduce_dims; } + + EIGEN_DEVICE_FUNC + int return_dim() const { return m_return_dim; } + + protected: + typename XprType::Nested m_xpr; + const ReduceOp m_reduce_op; + const int m_return_dim; + const Dims m_reduce_dims; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorTupleReducerOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename TensorIndexTupleOp::CoeffReturnType TupleType; + typedef typename TensorEvaluator >, Device>::Dimensions Dimensions; + typedef typename TensorEvaluator , Device>::Dimensions InputDimensions; + static const int NumDims = internal::array_size::value; + typedef array StrideDims; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + Layout = TensorEvaluator >, Device>::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_orig_impl(op.expression(), device), + m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device), + m_return_dim(op.return_dim()) { + + gen_strides(m_orig_impl.dimensions(), m_strides); + if (Layout == static_cast(ColMajor)) { + const Index total_size = internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size; + } else { + const Index total_size = internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim > 0) ? 
m_strides[m_return_dim - 1] : total_size; + } + m_stride_div = m_strides[m_return_dim]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + const TupleType v = m_impl.coeff(index); + return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div; + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = 1.0 + + (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost() + TensorOpCost::DivCost())); + return m_orig_impl.costPerCoeff(vectorized) + + m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost); + } + + private: + EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) { + if (m_return_dim < 0) { + return; // Won't be using the strides. + } + eigen_assert(m_return_dim < NumDims && + "Asking to convert index to a dimension outside of the rank"); + + // Calculate m_stride_div and m_stride_mod, which are used to + // calculate the value of an index w.r.t. the m_return_dim. + if (Layout == static_cast(ColMajor)) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i-1] * dims[i-1]; + } + } else { + strides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i+1] * dims[i+1]; + } + } + } + + protected: + TensorEvaluator, Device> m_orig_impl; + TensorEvaluator >, Device> m_impl; + const int m_return_dim; + StrideDims m_strides; + Index m_stride_mod; + Index m_stride_div; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h new file mode 100644 index 000000000..166be200c --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -0,0 +1,181 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H +#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H + +namespace Eigen { + +/** \class TensorAssign + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor assignment class. + * + * This class is represents the assignment of the values resulting from the evaluation of + * the rhs expression to the memory locations denoted by the lhs expression. 
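The TensorIndexTupleOp/TensorTupleReducerOp machinery above is what backs the public argmax()/argmin() API; a usage sketch (values invented, C++11 assumed for setValues):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setValues({{1.f, 7.f, 3.f},
               {9.f, 2.f, 5.f}});

  // Reduce over dimension 1: for each row, the column index of the max.
  Eigen::Tensor<Eigen::DenseIndex, 1> am = t.argmax(1);
  std::cout << am(0) << " " << am(1) << "\n";   // 1 0

  // Reduce over all dimensions: a rank-0 tensor holding the linear
  // (column-major) index of the overall maximum.
  Eigen::Tensor<Eigen::DenseIndex, 0> flat = t.argmax();
  std::cout << flat() << "\n";                  // 1 (index of the 9)
}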
+ */ +namespace internal { +template +struct traits > +{ + typedef typename LhsXprType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const std::size_t NumDimensions = internal::traits::NumDimensions; + static const int Layout = internal::traits::Layout; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorAssignOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorAssignOp type; +}; + +} // end namespace internal + + + +template +class TensorAssignOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename LhsXprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {} + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + typename internal::remove_all::type& + lhsExpression() const { return *((typename internal::remove_all::type*)&m_lhs_xpr); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename internal::remove_all::type& m_lhs_xpr; + const typename internal::remove_all::type& m_rhs_xpr; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorAssignOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) : + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // The dimensions of the lhs and the rhs tensors should be equal to prevent + // overflows and ensure the result is fully initialized. + // TODO: use left impl instead if right impl dimensions are known at compile time. + return m_rightImpl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + m_leftImpl.evalSubExprsIfNeeded(NULL); + // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non + // null value), attempt to evaluate the rhs expression in place. 
Returns true iff in place + // evaluation isn't supported and the caller still needs to manually assign the values generated + // by the rhs to the lhs. + return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + const int LhsStoreMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + const int RhsLoadMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + m_leftImpl.template writePacket(i, m_rightImpl.template packet(i)); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_leftImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + return m_leftImpl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here, but reduce left + // cost by one load because we are using m_leftImpl.coeffRef. + TensorOpCost left = m_leftImpl.costPerCoeff(vectorized); + return m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost( + numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)), + left.bytes_stored(), left.compute_cycles()) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + /// required by sycl in order to extract the accessor + const TensorEvaluator& left_impl() const { return m_leftImpl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& right_impl() const { return m_rightImpl; } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_leftImpl.data(); } + + private: + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +} + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h new file mode 100644 index 000000000..f573608d9 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h @@ -0,0 +1,1012 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H +#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H + +// clang-format off + +namespace Eigen { + +/** \class TensorBase + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor base class. + * + * This class is the common parent of the Tensor and TensorMap classes, thus + * making it possible to use either class interchangeably in expressions.
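+ * + * For example (a sketch; the raw buffer and the sizes are illustrative): + * \code + * float data[6] = {0, 1, 2, 3, 4, 5}; + * Eigen::TensorMap<Eigen::Tensor<float, 2> > m(data, 2, 3); + * Eigen::Tensor<float, 2> t(2, 3); + * t.setConstant(1.0f); + * Eigen::Tensor<float, 2> sum = t + m; // Tensor and TensorMap mix freely + * \endcode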
+ */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +// FIXME Doxygen does not like the inheritance with different template parameters +// Since there is no doxygen documentation inside, we disable it for now +template +class TensorBase +{ + public: + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + // Generic nullary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + nullaryExpr(const CustomNullaryOp& func) const { + return TensorCwiseNullaryOp(derived(), func); + } + + // Coefficient-wise nullary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + constant(const Scalar& value) const { + return nullaryExpr(internal::scalar_constant_op(value)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + random() const { + return nullaryExpr(internal::UniformRandomGenerator()); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + random(const RandomGenerator& gen = RandomGenerator()) const { + return nullaryExpr(gen); + } + + // Tensor generation + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorGeneratorOp + generate(const Generator& generator) const { + return TensorGeneratorOp(derived(), generator); + } + + // Generic unary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp + unaryExpr(const CustomUnaryOp& func) const { + return TensorCwiseUnaryOp(derived(), func); + } + + // Coefficient-wise unary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator-() const { + return unaryExpr(internal::scalar_opposite_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sqrt() const { + return unaryExpr(internal::scalar_sqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sign() const { + return unaryExpr(internal::scalar_sign_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + rsqrt() const { + return unaryExpr(internal::scalar_rsqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + square() const { + return unaryExpr(internal::scalar_square_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + cube() const { + return unaryExpr(internal::scalar_cube_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + inverse() const { + return unaryExpr(internal::scalar_inverse_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + tanh() const { + return unaryExpr(internal::scalar_tanh_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + lgamma() const { + return unaryExpr(internal::scalar_lgamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + digamma() const { + return unaryExpr(internal::scalar_digamma_op()); + } + + // igamma(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), 
internal::scalar_igamma_op()); + } + + // igammac(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igammac(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igammac_op()); + } + + // zeta(x = this, q = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + zeta(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_zeta_op()); + } + + // polygamma(n = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + polygamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_polygamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erf() const { + return unaryExpr(internal::scalar_erf_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erfc() const { + return unaryExpr(internal::scalar_erfc_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sigmoid() const { + return unaryExpr(internal::scalar_sigmoid_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + exp() const { + return unaryExpr(internal::scalar_exp_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log() const { + return unaryExpr(internal::scalar_log_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log1p() const { + return unaryExpr(internal::scalar_log1p_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + abs() const { + return unaryExpr(internal::scalar_abs_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + conjugate() const { + return unaryExpr(internal::scalar_conjugate_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + pow(Scalar exponent) const { + return unaryExpr(internal::bind2nd_op >(exponent)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + real() const { + return unaryExpr(internal::scalar_real_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + imag() const { + return unaryExpr(internal::scalar_imag_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator+ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator+ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar rhs) const { + EIGEN_STATIC_ASSERT((NumTraits::IsSigned || internal::is_same >::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar rhs) const { + return 
unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator% (Scalar rhs) const { + EIGEN_STATIC_ASSERT(NumTraits::IsInteger, YOU_MADE_A_PROGRAMMING_MISTAKE_TRY_MOD); + return unaryExpr(internal::scalar_mod_op(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMax(Scalar threshold) const { + return cwiseMax(constant(threshold)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMin(Scalar threshold) const { + return cwiseMin(constant(threshold)); + } + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorConversionOp + cast() const { + return TensorConversionOp(derived()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + round() const { + return unaryExpr(internal::scalar_round_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + ceil() const { + return unaryExpr(internal::scalar_ceil_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + floor() const { + return unaryExpr(internal::scalar_floor_op()); + } + + // Generic binary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp + binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const { + return TensorCwiseBinaryOp(derived(), other, func); + } + + // Coefficient-wise binary operators. 
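+ // A sketch of how these compose (illustrative, not normative; a and b stand + // for hypothetical rank-1 float tensors): the operators below return lazy + // expression nodes, so + // auto e = a * b + a.constant(1.0f); // builds an expression tree, no work yet + // Eigen::Tensor<float, 1> r = e; // evaluation happens on assignment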
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator+(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_sum_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator-(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_difference_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator*(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_product_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator/(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_quotient_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMax(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_max_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMin(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_min_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator&&(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_and_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator||(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_or_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator^(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_xor_op()); + } + + // Comparisons and tests. 
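+ // Comparisons are also lazy and yield boolean-valued expressions; a common + // pattern (a hedged sketch, a and b hypothetical) combines them with select(): + // Eigen::Tensor<float, 1> c = (a > b).select(a, b); // element-wise max of a, b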
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator==(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator!=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + // comparisons and tests for Scalars + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<(Scalar threshold) const { + return operator<(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<=(Scalar threshold) const { + return operator<=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator>(Scalar threshold) const { + return operator>(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator>=(Scalar threshold) const { + return operator>=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator==(Scalar threshold) const { + return operator==(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator!=(Scalar threshold) const { + return operator!=(constant(threshold)); + } + + // Checks + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isnan)() const { + return unaryExpr(internal::scalar_isnan_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isinf)() const { + return unaryExpr(internal::scalar_isinf_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isfinite)() const { + return unaryExpr(internal::scalar_isfinite_op()); + } + + // Coefficient-wise ternary operators. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSelectOp + select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const { + return TensorSelectOp(derived(), thenTensor.derived(), elseTensor.derived()); + } + + // Contractions. 
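+ // Example (a sketch, a and b hypothetical rank-2 tensors): contracting over + // the inner dimension is a matrix product, + // Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) }; + // Eigen::Tensor<float, 2> c = a.contract(b, dims); // c(i,k) = sum_j a(i,j) * b(j,k)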
+ typedef Eigen::IndexPair DimensionPair; + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorContractionOp + contract(const OtherDerived& other, const Dimensions& dims) const { + return TensorContractionOp(derived(), other.derived(), dims); + } + + // Convolutions. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConvolutionOp + convolve(const KernelDerived& kernel, const Dimensions& dims) const { + return TensorConvolutionOp(derived(), kernel.derived(), dims); + } + + // Fourier transforms + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorFFTOp + fft(const FFT& fft) const { + return TensorFFTOp(derived(), fft); + } + + // Scan. + typedef TensorScanOp, const Derived> TensorScanSumOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanSumOp + cumsum(const Index& axis, bool exclusive = false) const { + return TensorScanSumOp(derived(), axis, exclusive); + } + + typedef TensorScanOp, const Derived> TensorScanProdOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanProdOp + cumprod(const Index& axis, bool exclusive = false) const { + return TensorScanProdOp(derived(), axis, exclusive); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanOp + scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const { + return TensorScanOp(derived(), axis, exclusive, reducer); + } + + // Reductions. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + sum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::SumReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + sum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::SumReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + mean(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MeanReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + mean() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MeanReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + prod(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::ProdReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + prod() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::ProdReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + maximum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MaxReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + maximum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MaxReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + minimum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MinReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + 
minimum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MinReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp > + all(const Dims& dims) const { + return cast().reduce(dims, internal::AndReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const TensorConversionOp > + all() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::AndReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp > + any(const Dims& dims) const { + return cast().reduce(dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const TensorConversionOp > + any() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax() const { + array in_dims; + for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin() const { + array in_dims; + for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMinTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax(const int return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), return_dim, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin(const int return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMinTupleReducer >(), return_dim, in_dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp + reduce(const Dims& dims, const Reducer& reducer) const { + return TensorReductionOp(derived(), dims, reducer); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorBroadcastingOp + broadcast(const Broadcast& broadcast) const { + return TensorBroadcastingOp(derived(), broadcast); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, Axis axis) const { + return TensorConcatenationOp(derived(), other.derived(), axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPatchOp + extract_patches(const PatchDims& patch_dims) const { + return TensorPatchOp(derived(), patch_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index patch_rows = 1, const Index patch_cols = 1, + const Index row_stride = 1, const Index col_stride = 1, + const Index in_row_stride = 1, const Index in_col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar 
padding_value = Scalar(0)) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, 1, 1, padding_type, padding_value); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index patch_rows, const Index patch_cols, + const Index row_stride, const Index col_stride, + const Index in_row_stride, const Index in_col_stride, + const Index row_inflate_stride, const Index col_inflate_stride, + const Index padding_top, const Index padding_bottom, + const Index padding_left,const Index padding_right, + const Scalar padding_value) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, row_inflate_stride, col_inflate_stride, + padding_top, padding_bottom, padding_left, padding_right, padding_value); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride = 1, const Index row_stride = 1, const Index col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, 1, 1, 1, padding_type, padding_value); + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride, const Index row_stride, const Index col_stride, + const Index plane_inflate_stride, const Index row_inflate_stride, const Index col_inflate_stride, + const Index padding_top_z, const Index padding_bottom_z, + const Index padding_top, const Index padding_bottom, + const Index padding_left, const Index padding_right, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, plane_inflate_stride, row_inflate_stride, col_inflate_stride, padding_top_z, padding_bottom_z, padding_top, padding_bottom, padding_left, padding_right, padding_value); + } + + // Morphing operators. 
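+ // These operators reinterpret or subset the coefficients without copying. + // A brief sketch (t is a hypothetical rank-1 tensor of 12 coefficients): + // Eigen::array<Eigen::Index, 2> shape2d{{3, 4}}; + // auto m = t.reshape(shape2d); // view the 12 coefficients as 3x4 + // auto row = m.chip(1, 0); // fix dim 0 at offset 1 -> rank-1 view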
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding) const { + return TensorPaddingOp(derived(), padding, internal::scalar_cast_op()(0)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding, const Scalar padding_value) const { + return TensorPaddingOp(derived(), padding, padding_value); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shuffle) const { + return TensorShufflingOp(derived(), shuffle); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorInflationOp + inflate(const Strides& strides) const { + return TensorInflationOp(derived(), strides); + } + + // Returns a tensor containing index/value tuples + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorIndexTupleOp + index_tuples() const { + return TensorIndexTupleOp(derived()); + } + + // Support for custom unary and binary operations + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomUnaryOp customOp(const CustomUnaryFunc& op) const { + return TensorCustomUnaryOp(derived(), op); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomBinaryOp customOp(const OtherDerived& other, const CustomBinaryFunc& op) const { + return TensorCustomBinaryOp(derived(), other, op); + } + + // Force the evaluation of the expression. 
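+ // eval() materializes the expression into a temporary. As a rough guideline + // (not a requirement), it pays off when a costly subexpression feeds an + // operation such as broadcast() that reads each input coefficient many times: + // auto tmp = (a * a + b).eval(); // hypothetical costly expression, computed once + // // ... tmp.broadcast(bcast) then reads the cached values ...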
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorForcedEvalOp eval() const { + return TensorForcedEvalOp(derived()); + } + + protected: + template friend class Tensor; + template friend class TensorFixedSize; + template friend class TensorBase; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; + +template::value> +class TensorBase : public TensorBase { + public: + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef Scalar CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + template friend class Tensor; + template friend class TensorFixedSize; + template friend class TensorBase; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setZero() { + return setConstant(Scalar(0)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setConstant(const Scalar& val) { + return derived() = this->constant(val); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->random(); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->template random(); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setValues( + const typename internal::Initializer::InitList& vals) { + TensorEvaluator eval(derived(), DefaultDevice()); + internal::initialize_tensor(eval, vals); + return derived(); + } +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const OtherDerived& other) { + return derived() = derived() + other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const OtherDerived& other) { + return derived() = derived() - other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator*=(const OtherDerived& other) { + return derived() = derived() * other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator/=(const OtherDerived& other) { + return derived() = derived() / other.derived(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorLayoutSwapOp + swap_layout() { + return TensorLayoutSwapOp(derived()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) const { + return TensorConcatenationOp(derived(), other, axis); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) { + return TensorConcatenationOp(derived(), other, axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReshapingOp + reshape(const NewDimensions& newDimensions) { + return TensorReshapingOp(derived(), newDimensions); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) { + return 
TensorSlicingOp(derived(), startIndices, sizes); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset) { + return TensorChippingOp(derived(), offset, DimId); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset, const Index dim) { + return TensorChippingOp(derived(), offset, dim); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReverseOp + reverse(const ReverseDimensions& rev) { + return TensorReverseOp(derived(), rev); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shuffle) const { + return TensorShufflingOp(derived(), shuffle); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorShufflingOp + shuffle(const Shuffle& shuffle) { + return TensorShufflingOp(derived(), shuffle); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingOp + stride(const Strides& strides) { + return TensorStridingOp(derived(), strides); + } + + // Select the device on which to evaluate the expression. + template + TensorDevice device(const DeviceType& device) { + return TensorDevice(device, derived()); + } + + protected: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& derived() { return *static_cast(this); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; +#endif // EIGEN_PARSED_BY_DOXYGEN +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h new file mode 100644 index 000000000..4cfe300eb --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -0,0 +1,392 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H +#define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H + +namespace Eigen { + +/** \class TensorBroadcasting + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor broadcasting class. + * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorBroadcastingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorBroadcastingOp type; +}; + +template +struct is_input_scalar { + static const bool value = false; +}; +template <> +struct is_input_scalar > { + static const bool value = true; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template +struct is_input_scalar > { + static const bool value = (Sizes::total_size == 1); +}; +#endif + +} // end namespace internal + + + +template +class TensorBroadcastingOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast) + : m_xpr(expr), m_broadcast(broadcast) {} + + EIGEN_DEVICE_FUNC + const Broadcast& broadcast() const { return m_broadcast; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Broadcast m_broadcast; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorBroadcastingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = true, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_broadcast(op.broadcast()), m_impl(op.expression(), device) + { + // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar + // and store the result in a scalar. Instead one should reshape the scalar into an N-D + // tensor with N >= 1 of 1 element first and then broadcast.
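+ // For instance (a sketch of that reshape-then-broadcast workaround): + // Eigen::Tensor<float, 0> s; s.setConstant(3.0f); + // Eigen::array<Eigen::Index, 1> one{{1}}, five{{5}}; + // Eigen::Tensor<float, 1> v = s.reshape(one).broadcast(five); // -> 5 copies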
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + const InputDimensions& input_dims = m_impl.dimensions(); + const Broadcast& broadcast = op.broadcast(); + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i] * broadcast[i]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + } else { + m_inputStrides[NumDims-1] = 1; + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims-2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const + { + if (internal::is_input_scalar::type>::value) { + return m_impl.coeff(0); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + return coeffColMajor(index); + } else { + return coeffRowMajor(index); + } + } + + // TODO: attempt to speed this up. The integer divisions and modulo are slow + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const + { + Index inputIndex = 0; + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + inputIndex += index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[0]); + } + } + return m_impl.coeff(inputIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const + { + Index inputIndex = 0; + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims-1]); + inputIndex += index; + } else { + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[NumDims-1]); + } + } + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const + { + if (internal::is_input_scalar::type>::value) { 
+ return internal::pset1(m_impl.coeff(0)); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + return packetColMajor(index); + } else { + return packetRowMajor(index); + } + } + + // Ignore the LoadMode and always use unaligned loads since we can't guarantee + // the alignment at compile time. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[0]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. + if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + for (int i = 1; i < PacketSize; ++i) { + values[i] = coeffColMajor(originalIndex+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims-1]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[NumDims-1]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. 
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + for (int i = 1; i < PacketSize; ++i) { + values[i] = coeffRowMajor(originalIndex+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double compute_cost = TensorOpCost::AddCost(); + if (NumDims > 0) { + for (int i = NumDims - 1; i > 0; --i) { + compute_cost += TensorOpCost::DivCost(); + if (internal::index_statically_eq(i, 1)) { + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else { + if (!internal::index_statically_eq(i, 1)) { + compute_cost += TensorOpCost::MulCost() + + TensorOpCost::ModCost() + + TensorOpCost::AddCost(); + } + } + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } + } + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + Broadcast functor() const { return m_broadcast; } + + protected: + const Broadcast m_broadcast; + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h new file mode 100644 index 000000000..1ba7ef170 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h @@ -0,0 +1,384 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H + +namespace Eigen { + +/** \class TensorKChippingReshaping + * \ingroup CXX11_Tensor_Module + * + * \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor. 
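+ * + * For a rank-3 tensor t, a hedged example: t.chip(2, 1) (equivalently + * t.chip<1>(2)) fixes dimension 1 at offset 2 and yields a rank-2 view of + * shape (dim0, dim2); the chip of a non-const tensor is also writable.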
+ * + * + */ + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - 1; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorChippingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorChippingOp type; +}; + +template +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) { + eigen_assert(dim == DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return DimId; + } +}; +template <> +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) { + eigen_assert(dim >= 0); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return actual_dim; + } + private: + const DenseIndex actual_dim; +}; + + +} // end namespace internal + + + +template +class TensorChippingOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim) + : m_xpr(expr), m_offset(offset), m_dim(dim) { + } + + EIGEN_DEVICE_FUNC + const Index offset() const { return m_offset; } + EIGEN_DEVICE_FUNC + const Index dim() const { return m_dim.actualDim(); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorChippingOp& operator = (const TensorChippingOp& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + protected: + typename XprType::Nested m_xpr; + const Index m_offset; + const internal::DimensionId m_dim; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets. 
+ IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device) + { + EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(NumInputDims > m_dim.actualDim()); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + eigen_assert(op.offset() < input_dims[m_dim.actualDim()]); + + int j = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (i != m_dim.actualDim()) { + m_dimensions[j] = input_dims[i]; + ++j; + } + } + + m_stride = 1; + m_inputStride = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < m_dim.actualDim(); ++i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } else { + for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } + m_inputStride *= input_dims[m_dim.actualDim()]; + m_inputOffset = m_stride * op.offset(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if ((static_cast(Layout) == static_cast(ColMajor) && m_dim.actualDim() == 0) || + (static_cast(Layout) == static_cast(RowMajor) && m_dim.actualDim() == NumInputDims-1)) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(m_stride == 1); + Index inputIndex = index * m_inputStride + m_inputOffset; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = m_impl.coeff(inputIndex); + inputIndex += m_inputStride; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } else if ((static_cast(Layout) == static_cast(ColMajor) && m_dim.actualDim() == NumInputDims - 1) || + (static_cast(Layout) == static_cast(RowMajor) && m_dim.actualDim() == 0)) { + // m_stride is aways greater than index, so let's avoid the integer division. + eigen_assert(m_stride > index); + return m_impl.template packet(index + m_inputOffset); + } else { + const Index idx = index / m_stride; + const Index rem = index - idx * m_stride; + if (rem + PacketSize <= m_stride) { + Index inputIndex = idx * m_inputStride + m_inputOffset + rem; + return m_impl.template packet(inputIndex); + } else { + // Cross the stride boundary. Fallback to slow path. 
+ EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index); + ++index; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double cost = 0; + if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == 0) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == NumInputDims - 1)) { + cost += TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == NumInputDims - 1) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == 0)) { + cost += TensorOpCost::AddCost(); + } else { + cost += 3 * TensorOpCost::MulCost() + TensorOpCost::DivCost() + + 3 * TensorOpCost::AddCost(); + } + + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { + CoeffReturnType* result = const_cast(m_impl.data()); + if (((static_cast(Layout) == static_cast(ColMajor) && m_dim.actualDim() == NumDims) || + (static_cast(Layout) == static_cast(RowMajor) && m_dim.actualDim() == 0)) && + result) { + return result + m_inputOffset; + } else { + return NULL; + } + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex; + if ((static_cast(Layout) == static_cast(ColMajor) && m_dim.actualDim() == 0) || + (static_cast(Layout) == static_cast(RowMajor) && m_dim.actualDim() == NumInputDims-1)) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(m_stride == 1); + inputIndex = index * m_inputStride + m_inputOffset; + } else if ((static_cast(Layout) == static_cast(ColMajor) && m_dim.actualDim() == NumInputDims-1) || + (static_cast(Layout) == static_cast(RowMajor) && m_dim.actualDim() == 0)) { + // m_stride is aways greater than index, so let's avoid the integer division. 
+ eigen_assert(m_stride > index); + inputIndex = index + m_inputOffset; + } else { + const Index idx = index / m_stride; + inputIndex = idx * m_inputStride + m_inputOffset; + index -= idx * m_stride; + inputIndex += index; + } + return inputIndex; + } + + Dimensions m_dimensions; + Index m_stride; + Index m_inputOffset; + Index m_inputStride; + TensorEvaluator m_impl; + const internal::DimensionId m_dim; + const Device& m_device; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + + if ((static_cast(this->Layout) == static_cast(ColMajor) && this->m_dim.actualDim() == 0) || + (static_cast(this->Layout) == static_cast(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(this->m_stride == 1); + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + Index inputIndex = index * this->m_inputStride + this->m_inputOffset; + for (int i = 0; i < PacketSize; ++i) { + this->m_impl.coeffRef(inputIndex) = values[i]; + inputIndex += this->m_inputStride; + } + } else if ((static_cast(this->Layout) == static_cast(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) || + (static_cast(this->Layout) == static_cast(RowMajor) && this->m_dim.actualDim() == 0)) { + // m_stride is aways greater than index, so let's avoid the integer division. + eigen_assert(this->m_stride > index); + this->m_impl.template writePacket(index + this->m_inputOffset, x); + } else { + const Index idx = index / this->m_stride; + const Index rem = index - idx * this->m_stride; + if (rem + PacketSize <= this->m_stride) { + const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem; + this->m_impl.template writePacket(inputIndex, x); + } else { + // Cross stride boundary. Fallback to slow path. 
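+        // Mirror of the packet() slow path: spill the packet to an aligned
+        // temporary with pstore, then scatter it through coeffRef(), which
+        // redoes the srcCoeff() mapping for every element.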
+ EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index) = values[i]; + ++index; + } + } + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h new file mode 100644 index 000000000..59bf90d93 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h @@ -0,0 +1,361 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H + +namespace Eigen { + +/** \class TensorConcatenationOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor concatenation class. + * + * + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConcatenationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConcatenationOp type; +}; + +} // end namespace internal + + +template +class TensorConcatenationOp : public TensorBase, WriteAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const TensorConcatenationOp& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + template + 
EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const Axis m_axis; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConcatenationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + static const int RightNumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis()) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(0 <= m_axis && m_axis < NumDims); + const Dimensions& lhs_dims = m_leftImpl.dimensions(); + const Dimensions& rhs_dims = m_rightImpl.dimensions(); + { + int i = 0; + for (; i < m_axis; ++i) { + eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + eigen_assert(lhs_dims[i] > 0); // Now i == m_axis. + eigen_assert(rhs_dims[i] > 0); + m_dimensions[i] = lhs_dims[i] + rhs_dims[i]; + for (++i; i < NumDims; ++i) { + eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_leftStrides[0] = 1; + m_rightStrides[0] = 1; + m_outputStrides[0] = 1; + + for (int j = 1; j < NumDims; ++j) { + m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1]; + m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1]; + m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1]; + } + } else { + m_leftStrides[NumDims - 1] = 1; + m_rightStrides[NumDims - 1] = 1; + m_outputStrides[NumDims - 1] = 1; + + for (int j = NumDims - 2; j >= 0; --j) { + m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1]; + m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1]; + m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + // TODO(phli): Add short-circuit memcpy evaluation if underlying data are linear? + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) + { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() + { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + // TODO(phli): attempt to speed this up. The integer divisions and modulo are slow. + // See CL/76180724 comments for more ideas. 
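+  // Worked example (a sketch, ColMajor): concatenating a 2x3 lhs with a 2x5
+  // rhs along axis 1 yields a 2x8 output with m_outputStrides == {1, 2}.
+  // For index == 9 the loop below gives subs == {1, 4}; since subs[1] >= 3
+  // (the lhs extent), the coefficient comes from the rhs with subs[1] -= 3,
+  // i.e. right_index == 1 + (1 % 5) * 2 == 3, which is rhs(1, 1).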
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Collect dimension-wise indices (subs). + array subs; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[NumDims - 1] = index; + } + + const Dimensions& left_dims = m_leftImpl.dimensions(); + if (subs[m_axis] < left_dims[m_axis]) { + Index left_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + left_index = subs[0]; + for (int i = 1; i < NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } else { + left_index = subs[NumDims - 1]; + for (int i = NumDims - 2; i >= 0; --i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } + return m_leftImpl.coeff(left_index); + } else { + subs[m_axis] -= left_dims[m_axis]; + const Dimensions& right_dims = m_rightImpl.dimensions(); + Index right_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + right_index = subs[0]; + for (int i = 1; i < NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } else { + right_index = subs[NumDims - 1]; + for (int i = NumDims - 2; i >= 0; --i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } + return m_rightImpl.coeff(right_index); + } + } + + // TODO(phli): Add a real vectorization. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + for (int i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost() + + TensorOpCost::ModCost()); + const double lhs_size = m_leftImpl.dimensions().TotalSize(); + const double rhs_size = m_rightImpl.dimensions().TotalSize(); + return (lhs_size / (lhs_size + rhs_size)) * + m_leftImpl.costPerCoeff(vectorized) + + (rhs_size / (lhs_size + rhs_size)) * + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_leftStrides; + array m_rightStrides; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Axis m_axis; +}; + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorConcatenationOp XprType; + typedef typename Base::Dimensions Dimensions; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device) + : Base(op, device) + { + EIGEN_STATIC_ASSERT((static_cast(Layout) == static_cast(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + 
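+  // Usage sketch (assuming float 2-d tensors whose non-axis sizes match):
+  //   Eigen::Tensor<float, 2> a(2, 3), b(2, 5), c(2, 8);
+  //   a.concatenate(b, 1) = c;  // scatters c back into a and b
+  // The static assert in the constructor reflects that coeffRef() below
+  // hard-codes the column-major index decomposition.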
+ typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + // Collect dimension-wise indices (subs). + array subs; + for (int i = Base::NumDims - 1; i > 0; --i) { + subs[i] = index / this->m_outputStrides[i]; + index -= subs[i] * this->m_outputStrides[i]; + } + subs[0] = index; + + const Dimensions& left_dims = this->m_leftImpl.dimensions(); + if (subs[this->m_axis] < left_dims[this->m_axis]) { + Index left_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i]; + } + return this->m_leftImpl.coeffRef(left_index); + } else { + subs[this->m_axis] -= left_dims[this->m_axis]; + const Dimensions& right_dims = this->m_rightImpl.dimensions(); + Index right_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i]; + } + return this->m_rightImpl.coeffRef(right_index); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + const int packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + internal::pstore(values, x); + for (int i = 0; i < packetSize; ++i) { + coeffRef(index+i) = values[i]; + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h new file mode 100644 index 000000000..20b29e5fd --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -0,0 +1,628 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H + +namespace Eigen { + +/** \class TensorContraction + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor contraction class. + * + * + */ +namespace internal { + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename gebp_traits::type, + typename remove_const::type>::ResScalar Scalar; + + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + + // From NumDims below. 
+ static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; + static const int Layout = traits::Layout; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorContractionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorContractionOp type; +}; + +template +struct traits, Device_> > { + typedef Indices_ Indices; + typedef LeftArgType_ LeftArgType; + typedef RightArgType_ RightArgType; + typedef Device_ Device; + + // From NumDims below. + static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; +}; + +} // end namespace internal + +template +class TensorContractionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::gebp_traits::ResScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp( + const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims) {} + + EIGEN_DEVICE_FUNC + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const Indices m_indices; +}; + + +template +struct TensorContractionEvaluatorBase +{ + typedef typename internal::traits::Indices Indices; + typedef typename internal::traits::LeftArgType LeftArgType; + typedef typename internal::traits::RightArgType RightArgType; + typedef typename internal::traits::Device Device; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + IsAligned = true, + PacketAccess = (internal::unpacket_traits::size > 1), + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. 
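+  // (C = A * B and C^T = B^T * A^T describe the same set of products, and a
+  // RowMajor result is exactly the ColMajor storage of its transpose, so
+  // swapping the operands lets the ColMajor code path serve both layouts.)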
+ typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + typedef DSizes Dimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorContractionEvaluatorBase(const XprType& op, const Device& device) + : m_leftImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.lhsExpression(), op.rhsExpression()), device), + m_rightImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.rhsExpression(), op.lhsExpression()), device), + m_device(device), + m_result(NULL) { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + + DSizes eval_left_dims; + DSizes eval_right_dims; + array, ContractDims> eval_op_indices; + if (static_cast(Layout) == static_cast(ColMajor)) { + // For ColMajor, we keep using the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[i]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[i]; + } + // We keep the pairs of contracting indices. + for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = op.indices()[i].first; + eval_op_indices[i].second = op.indices()[i].second; + } + } else { + // For RowMajor, we need to reverse the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1]; + } + // We need to flip all the pairs of contracting indices as well as + // reversing the dimensions. + for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second; + eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first; + } + } + + // Check for duplicate axes and make sure the first index in eval_op_indices + // is increasing. 
Using O(n^2) sorting is OK since ContractDims is small + for (int i = 0; i < ContractDims; i++) { + for (int j = i + 1; j < ContractDims; j++) { + eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first && + eval_op_indices[j].second != eval_op_indices[i].second && + "contraction axes should be unique"); + if (eval_op_indices[j].first < eval_op_indices[i].first) { + numext::swap(eval_op_indices[j], eval_op_indices[i]); + } + } + } + + array lhs_strides; + lhs_strides[0] = 1; + for (int i = 0; i < LDims-1; ++i) { + lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i]; + } + + array rhs_strides; + rhs_strides[0] = 1; + for (int i = 0; i < RDims-1; ++i) { + rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i]; + } + + if (m_i_strides.size() > 0) m_i_strides[0] = 1; + if (m_j_strides.size() > 0) m_j_strides[0] = 1; + if (m_k_strides.size() > 0) m_k_strides[0] = 1; + + m_i_size = 1; + m_j_size = 1; + m_k_size = 1; + + // To compute the dimension, we simply concatenate the non-contracting + // dimensions of the left and then the right tensor. Additionally, we also + // compute the strides corresponding to the left non-contracting + // dimensions and right non-contracting dimensions. + m_lhs_inner_dim_contiguous = true; + int dim_idx = 0; + unsigned int nocontract_idx = 0; + + for (int i = 0; i < LDims; i++) { + // find if we are contracting on index i of left tensor + bool contracting = false; + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].first == i) { + contracting = true; + break; + } + } + if (!contracting) { + // add dimension size to output dimensions + m_dimensions[dim_idx] = eval_left_dims[i]; + m_left_nocontract_strides[nocontract_idx] = lhs_strides[i]; + if (dim_idx != i) { + m_lhs_inner_dim_contiguous = false; + } + if (nocontract_idx+1 < internal::array_size::value) { + m_i_strides[nocontract_idx+1] = + m_i_strides[nocontract_idx] * eval_left_dims[i]; + } else { + m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i]; + } + dim_idx++; + nocontract_idx++; + } + } + + nocontract_idx = 0; + for (int i = 0; i < RDims; i++) { + bool contracting = false; + // find if we are contracting on index i of right tensor + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].second == i) { + contracting = true; + break; + } + } + if (!contracting) { + m_dimensions[dim_idx] = eval_right_dims[i]; + if (nocontract_idx+1 < internal::array_size::value) { + m_j_strides[nocontract_idx+1] = + m_j_strides[nocontract_idx] * eval_right_dims[i]; + } else { + m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i]; + } + m_right_nocontract_strides[nocontract_idx] = rhs_strides[i]; + dim_idx++; + nocontract_idx++; + } + } + + // Now compute the strides corresponding to the contracting dimensions. We + // assumed above that non-contracting axes are represented in the same order + // in the matrix as they are in the tensor. This is not the case for + // contracting axes. As the contracting axes must be of the same size in + // each tensor, we'll only look at the first tensor here. 
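+    // The two rhs flags computed below, together with
+    // m_lhs_inner_dim_contiguous, select one of the eight evalProduct
+    // template instantiations dispatched in evalTo().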
+ m_rhs_inner_dim_contiguous = true; + m_rhs_inner_dim_reordered = false; + for (int i = 0; i < ContractDims; i++) { + Index left = eval_op_indices[i].first; + Index right = eval_op_indices[i].second; + + Index size = eval_left_dims[left]; + eigen_assert(size == eval_right_dims[right] && + "Contraction axes must be same size"); + + if (i+1 < static_cast(internal::array_size::value)) { + m_k_strides[i+1] = m_k_strides[i] * size; + } else { + m_k_size = m_k_strides[i] * size; + } + m_left_contracting_strides[i] = lhs_strides[left]; + m_right_contracting_strides[i] = rhs_strides[right]; + + if (i > 0 && right < eval_op_indices[i-1].second) { + m_rhs_inner_dim_reordered = true; + } + if (right != i) { + m_rhs_inner_dim_contiguous = false; + } + } + + // If the layout is RowMajor, we need to reverse the m_dimensions + if (static_cast(Layout) == static_cast(RowMajor)) { + for (int i = 0, j = NumDims - 1; i < j; i++, j--) { + numext::swap(m_dimensions[i], m_dimensions[j]); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalTo(m_result); + return true; + } + } + + EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + static_cast(this)->template evalProduct(buffer); + } + else { + static_cast(this)->template evalProduct(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + static_cast(this)->template evalProduct(buffer); + } + else { + static_cast(this)->template evalProduct(buffer); + } + } + } + else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + static_cast(this)->template evalProduct(buffer); + } + else { + static_cast(this)->template evalProduct(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + static_cast(this)->template evalProduct(buffer); + } + else { + static_cast(this)->template evalProduct(buffer); + } + } + } + } + + template + EIGEN_DEVICE_FUNC void evalGemv(Scalar* buffer) const { + const Index rows = m_i_size; + const Index cols = m_k_size; + + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + const Index lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = internal::unpacket_traits::size; + const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned; + const int rhs_alignment = RightEvaluator::IsAligned ? 
Aligned : Unaligned; + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides, + m_left_contracting_strides, m_k_strides); + RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides, + m_right_contracting_strides, m_k_strides); + + const Scalar alpha(1); + const Index resIncr(1); + + // zero out the result buffer (which must be of size at least rows * sizeof(Scalar) + m_device.memset(buffer, 0, rows * sizeof(Scalar)); + + internal::general_matrix_vector_product::run( + rows, cols, lhs, rhs, + buffer, resIncr, alpha); + } + + template + EIGEN_DEVICE_FUNC void evalGemm(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar) + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + + // define mr, nr, and all of my data mapper types + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef typename internal::gebp_traits Traits; + + const Index nr = Traits::nr; + const Index mr = Traits::mr; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + const Index lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = internal::unpacket_traits::size; + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + // Declare GEBP packing and kernel structs + internal::gemm_pack_lhs pack_lhs; + internal::gemm_pack_rhs pack_rhs; + + internal::gebp_kernel gebp; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + + // Sizes of the blocks to load in cache. See the Goto paper for details. 
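+    // Per the Goto/van de Geijn scheme: an mc x kc panel of the lhs is packed
+    // into blockA and a kc x nc panel of the rhs into blockB, sized so the
+    // packed panels stay cache resident while the gebp micro-kernel sweeps
+    // them in mr x nr register tiles.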
+ internal::TensorContractionBlocking blocking(k, m, n, 1); + const Index kc = blocking.kc(); + const Index mc = numext::mini(m, blocking.mc()); + const Index nc = numext::mini(n, blocking.nc()); + const Index sizeA = mc * kc; + const Index sizeB = kc * nc; + + LhsScalar* blockA = static_cast(this->m_device.allocate(sizeA * sizeof(LhsScalar))); + RhsScalar* blockB = static_cast(this->m_device.allocate(sizeB * sizeof(RhsScalar))); + + for(Index i2=0; i2m_device.deallocate(blockA); + this->m_device.deallocate(blockB); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + + if (m_result != NULL) { + m_device.deallocate(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { return m_result; } + + protected: + // Prevent assignment + TensorContractionEvaluatorBase& operator = (const TensorContractionEvaluatorBase&); + Dimensions m_dimensions; + + contract_t m_k_strides; + contract_t m_left_contracting_strides; + contract_t m_right_contracting_strides; + + bool m_lhs_inner_dim_contiguous; + bool m_rhs_inner_dim_contiguous; + bool m_rhs_inner_dim_reordered; + + left_nocontract_t m_i_strides; + right_nocontract_t m_j_strides; + left_nocontract_t m_left_nocontract_strides; + right_nocontract_t m_right_nocontract_strides; + + Index m_i_size; + Index m_j_size; + Index m_k_size; + + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Device& m_device; + Scalar* m_result; +}; + + +// evaluator for default device +template +struct TensorEvaluator, Device> : + public TensorContractionEvaluatorBase< + TensorEvaluator, Device> > { + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + // Could we use NumDimensions here? 
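+  // Usage sketch of the op this evaluator runs (a rank-1 contraction is a
+  // matrix product; float inputs assumed):
+  //   Eigen::Tensor<float, 2> A(30, 40), B(40, 50);
+  //   Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) };
+  //   Eigen::Tensor<float, 2> C = A.contract(B, dims);  // 30 x 50
+  // Here m_j_size == 50, so evalProduct() below routes to evalGemm(); a
+  // contraction with no remaining rhs dimensions (m_j_size == 1) would take
+  // the evalGemv() path instead.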
+ typedef DSizes Dimensions; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) { } + + template + EIGEN_DEVICE_FUNC void evalProduct(Scalar* buffer) const { + if (this->m_j_size == 1) { + this->template evalGemv(buffer); + return; + } + + this->template evalGemm(buffer); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h new file mode 100644 index 000000000..5cf7b4f71 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h @@ -0,0 +1,56 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H + + +namespace Eigen { +namespace internal { + +enum { + ShardByRow = 0, + ShardByCol = 1 +}; + + +// Default Blocking Strategy +template +class TensorContractionBlocking { + public: + + typedef typename LhsMapper::Scalar LhsScalar; + typedef typename RhsMapper::Scalar RhsScalar; + + EIGEN_DEVICE_FUNC TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) : + kc_(k), mc_(m), nc_(n) + { + if (ShardingType == ShardByCol) { + computeProductBlockingSizes(kc_, mc_, nc_, num_threads); + } + else { + computeProductBlockingSizes(kc_, nc_, mc_, num_threads); + } + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index kc() const { return kc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index mc() const { return mc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index nc() const { return nc_; } + + private: + Index kc_; + Index mc_; + Index nc_; +}; + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h new file mode 100644 index 000000000..d65dbb40f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h @@ -0,0 +1,1391 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014-2015 Benoit Steiner +// Copyright (C) 2015 Navdeep Jaitly +// Copyright (C) 2014 Eric Martin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
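+//
+// This header holds the GPU path for tensor contractions: a generic
+// 64x64-tile kernel (EigenContractionKernel) plus float-specialized
+// kernels built on float4 loads and warp shuffles
+// (EigenFloatContractionKernel*).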
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H + +#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) + +namespace Eigen { + +template +__device__ EIGEN_STRONG_INLINE void +EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem, + const Index m_size, const Index n_size, const Index k_size) { + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + // declare and initialize 64 registers for output 8x8 block + + // prefetch registers + Scalar lhs_pf0; + Scalar lhs_pf1; + Scalar lhs_pf2; + Scalar lhs_pf3; + Scalar lhs_pf4; + Scalar lhs_pf5; + Scalar lhs_pf6; + Scalar lhs_pf7; + + Scalar rhs_pf0; + Scalar rhs_pf1; + Scalar rhs_pf2; + Scalar rhs_pf3; + Scalar rhs_pf4; + Scalar rhs_pf5; + Scalar rhs_pf6; + Scalar rhs_pf7; + + // shared memory is formatted + // (contract idx in block, nocontract idx in block, block idx) + // where block idx is column major. This transposition limits the number of + // bank conflicts when reading the LHS. The core idea is that since the contracting + // index is shared by both sides, then the contracting index should be in threadIdx.x. + + // On the LHS, we pad each row inside of each block with an extra element. This makes + // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts + // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks. + + // On the RHS we just add 8 padding elements to the end of each block. This gives no bank + // conflicts on writes and also none on reads. + + // storage indices + const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z; + const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x; + + const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0; + const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1; + const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2; + const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3; + const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4; + const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5; + const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6; + const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7; + + const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0; + const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1; + const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2; + const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3; + const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4; + const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5; + const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6; + const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7; + + // in the loading code, the following variables are important: + // threadIdx.x: the vertical position in an 8x8 block + // threadIdx.y: the vertical index of the 8x8 block in the grid + // threadIdx.z: the horizontal position in an 8x8 block + // k: the horizontal index of the 8x8 block in the grid + // + // The k parameter is implicit (it was the loop counter for a loop that went + // from 0 to <8, but now that loop is unrolled in the below code. 
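+  // Index arithmetic sketch: lhs_store_idx_base == threadIdx.y * 72 +
+  // threadIdx.x * 9 + threadIdx.z lays each 8x8 block out as 8 rows of 9
+  // (one padding element per row), and the "_0".."_7" indices below step by
+  // 576 == 72 * 8 to address the 8 blocks a thread fills per 64-wide k slab.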
+ + const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y; + const Index lhs_vert = base_m + load_idx_vert; + +#define prefetchIntoRegisters(base_k) \ + { \ + lhs_pf0 = conv(0); \ + lhs_pf1 = conv(0); \ + lhs_pf2 = conv(0); \ + lhs_pf3 = conv(0); \ + lhs_pf4 = conv(0); \ + lhs_pf5 = conv(0); \ + lhs_pf6 = conv(0); \ + lhs_pf7 = conv(0); \ + \ + rhs_pf0 = conv(0); \ + rhs_pf1 = conv(0); \ + rhs_pf2 = conv(0); \ + rhs_pf3 = conv(0); \ + rhs_pf4 = conv(0); \ + rhs_pf5 = conv(0); \ + rhs_pf6 = conv(0); \ + rhs_pf7 = conv(0); \ + \ + if (!needs_edge_check || lhs_vert < m_size) { \ + const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \ + const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \ + const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \ + const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \ + const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \ + const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \ + const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \ + const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \ + \ + if (!needs_edge_check || lhs_horiz_7 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \ + } else if (lhs_horiz_6 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + } else if (lhs_horiz_5 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + } else if (lhs_horiz_4 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + } else if (lhs_horiz_3 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + } else if (lhs_horiz_2 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + } else if (lhs_horiz_1 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + } else if (lhs_horiz_0 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + } \ + } \ + \ + const Index rhs_vert = base_k + load_idx_vert; \ + if (!needs_edge_check || rhs_vert < k_size) { \ + const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \ + const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \ + const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \ + const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \ + const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \ + const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \ + const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \ + const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \ + \ + if (rhs_horiz_7 < n_size) { \ + rhs_pf0 = 
rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \ + } else if (rhs_horiz_6 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + } else if (rhs_horiz_5 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + } else if (rhs_horiz_4 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + } else if (rhs_horiz_3 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + } else if (rhs_horiz_2 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + } else if (rhs_horiz_1 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + } else if (rhs_horiz_0 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + } \ + } \ + } \ + +#define writeRegToShmem(_) \ + lhs_shmem[lhs_store_idx_0] = lhs_pf0; \ + rhs_shmem[rhs_store_idx_0] = rhs_pf0; \ + \ + lhs_shmem[lhs_store_idx_1] = lhs_pf1; \ + rhs_shmem[rhs_store_idx_1] = rhs_pf1; \ + \ + lhs_shmem[lhs_store_idx_2] = lhs_pf2; \ + rhs_shmem[rhs_store_idx_2] = rhs_pf2; \ + \ + lhs_shmem[lhs_store_idx_3] = lhs_pf3; \ + rhs_shmem[rhs_store_idx_3] = rhs_pf3; \ + \ + lhs_shmem[lhs_store_idx_4] = lhs_pf4; \ + rhs_shmem[rhs_store_idx_4] = rhs_pf4; \ + \ + lhs_shmem[lhs_store_idx_5] = lhs_pf5; \ + rhs_shmem[rhs_store_idx_5] = rhs_pf5; \ + \ + lhs_shmem[lhs_store_idx_6] = lhs_pf6; \ + rhs_shmem[rhs_store_idx_6] = rhs_pf6; \ + \ + lhs_shmem[lhs_store_idx_7] = lhs_pf7; \ + rhs_shmem[rhs_store_idx_7] = rhs_pf7; \ + + // declare and initialize result array +#define res(i, j) _res_##i##j +#define initResultRow(i) \ + Scalar res(i, 0) = conv(0); \ + Scalar res(i, 1) = conv(0); \ + Scalar res(i, 2) = conv(0); \ + Scalar res(i, 3) = conv(0); \ + Scalar res(i, 4) = conv(0); \ + Scalar res(i, 5) = conv(0); \ + Scalar res(i, 6) = conv(0); \ + Scalar res(i, 7) = conv(0); \ + + internal::scalar_cast_op conv; + initResultRow(0); + initResultRow(1); + initResultRow(2); + initResultRow(3); + initResultRow(4); + initResultRow(5); + initResultRow(6); + initResultRow(7); +#undef initResultRow + + for (Index base_k = 0; base_k < k_size; base_k += 64) { + // wait for previous iteration to finish with shmem. 
Despite common sense, + // the code is a bit faster with this here then at bottom of loop + __syncthreads(); + + prefetchIntoRegisters(base_k); + writeRegToShmem(); + + #undef prefetchIntoRegisters + #undef writeRegToShmem + + // wait for shared mem packing to be done before starting computation + __syncthreads(); + + // compute 8x8 matrix product by outer product. This involves packing one column + // of LHS and one row of RHS into registers (takes 16 registers). + +#define lcol(i) _lcol##i + Scalar lcol(0); + Scalar lcol(1); + Scalar lcol(2); + Scalar lcol(3); + Scalar lcol(4); + Scalar lcol(5); + Scalar lcol(6); + Scalar lcol(7); + +#define rrow(j) _rrow##j + Scalar rrow(0); + Scalar rrow(1); + Scalar rrow(2); + Scalar rrow(3); + Scalar rrow(4); + Scalar rrow(5); + Scalar rrow(6); + Scalar rrow(7); + + // Now x corresponds to k, y to m, and z to n + const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y]; + const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z]; + +#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))] +#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))] + +#define loadData(i, j) \ + lcol(0) = lhs_element(0, j); \ + rrow(0) = rhs_element(i, 0); \ + lcol(1) = lhs_element(1, j); \ + rrow(1) = rhs_element(i, 1); \ + lcol(2) = lhs_element(2, j); \ + rrow(2) = rhs_element(i, 2); \ + lcol(3) = lhs_element(3, j); \ + rrow(3) = rhs_element(i, 3); \ + lcol(4) = lhs_element(4, j); \ + rrow(4) = rhs_element(i, 4); \ + lcol(5) = lhs_element(5, j); \ + rrow(5) = rhs_element(i, 5); \ + lcol(6) = lhs_element(6, j); \ + rrow(6) = rhs_element(i, 6); \ + lcol(7) = lhs_element(7, j); \ + rrow(7) = rhs_element(i, 7); \ + +#define computeCol(j) \ + res(0, j) += lcol(0) * rrow(j); \ + res(1, j) += lcol(1) * rrow(j); \ + res(2, j) += lcol(2) * rrow(j); \ + res(3, j) += lcol(3) * rrow(j); \ + res(4, j) += lcol(4) * rrow(j); \ + res(5, j) += lcol(5) * rrow(j); \ + res(6, j) += lcol(6) * rrow(j); \ + res(7, j) += lcol(7) * rrow(j); \ + +#define computePass(i) \ + loadData(i, i); \ + \ + computeCol(0); \ + computeCol(1); \ + computeCol(2); \ + computeCol(3); \ + computeCol(4); \ + computeCol(5); \ + computeCol(6); \ + computeCol(7); \ + + computePass(0); + computePass(1); + computePass(2); + computePass(3); + computePass(4); + computePass(5); + computePass(6); + computePass(7); + +#undef lcol +#undef rrow +#undef lhs_element +#undef rhs_element +#undef loadData +#undef computeCol +#undef computePass + } // end loop over k + + // we've now iterated over all of the large (ie width 64) k blocks and + // accumulated results in registers. At this point thread (x, y, z) contains + // the sum across all big k blocks of the product of little k block of index (x, y) + // with block of index (y, z). To compute the final output, we need to reduce + // the 8 threads over y by summation. 
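+  // The reduction below is a butterfly: __shfl_xor with masks 1, 2 and 4
+  // sums across the 8 participating lanes in log2(8) == 3 steps. After the
+  // mask-1 step lanes 0 and 1 both hold s0 + s1; after mask 2, lanes 0..3
+  // hold s0 + .. + s3; after mask 4, every lane holds the full 8-way sum.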
+#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask) + +#define reduceRow(i, mask) \ + shuffleInc(i, 0, mask); \ + shuffleInc(i, 1, mask); \ + shuffleInc(i, 2, mask); \ + shuffleInc(i, 3, mask); \ + shuffleInc(i, 4, mask); \ + shuffleInc(i, 5, mask); \ + shuffleInc(i, 6, mask); \ + shuffleInc(i, 7, mask); \ + +#define reduceMatrix(mask) \ + reduceRow(0, mask); \ + reduceRow(1, mask); \ + reduceRow(2, mask); \ + reduceRow(3, mask); \ + reduceRow(4, mask); \ + reduceRow(5, mask); \ + reduceRow(6, mask); \ + reduceRow(7, mask); \ + + // actually perform the reduction, now each thread of index (_, y, z) + // contains the correct values in its registers that belong in the output + // block + reduceMatrix(1); + reduceMatrix(2); + reduceMatrix(4); + +#undef shuffleInc +#undef reduceRow +#undef reduceMatrix + + // now we need to copy the 64 values into main memory. We can't split work + // among threads because all variables are in registers. There's 2 ways + // to do this: + // (1) have 1 thread do 64 writes from registers into global memory + // (2) have 1 thread do 64 writes into shared memory, and then 8 threads + // each do 8 writes into global memory. We can just overwrite the shared + // memory from the problem we just solved. + // (2) is slightly faster than (1) due to less branching and more ILP + + // TODO: won't yield much gain, but could just use currently unused shared mem + // and then we won't have to sync + // wait for shared mem to be out of use + __syncthreads(); + +#define writeResultShmem(i, j) \ + lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \ + +#define writeRow(i) \ + writeResultShmem(i, 0); \ + writeResultShmem(i, 1); \ + writeResultShmem(i, 2); \ + writeResultShmem(i, 3); \ + writeResultShmem(i, 4); \ + writeResultShmem(i, 5); \ + writeResultShmem(i, 6); \ + writeResultShmem(i, 7); \ + + if (threadIdx.x == 0) { + writeRow(0); + writeRow(1); + writeRow(2); + writeRow(3); + writeRow(4); + writeRow(5); + writeRow(6); + writeRow(7); + } +#undef writeResultShmem +#undef writeRow + + const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8); + const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8); + + if (threadIdx.x < max_i_write) { + if (max_j_write == 8) { + // TODO: can i trade bank conflicts for coalesced writes? 
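+      // lhs_shmem is reused as an 8x8x8 staging buffer: result (i, j) of
+      // thread (y, z) sits at i + 8*y + 64*z + 512*j, so the 8 threads that
+      // differ only in x can each stream one row of 8 values to global memory.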
+ Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0]; + Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1]; + Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2]; + Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3]; + Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4]; + Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5]; + Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6]; + Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7]; + + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7; + } else { +#pragma unroll 7 + for (int j = 0; j < max_j_write; j++) { + Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j]; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val; + } + } + } +#undef res +} + + +template +__global__ void +__launch_bounds__(512) +EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ Scalar lhs_shmem[72 * 64]; + __shared__ Scalar rhs_shmem[72 * 64]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + if (base_m + 63 < m_size && base_n + 63 < n_size) { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } else { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } +} + + +template +__device__ EIGEN_STRONG_INLINE void +EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][16], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + typedef float Scalar; + + // prefetch registers + float4 lhs_pf0, rhs_pf0; + + float4 results[4]; + for (int i=0; i < 4; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + + +#define prefetch_lhs(reg, row, col) \ + if (!CHECK_LHS_BOUNDARY) { \ + if (col < k_size) { \ + reg =lhs.loadPacket(row, col); \ + } \ + } else { \ + if (col < k_size) { \ + if (row + 3 < m_size) { \ + reg =lhs.loadPacket(row, col); \ + } else if (row + 2 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + reg.z =lhs(row + 2, col); \ + } else if (row + 1 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + } else if (row < m_size) { \ + reg.x =lhs(row + 0, col); \ + } \ + } \ + } \ + + + Index lhs_vert = base_m+threadIdx.x*4; + + for (Index k = 0; k < k_size; k += 16) { + lhs_pf0 = 
internal::pset1(0); + rhs_pf0 = internal::pset1(0); + + Index lhs_horiz = threadIdx.y+k; + prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz) + + Index rhs_vert = k+(threadIdx.x%4)*4; + Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n; + + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } else { + if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + float x1, x2 ; + // the following can be a bitwise operation..... some day. + if((threadIdx.x%8) < 4) { + x1 = rhs_pf0.y; + x2 = rhs_pf0.w; + } else { + x1 = rhs_pf0.x; + x2 = rhs_pf0.z; + } + x1 = __shfl_xor(x1, 4); + x2 = __shfl_xor(x2, 4); + if((threadIdx.x%8) < 4) { + rhs_pf0.y = x1; + rhs_pf0.w = x2; + } else { + rhs_pf0.x = x1; + rhs_pf0.z = x2; + } + + // We have 64 features. + // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1. + // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3. + // ... + // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63 + // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1 + // ... + rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y); + rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w); + + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // ... + // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) + // ... + + lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w); + + +#define add_vals(fl1, fl2, fr1, fr2)\ + results[0].x += fl1.x * fr1.x;\ + results[0].y += fl1.y * fr1.x;\ + results[0].z += fl2.x * fr1.x;\ + results[0].w += fl2.y * fr1.x;\ +\ + results[1].x += fl1.x * fr1.y;\ + results[1].y += fl1.y * fr1.y;\ + results[1].z += fl2.x * fr1.y;\ + results[1].w += fl2.y * fr1.y;\ +\ + results[2].x += fl1.x * fr2.x;\ + results[2].y += fl1.y * fr2.x;\ + results[2].z += fl2.x * fr2.x;\ + results[2].w += fl2.y * fr2.x;\ +\ + results[3].x += fl1.x * fr2.y;\ + results[3].y += fl1.y * fr2.y;\ + results[3].z += fl2.x * fr2.y;\ + results[3].w += fl2.y * fr2.y;\ + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 16; koff ++) { + // 32 x threads. 
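+      // Register tile: fl1/fl2 together carry the thread's four lhs rows and
+      // fr1/fr2 four rhs columns for this koff, so each of the 16x16 threads
+      // accumulates a 4x4 output patch in results[0..3].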
+ float2 fl1 = lhs_shmem2[koff][threadIdx.x]; + float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x]; + + int start_feature = threadIdx.y * 4; + float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + + add_vals(fl1, fl2, fr1, fr2) + } + __syncthreads(); + } + +#undef prefetch_lhs +#undef add_vals + + Index horiz_base = threadIdx.y*4+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + // CHECK LHS + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK RHS + /* + int ncols_rem = fminf(n_size- horiz_base, 4); + for (int i = 0; i < ncols_rem; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + }*/ + for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } else { + // CHECK both boundaries. 
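+    // Only edge tiles ever take this fully predicated path; the boundary flags
+    // are compile-time template parameters, so the interior (no-check)
+    // instantiation above compiles with no bounds tests at all.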
+ for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + if (lhs_vert < m_size) + output(lhs_vert, horiz_base + i) = results[i].x; + if (lhs_vert + 1 < m_size) + output(lhs_vert + 1, horiz_base + i) = results[i].y; + if (lhs_vert + 2 < m_size) + output(lhs_vert + 2, horiz_base + i) = results[i].z; + if (lhs_vert + 3 < m_size) + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } +} + + +template +__device__ EIGEN_STRONG_INLINE void +EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][32], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + typedef float Scalar; + + // prefetch registers + float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3; + float4 rhs_pf0, rhs_pf1; + + float4 results[8]; + for (int i=0; i < 8; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + + + Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32; + for (Index k = 0; k < k_size; k += 32) { + lhs_pf0 = internal::pset1(0); + lhs_pf1 = internal::pset1(0); + lhs_pf2 = internal::pset1(0); + lhs_pf3 = internal::pset1(0); + + rhs_pf0 = internal::pset1(0); + rhs_pf1 = internal::pset1(0); + + if (!CHECK_LHS_BOUNDARY) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else { + // just CHECK_LHS_BOUNDARY + if (lhs_vert + 3 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 2 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24)); + lhs_pf3.z 
=lhs(lhs_vert + 2, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 1 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + } + } else if (lhs_vert < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + } + } + } + __syncthreads(); + Index rhs_vert = k+threadIdx.x*4; + Index rhs_horiz0 = threadIdx.y*2+base_n; + Index rhs_horiz1 = threadIdx.y*2+1+base_n; + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0); + rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = 
rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else { + if (rhs_horiz1 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0); + rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (k+threadIdx.x*4 + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (k+threadIdx.x*4 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + __syncthreads(); + // Loaded. Do computation + // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1. + // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3. + // .. + // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63 + rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x); + // Row 32 -> times (1, 5, 9, .. 29) for features 0, 1. + // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3. + // .. + rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y); + // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1. + // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3. + rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z); + // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1. + // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3. + rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w); + + // LHS. + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // ... + // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127) + // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. 
(126, 127) + + +#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\ + results[0].x += a_feat1.x * f1.x;\ + results[1].x += a_feat1.x * f1.y;\ + results[2].x += a_feat1.x * f2.x;\ + results[3].x += a_feat1.x * f2.y;\ + results[4].x += a_feat1.x * f3.x;\ + results[5].x += a_feat1.x * f3.y;\ + results[6].x += a_feat1.x * f4.x;\ + results[7].x += a_feat1.x * f4.y;\ +\ + results[0].y += a_feat1.y * f1.x;\ + results[1].y += a_feat1.y * f1.y;\ + results[2].y += a_feat1.y * f2.x;\ + results[3].y += a_feat1.y * f2.y;\ + results[4].y += a_feat1.y * f3.x;\ + results[5].y += a_feat1.y * f3.y;\ + results[6].y += a_feat1.y * f4.x;\ + results[7].y += a_feat1.y * f4.y;\ +\ + results[0].z += a_feat2.x * f1.x;\ + results[1].z += a_feat2.x * f1.y;\ + results[2].z += a_feat2.x * f2.x;\ + results[3].z += a_feat2.x * f2.y;\ + results[4].z += a_feat2.x * f3.x;\ + results[5].z += a_feat2.x * f3.y;\ + results[6].z += a_feat2.x * f4.x;\ + results[7].z += a_feat2.x * f4.y;\ +\ + results[0].w += a_feat2.y * f1.x;\ + results[1].w += a_feat2.y * f1.y;\ + results[2].w += a_feat2.y * f2.x;\ + results[3].w += a_feat2.y * f2.y;\ + results[4].w += a_feat2.y * f3.x;\ + results[5].w += a_feat2.y * f3.y;\ + results[6].w += a_feat2.y * f4.x;\ + results[7].w += a_feat2.y * f4.y;\ + + lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y); + lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y); + lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y); + + lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w); + lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w); + lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w); + lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w); + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 32; koff ++) { + float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8]; + float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8]; + + // first feature is at (threadIdx.y/4) * 8 last is at start + 8. 
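+      // a3/a4 hold the thread's four lhs rows and br1..br4 its eight rhs
+      // features, so each of the 32 iterations issues the 32 multiply-adds of
+      // one 4x8 outer product (the add_vals macro defined above).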
+ int start_feature = (threadIdx.y / 4) * 8; + + float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4]; + float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4]; + float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4]; + float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4]; + + add_vals(a3, a4, br1, br2, br3, br4) + } + __syncthreads(); + } // end loop over k + + + __syncthreads(); + Index horiz_base = (threadIdx.y/4)*8+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK BOUNDARY_B + for (int i = 0; i < 8; i++) { + if (horiz_base + i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } else { + // CHECK both boundaries. 
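+  // Tile accounting, as a sanity check: the 8x32 = 256 threads of this kernel
+  // each own a 4-row x 8-column register tile, exactly covering the block's
+  // 128x64 output tile.
+  static_assert(256 * 4 * 8 == 128 * 64, "per-thread 4x8 tiles must cover the 128x64 block tile");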
+    for (int i = 0; i < 8; i++) {
+      if (horiz_base + i < n_size) {
+        if (lhs_vert < m_size)
+          output(lhs_vert, horiz_base + i) = results[i].x;
+        if (lhs_vert + 1 < m_size)
+          output(lhs_vert + 1, horiz_base + i) = results[i].y;
+        if (lhs_vert + 2 < m_size)
+          output(lhs_vert + 2, horiz_base + i) = results[i].z;
+        if (lhs_vert + 3 < m_size)
+          output(lhs_vert + 3, horiz_base + i) = results[i].w;
+      }
+    }
+  }
+}
+
+
+template<typename Index, typename LhsMapper,
+         typename RhsMapper, typename OutputMapper>
+__global__ void
+__launch_bounds__(256)
+EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
+                       const OutputMapper output,
+                       const Index m_size, const Index n_size, const Index k_size) {
+  __shared__ float2 lhs_shmem[64*32];
+  __shared__ float2 rhs_shmem[128*8];
+
+  typedef float2 LHS_MEM[64][32];
+  typedef float2 RHS_MEM[128][8];
+
+  typedef float2 LHS_MEM16x16[32][16];
+  typedef float2 RHS_MEM16x16[64][8];
+
+  const Index m_block_idx = blockIdx.x;
+  const Index n_block_idx = blockIdx.y;
+
+  const Index base_m = 128 * m_block_idx;
+  const Index base_n = 64 * n_block_idx;
+
+  bool check_rhs = (base_n + 63) >= n_size;
+  bool check_lhs128 = (base_m + 127) >= m_size;
+
+  if (!check_rhs) {
+    if (!check_lhs128) {
+      // >= 128 rows left
+      EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
+                     lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+    } else {
+      EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
+                     lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+    }
+  } else {
+    if (!check_lhs128) {
+      // >= 128 rows left
+      EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
+                     lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+    } else {
+      EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
+                     lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+    }
+  }
+}
+
+template<typename Index, typename LhsMapper,
+         typename RhsMapper, typename OutputMapper>
+__global__ void
+__launch_bounds__(256)
+EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
+                       const OutputMapper output,
+                       const Index m_size, const Index n_size, const Index k_size) {
+  __shared__ float2 lhs_shmem[32][16];
+  __shared__ float2 rhs_shmem[64][8];
+
+  const Index m_block_idx = blockIdx.x;
+  const Index n_block_idx = blockIdx.y;
+
+  const Index base_m = 64 * m_block_idx;
+  const Index base_n = 64 * n_block_idx;
+
+  if (base_m + 63 < m_size) {
+    if (base_n + 63 < n_size) {
+      EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+    } else {
+      EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+    }
+  } else {
+    if (base_n + 63 < n_size) {
+      EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+    } else {
+      EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+    }
+  }
+}
+
+
+template<typename Indices, typename LeftArgType, typename RightArgType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> :
+    public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> > {
+
+  typedef GpuDevice Device;
+
+  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+  typedef TensorContractionEvaluatorBase<Self> Base;
+
+  typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
+
+  enum {
+    Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+  };
+
+  // Most of the code is assuming
that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + typedef typename LeftEvaluator::Dimensions LeftDimensions; + typedef typename RightEvaluator::Dimensions RightDimensions; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) {} + + // We need to redefine this method to make nvcc happy + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + this->m_leftImpl.evalSubExprsIfNeeded(NULL); + this->m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + this->m_result = static_cast(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar))); + evalTo(this->m_result); + return true; + } + } + + void evalTo(Scalar* buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + } + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 8, 8); + LAUNCH_CUDA_KERNEL((EigenContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + }; + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + if (m < 768 || n < 768) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(16, 16, 1); + LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel16x16), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } else { + const Index m_blocks = (m + 127) / 128; + const Index n_blocks = 
(n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 32, 1); + LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + } + }; + + template + void evalTyped(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + EIGEN_UNUSED_VARIABLE(k) + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar) + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + + setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte); + LaunchKernels::Run(lhs, rhs, output, m, n, k, this->m_device); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_GPU and __CUDACC__ +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h new file mode 100644 index 000000000..9b2cb3ff6 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h @@ -0,0 +1,467 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H + +namespace Eigen { + +namespace internal { + +enum { + Rhs = 0, + Lhs = 1 +}; + +/* + * Implementation of the Eigen blas_data_mapper class for tensors. 
+ */ + +template struct CoeffLoader { + enum { + DirectOffsets = false + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) { + eigen_assert(false && "unsupported"); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index index) const + { + return m_tensor.template packet(index); + } + + + private: + const Tensor m_tensor; +}; + +template struct CoeffLoader { + enum { + DirectOffsets = true + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_data += offset; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index index) const + { + return internal::ploadt_ro(m_data + index); + } + private: + typedef typename Tensor::Scalar Scalar; + const Scalar* m_data; +}; + +template +class SimpleTensorContractionMapper { + public: + EIGEN_DEVICE_FUNC + SimpleTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + m_tensor(tensor), + m_nocontract_strides(nocontract_strides), + m_ij_strides(ij_strides), + m_contract_strides(contract_strides), + m_k_strides(k_strides) { } + + enum { + DirectOffsets = CoeffLoader::DirectOffsets + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_tensor.offsetBuffer(offset); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row) const { + // column major assumption + return operator()(row, 0); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const { + return m_tensor.coeff(computeIndex(row, col)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const { + const bool left = (side == Lhs); + Index nocontract_val = left ? row : col; + Index linidx = 0; + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = nocontract_val / m_ij_strides[i]; + linidx += idx * m_nocontract_strides[i]; + nocontract_val -= idx * m_ij_strides[i]; + } + if (array_size::value > array_size::value) { + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx += nocontract_val; + } else { + linidx += nocontract_val * m_nocontract_strides[0]; + } + } + + Index contract_val = left ? 
col : row; + if(array_size::value > 0) { + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = contract_val / m_k_strides[i]; + linidx += idx * m_contract_strides[i]; + contract_val -= idx * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx += contract_val; + } else { + linidx += contract_val * m_contract_strides[0]; + } + } + + return linidx; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE IndexPair computeIndexPair(Index row, Index col, const Index distance) const { + const bool left = (side == Lhs); + Index nocontract_val[2] = {left ? row : col, left ? row + distance : col}; + Index linidx[2] = {0, 0}; + if (array_size::value > array_size::value) { + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = nocontract_val[0] / m_ij_strides[i]; + const Index idx1 = nocontract_val[1] / m_ij_strides[i]; + linidx[0] += idx0 * m_nocontract_strides[i]; + linidx[1] += idx1 * m_nocontract_strides[i]; + nocontract_val[0] -= idx0 * m_ij_strides[i]; + nocontract_val[1] -= idx1 * m_ij_strides[i]; + } + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx[0] += nocontract_val[0]; + linidx[1] += nocontract_val[1]; + } else { + linidx[0] += nocontract_val[0] * m_nocontract_strides[0]; + linidx[1] += nocontract_val[1] * m_nocontract_strides[0]; + } + } + + Index contract_val[2] = {left ? col : row, left ? col : row + distance}; + if (array_size::value> 0) { + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = contract_val[0] / m_k_strides[i]; + const Index idx1 = contract_val[1] / m_k_strides[i]; + linidx[0] += idx0 * m_contract_strides[i]; + linidx[1] += idx1 * m_contract_strides[i]; + contract_val[0] -= idx0 * m_k_strides[i]; + contract_val[1] -= idx1 * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx[0] += contract_val[0]; + linidx[1] += contract_val[1]; + } else { + linidx[0] += contract_val[0] * m_contract_strides[0]; + linidx[1] += contract_val[1] * m_contract_strides[0]; + } + } + return IndexPair(linidx[0], linidx[1]); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const { + // Only claim alignment when we can compute the actual stride (ie when we're + // dealing with the lhs with inner_dim_contiguous. This is because the + // matrix-vector product relies on the stride when dealing with aligned inputs. + return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 0 : size; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const { + return ((side == Lhs) && inner_dim_contiguous && array_size::value > 0) ? 
m_contract_strides[0] : 1; + } + + protected: + CoeffLoader m_tensor; + const nocontract_t m_nocontract_strides; + const nocontract_t m_ij_strides; + const contract_t m_contract_strides; + const contract_t m_k_strides; +}; + + +template +class BaseTensorContractionMapper : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + typedef typename Tensor::PacketReturnType Packet; + typedef typename unpacket_traits::half HalfPacket; + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const { + // whole method makes column major assumption + + // don't need to add offsets for now (because operator handles that) + // current code assumes packet size must be a multiple of 2 + EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + + if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) { + const Index index = this->computeIndex(i, j); + eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1); + return this->m_tensor.template packet(index); + } + + const IndexPair indexPair = this->computeIndexPair(i, j, packet_size - 1); + const Index first = indexPair.first; + const Index last = indexPair.second; + + // We can always do optimized packet reads from left hand side right now, because + // the vertical matrix dimension on the left hand side is never contracting. + // On the right hand side we need to check if the contracting dimensions may have + // been shuffled first. 
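+    // Fast path: when the packet's first and last coefficients map to
+    // contiguous storage (last - first == packet_size - 1) a single vector
+    // load is safe; otherwise the slow path below gathers the coefficients
+    // pairwise through computeIndexPair into an aligned stack buffer.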
+ if (Tensor::PacketAccess && + (side == Lhs || internal::array_size::value <= 1 || !inner_dim_reordered) && + (last - first) == (packet_size - 1)) { + + return this->m_tensor.template packet(first); + } + + EIGEN_ALIGN_MAX Scalar data[packet_size]; + + data[0] = this->m_tensor.coeff(first); + for (Index k = 1; k < packet_size - 1; k += 2) { + const IndexPair internal_pair = this->computeIndexPair(i + k, j, 1); + data[k] = this->m_tensor.coeff(internal_pair.first); + data[k + 1] = this->m_tensor.coeff(internal_pair.second); + } + data[packet_size - 1] = this->m_tensor.coeff(last); + + return pload(data); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const { + // whole method makes column major assumption + + // don't need to add offsets for now (because operator handles that) + const Index half_packet_size = unpacket_traits::size; + if (half_packet_size == packet_size) { + return loadPacket(i, j); + } + EIGEN_ALIGN_MAX Scalar data[half_packet_size]; + for (Index k = 0; k < half_packet_size; k++) { + data[k] = operator()(i + k, j); + } + return pload(data); + } +}; + + +template +class BaseTensorContractionMapper : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + typedef typename Tensor::PacketReturnType Packet; + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const { + EIGEN_ALIGN_MAX Scalar data[1]; + data[0] = this->m_tensor.coeff(this->computeIndex(i, j)); + return pload(data); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Packet loadHalfPacket(Index i, Index j) const { + return loadPacket(i, j); + } +}; + + +template +class TensorContractionSubMapper { + public: + typedef typename Tensor::PacketReturnType Packet; + typedef typename unpacket_traits::half HalfPacket; + + typedef BaseTensorContractionMapper ParentMapper; + typedef TensorContractionSubMapper Self; + typedef Self LinearMapper; + + enum { + // We can use direct offsets iff the parent mapper supports then and we can compute the strides. + // TODO: we should also enable direct offsets for the Rhs case. + UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size::value > 0) + }; + + EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset) + : m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) { + // Bake the offsets into the buffer used by the base mapper whenever possible. This avoids the need to recompute + // this offset every time we attempt to access a coefficient. 
+ if (UseDirectOffsets) { + Index stride = m_base_mapper.stride(); + m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride); + } + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper(i, 0); + } + return m_base_mapper(i + m_vert_offset, m_horiz_offset); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper(i, j); + } + return m_base_mapper(i + m_vert_offset, j + m_horiz_offset); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, 0); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, m_horiz_offset); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, j); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, j + m_horiz_offset); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadHalfPacket(i, 0); + } + return m_base_mapper.template loadHalfPacket(i + m_vert_offset, m_horiz_offset); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, Packet p) const { + if (UseDirectOffsets) { + m_base_mapper.storePacket(i, 0, p); + } + m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + if (UseDirectOffsets) { + return LinearMapper(m_base_mapper, i, j); + } + return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const { + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? 
Aligned : Unaligned;
+    if (UseDirectOffsets) {
+      return m_base_mapper.template loadPacket<ActualAlignment>(i, 0);
+    }
+    return m_base_mapper.template loadPacket<ActualAlignment>(i + m_vert_offset, m_horiz_offset);
+  }
+
+  template <typename PacketT>
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const {
+    return false;
+  }
+
+ private:
+  ParentMapper m_base_mapper;
+  const Index m_vert_offset;
+  const Index m_horiz_offset;
+};
+
+
+template<typename Scalar_, typename Index, int side,
+         typename Tensor,
+         typename nocontract_t, typename contract_t,
+         int packet_size,
+         bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
+class TensorContractionInputMapper
+  : public BaseTensorContractionMapper<Scalar_, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> {
+
+ public:
+  typedef Scalar_ Scalar;
+  typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> Base;
+  typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> SubMapper;
+  typedef SubMapper VectorMapper;
+
+  EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor,
+                               const nocontract_t& nocontract_strides,
+                               const nocontract_t& ij_strides,
+                               const contract_t& contract_strides,
+                               const contract_t& k_strides)
+      : Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
+
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const {
+    return SubMapper(*this, i, j);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
+    return VectorMapper(*this, i, j);
+  }
+};
+
+
+
+}  // end namespace internal
+}  // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
new file mode 100644
index 000000000..c70dea053
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -0,0 +1,1043 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H + +// evaluator for thread pool device +#ifdef EIGEN_USE_THREADS + +namespace Eigen { + +#ifdef EIGEN_USE_SIMPLE_THREAD_POOL +namespace internal { + +template +struct packLhsArg { + LhsScalar* blockA; + const LhsMapper& lhs; + const Index m_start; + const Index k_start; + const Index mc; + const Index kc; +}; + +template +struct packRhsAndKernelArg { + const MaxSizeVector* blockAs; + RhsScalar* blockB; + const RhsMapper& rhs; + OutputMapper& output; + const Index m; + const Index k; + const Index n; + const Index mc; + const Index kc; + const Index nc; + const Index num_threads; + const Index num_blockAs; + const Index max_m; + const Index k_block_idx; + const Index m_block_idx; + const Index n_block_idx; + const Index m_blocks; + const Index n_blocks; + MaxSizeVector* kernel_notifications; + const MaxSizeVector* lhs_notifications; + const bool need_to_pack; +}; + +} // end namespace internal +#endif // EIGEN_USE_SIMPLE_THREAD_POOL + +template +struct TensorEvaluator, ThreadPoolDevice> : + public TensorContractionEvaluatorBase, ThreadPoolDevice> > { + + typedef ThreadPoolDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout, + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. 
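+  // The swap is sound because a RowMajor matrix is the ColMajor view of its
+  // transpose and (A * B)^T = B^T * A^T: evaluating B^T * A^T in ColMajor
+  // writes byte-for-byte the buffer a RowMajor A * B would produce.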
+ typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef typename internal::gebp_traits Traits; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) {} + +#ifndef EIGEN_USE_SIMPLE_THREAD_POOL + template + void evalProduct(Scalar* buffer) const { + typedef internal::TensorContractionInputMapper< + LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t, + contract_t, internal::packet_traits::size, + lhs_inner_dim_contiguous, false, Unaligned> + LhsMapper; + typedef internal::TensorContractionInputMapper< + RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t, + contract_t, internal::packet_traits::size, + rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned> + RhsMapper; + typedef internal::blas_data_mapper OutputMapper; + typedef internal::gemm_pack_lhs + LhsPacker; + typedef internal::gemm_pack_rhs< + RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor> + RhsPacker; + typedef internal::gebp_kernel + GebpKernel; + + const Index m = this->m_i_size; + const Index n = this->m_j_size; + const Index k = this->m_k_size; + if (m == 0 || n == 0 || k == 0) return; + + // Compute a set of algorithm parameters: + // - kernel block sizes (bm, bn, bk) + // - task grain sizes (number of kernels executed per task: gm, gn) + // - number of threads + // - sharding by row/column + // - parallel packing or first lhs then rhs + // and some derived parameters: + // - number of tasks (nm, nn, nk) + // - number of kernels (nm0, nn0) + // Unfortunately, all these parameters are tightly interdependent. + // So in some cases we first compute approximate values, then compute other + // values based on these approximations and then refine the approximations. + + // There are lots of heuristics here. There is some reasoning behind them, + // but ultimately they are just tuned on contraction benchmarks for + // different input configurations, thread counts and instruction sets. + // So feel free to question any of them. + + // Compute whether we want to shard by row or by column. + // This is a first approximation, it will be refined later. Since we don't + // know number of threads yet we use 2, because what's we are most + // interested in at this point is whether it makes sense to use + // parallelization at all or not. + bool shard_by_col = shardByCol(m, n, 2); + + // First approximation of kernel blocking sizes. + // Again, we don't know number of threads yet, so we use 2. 
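+    // Both the blocking here and the later task counts use ceiling division:
+    // divup(x, y) == (x + y - 1) / y, e.g. nm0 = divup(m, bm) kernel blocks
+    // along m once the kernel block sizes are fixed.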
+ Index bm, bn, bk; + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Compute optimal number of threads. + // Note: we use bk instead of k here because we are interested in amount of + // _parallelizable_ computations, and computations are not parallelizable + // across k dimension. + const TensorOpCost cost = + contractionCost(m, n, bm, bn, bk, shard_by_col, false); + int num_threads = TensorCostModel::numThreads( + static_cast(n) * m, cost, this->m_device.numThreads()); + + // TODO(dvyukov): this is a stop-gap to prevent regressions while the cost + // model is not tuned. Remove this when the cost model is tuned. + if (n == 1) num_threads = 1; + + if (num_threads == 1) { + // The single-threaded algorithm should be faster in this case. + if (n == 1) + this->template evalGemv(buffer); + else + this->template evalGemm(buffer); + return; + } + + // Now that we know number of threads, recalculate sharding and blocking. + shard_by_col = shardByCol(m, n, num_threads); + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Number of kernels for each dimension. + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index nk = divup(k, bk); + + // Calculate task grain size (number of kernels executed per task). + // This task size coarsening serves two purposes: + // 1. It reduces per-task overheads including synchronization overheads. + // 2. It allows to use caches better (reuse the same packed rhs in several + // consecutive kernels). + Index gm = 1; + Index gn = 1; + // If we are sharding by column, then we prefer to reduce rows first. + if (shard_by_col) { + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + } else { + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + } + // Number of tasks in each dimension. + Index nm = divup(nm0, gm); + Index nn = divup(nn0, gn); + + // Last by not least, decide whether we want to issue both lhs and rhs + // packing in parallel; or issue lhs packing first, and then issue rhs + // packing when lhs packing completes (for !shard_by_col lhs and rhs are + // swapped). Parallel packing allows more parallelism (for both packing and + // kernels), while sequential packing provides better locality (once + // a thread finishes rhs packing it proceed to kernels with that rhs). + // First, we are interested in parallel packing if there are few tasks. + bool parallel_pack = num_threads >= nm * nn; + // Also do parallel packing if all data fits into L2$. + if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= + l2CacheSize() * num_threads) + parallel_pack = true; + // But don't do it if we will use each rhs only once. Locality seems to be + // more important in this case. + if ((shard_by_col ? 
nm : nn) == 1) parallel_pack = false; + + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, + this->m_i_strides, this->m_left_contracting_strides, + this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, + this->m_j_strides, this->m_right_contracting_strides, + this->m_k_strides); + + Context(this->m_device, num_threads, lhs, rhs, buffer, m, n, + k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, + shard_by_col, parallel_pack) + .run(); + } + + // Context coordinates a single parallel gemm operation. + template + class Context { + public: + Context(const Device& device, int num_threads, LhsMapper& lhs, + RhsMapper& rhs, Scalar* buffer, Index tm, Index tn, Index tk, Index bm, + Index bn, Index bk, Index nm, Index nn, Index nk, Index gm, + Index gn, Index nm0, Index nn0, bool shard_by_col, + bool parallel_pack) + : device_(device), + lhs_(lhs), + rhs_(rhs), + buffer_(buffer), + output_(buffer, tm), + num_threads_(num_threads), + shard_by_col_(shard_by_col), + parallel_pack_(parallel_pack), + m_(tm), + n_(tn), + k_(tk), + bm_(bm), + bn_(bn), + bk_(bk), + nm_(nm), + nn_(nn), + nk_(nk), + gm_(gm), + gn_(gn), + nm0_(nm0), + nn0_(nn0) + { + for (Index x = 0; x < P; x++) { + // Normal number of notifications for k slice switch is + // nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only + // nm_ + nn_ notifications, because they will not receive notifications + // from preceeding kernels. + state_switch_[x] = + x == 0 + ? 1 + : (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) + + (x == P - 1 ? nm_ * nn_ : 0); + state_packing_ready_[x] = + parallel_pack_ ? 0 : (shard_by_col_ ? nm_ : nn_); + state_kernel_[x] = new std::atomic*[nm_]; + for (Index m = 0; m < nm_; m++) { + state_kernel_[x][m] = new std::atomic[nn_]; + // Kernels generally receive 3 notifications (previous kernel + 2 + // packing), but the first slice won't get notifications from previous + // kernels. + for (Index n = 0; n < nn_; n++) + state_kernel_[x][m][n].store( + (x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1), + std::memory_order_relaxed); + } + } + + // Allocate memory for packed rhs/lhs matrices. + size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1); + size_t lhs_size = + divup(bm_ * bk_ * sizeof(LhsScalar), align) * align; + size_t rhs_size = + divup(bn_ * bk_ * sizeof(RhsScalar), align) * align; + packed_mem_ = static_cast(internal::aligned_malloc( + (nm0_ * lhs_size + nn0_ * rhs_size) * std::min(nk_, P - 1))); + char* mem = static_cast(packed_mem_); + for (Index x = 0; x < numext::mini(nk_, P - 1); x++) { + packed_lhs_[x].resize(nm0_); + for (Index m = 0; m < nm0_; m++) { + packed_lhs_[x][m] = reinterpret_cast(mem); + mem += lhs_size; + } + packed_rhs_[x].resize(nn0_); + for (Index n = 0; n < nn0_; n++) { + packed_rhs_[x][n] = reinterpret_cast(mem); + mem += rhs_size; + } + } + } + + ~Context() { + for (Index x = 0; x < P; x++) { + for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m]; + delete[] state_kernel_[x]; + } + internal::aligned_free(packed_mem_); + } + + void run() { + // Kick off packing of the first slice. + signal_switch(0, 1); + // Wait for overall completion. + // TODO(dvyukov): this wait can lead to deadlock. + // If nthreads contractions are concurrently submitted from worker + // threads, this wait will block all worker threads and the system will + // deadlock. 
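+    // From here on, progress is driven entirely by the countdown counters set
+    // up in the constructor: every signal_* call is an atomic fetch_sub, and
+    // whichever worker drives a counter to zero enqueues the dependent packing
+    // or kernel tasks, so this thread only waits for the final switch.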
+ done_.Wait(); + } + + private: + Notification done_; + const Device& device_; + LhsMapper& lhs_; + RhsMapper& rhs_; + Scalar* const buffer_; + OutputMapper output_; + const int num_threads_; + const bool shard_by_col_; + const bool parallel_pack_; + // Matrix sizes. + const Index m_; + const Index n_; + const Index k_; + // Block sizes. + const Index bm_; + const Index bn_; + const Index bk_; + // Number of tasks. + const Index nm_; + const Index nn_; + const Index nk_; + // Task grain sizes (number of kernels executed per task). + const Index gm_; + const Index gn_; + // Number of blocks (this is different from ni_/nn_ because of task size + // coarsening). + const Index nm0_; + const Index nn0_; + + // Parallelization strategy. + // + // Blocks related to the same k block can run in parallel because they write + // to different output blocks. So we parallelize within k slices, this + // gives us parallelism level of m x n. Before we can start any kernels + // related to k-th slice, we need to issue m lhs packing tasks and n rhs + // packing tasks. + // + // However, there is a bottleneck when we are finishing kernels for k-th + // slice (at the very end there is only 1 runnable kernel). To mitigate this + // bottleneck we allow kernels from k-th and k+1-th slices to run in + // parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same + // output block, so they must not run in parallel. + // + // This gives us the following dependency graph. + // On each k slice we have m x n kernel tasks, m lhs paking tasks and n rhs + // packing tasks. + // Kernel (m, n, k) can start when: + // - kernel (m, n, k-1) has finished + // - lhs packing (m, k) has finished + // - rhs packing (n, k) has finished + // Lhs/rhs packing can start when: + // - all k-1 packing has finished (artificially imposed to limit amount of + // parallel packing) + // + // On top of that we limit runnable tasks to two consecutive k slices. + // This is done to limit amount of memory we need for packed lhs/rhs + // (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_). + // + // state_switch_ tracks when we are ready to switch to the next k slice. + // state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n). + // These variable are rolling over 3 consecutive k slices: first two we are + // actively executing + one to track completion of kernels in the second + // slice. + static const Index P = 3; + void* packed_mem_; + std::vector packed_lhs_[P - 1]; + std::vector packed_rhs_[P - 1]; + std::atomic** state_kernel_[P]; + // state_switch_ is frequently modified by worker threads, while other + // fields are read-only after constructor. Let's move it to a separate cache + // line to reduce cache-coherency traffic. + char pad_[128]; + std::atomic state_packing_ready_[P]; + std::atomic state_switch_[P]; + + void pack_lhs(Index m, Index k) { + const Index mend = m * gm_ + gm(m); + for (Index m1 = m * gm_; m1 < mend; m1++) + LhsPacker()(packed_lhs_[k % (P - 1)][m1], + lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1)); + + if (!parallel_pack_ && shard_by_col_) { + signal_packing(k); + } else { + signal_switch(k + 1); + for (Index n = nn_ - 1; n >= 0; n--) signal_kernel(m, n, k, n == 0); + } + } + + void pack_rhs(Index n, Index k) { + const Index nend = n * gn_ + gn(n); + for (Index n1 = n * gn_; n1 < nend; n1++) { + if (k == 0) { + // Zero the output memory in parallel. + // On 10000x2x10000 mm zeroing can easily take half of time. + // Zero (bn x m) row. 
Safe to do here because all kernels that will + // write to this memory depend on completion of this task. + // Note: don't call device_.memset() here. device_.memset() blocks on + // thread pool worker thread, which can lead to underutilization and + // deadlocks. + memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar)); + } + RhsPacker()(packed_rhs_[k % (P - 1)][n1], + rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1)); + } + + if (parallel_pack_ || shard_by_col_) { + signal_switch(k + 1); + for (Index m = nm_ - 1; m >= 0; m--) signal_kernel(m, n, k, m == 0); + } else { + signal_packing(k); + } + } + + void kernel(Index m, Index n, Index k) { + // Note: order of iteration matters here. Iteration over m is innermost + // because we want to reuse the same packed rhs in consequetive tasks + // (rhs fits into L2$ while lhs only into L3$). + const Index nend = n * gn_ + gn(n); + const Index mend = m * gm_ + gm(m); + if (shard_by_col_) { + for (Index n1 = n * gn_; n1 < nend; n1++) { + for (Index m1 = m * gm_; m1 < mend; m1++) + GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_), + packed_lhs_[k % (P - 1)][m1], + packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1), + Scalar(1), -1, -1, 0, 0); + } + } else { + for (Index m1 = m * gm_; m1 < mend; m1++) + for (Index n1 = n * gn_; n1 < nend; n1++) { + GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_), + packed_lhs_[k % (P - 1)][m1], + packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1), + Scalar(1), -1, -1, 0, 0); + } + } + signal_kernel(m, n, k + 1, false); + signal_switch(k + 2); + } + + void signal_packing(Index k) { + eigen_assert(!parallel_pack_); + Index s = state_packing_ready_[k % P].fetch_sub(1); + eigen_assert(s > 0); + if (s != 1) return; + state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_; + enqueue_packing(k, shard_by_col_); + } + + void signal_kernel(Index m, Index n, Index k, bool sync) { + std::atomic* state = &state_kernel_[k % P][m][n]; + Index s = state->load(); + eigen_assert(s > 0); + if (s != 1 && state->fetch_sub(1) != 1) return; + state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed); + if (sync) + kernel(m, n, k); + else + device_.enqueueNoNotification([=]() { kernel(m, n, k); }); + } + + void signal_switch(Index k, Index v = 1) { + Index s = state_switch_[k % P].fetch_sub(v); + eigen_assert(s >= v); + if (s != v) return; + + // Ready to switch to the next k slice. + // Reset counter for the next iteration. + state_switch_[k % P] = + (parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) + + nm_ * nn_; + if (k < nk_) { + // Issue lhs/rhs packing. Their completion will in turn kick off + // kernels. + if (parallel_pack_) { + enqueue_packing(k, !shard_by_col_); + enqueue_packing(k, shard_by_col_); + } else if (shard_by_col_) { + enqueue_packing(k, false); + } else { + enqueue_packing(k, true); + } + + // Termination handling. + // Because kernel completion signals k + 2 switch, we need to finish nk + // + 2 slices without issuing any tasks on nk + 1 slice. So here we + // pretend that all nk + 1 packing tasks just finish instantly; so that + // nk + 2 switch only waits for completion of nk kernels. + } else if (k == nk_) { + signal_switch(k + 1, + parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)); + } else { + done_.Notify(); + } + } + + // Enqueue all rhs/lhs packing for k-th slice. + void enqueue_packing(Index k, bool rhs) { + enqueue_packing_helper(0, rhs ? 
nn_ : nm_, k, rhs); + } + + void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) { + if (end - start == 1) { + if (rhs) + pack_rhs(start, k); + else + pack_lhs(start, k); + } else { + Index mid = (start + end) / 2; + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(mid, end, k, rhs); }); + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(start, mid, k, rhs); }); + } + } + + // Block sizes with accounting for potentially incomplete last block. + Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; } + Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; } + Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; } + // Task grain sizes accounting for potentially incomplete last task. + Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; } + Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; } + + Context(const Context&) = delete; + void operator=(const Context&) = delete; + }; + + // Decide whether we want to shard m x n contraction by columns or by rows. + static bool shardByCol(Index m, Index n, Index num_threads) { + // Note: we are comparing both n and m against Traits::nr, it is not + // a mistake. We are trying to figure out how both n and m will fit into + // the main sharding dimension. + + // Sharding by column is the default + // ... unless there is enough data for vectorization over rows + if (m / num_threads >= Traits::nr && + // and not enough data for vectorization over columns + (n / num_threads < Traits::nr || + // ... or barely enough data for vectorization over columns, + // but it is not evenly dividable across threads + (n / num_threads < 4 * Traits::nr && + (n % (num_threads * Traits::nr)) != 0 && + // ... and it is evenly dividable across threads for rows + ((m % (num_threads * Traits::nr)) == 0 || + // .. or it is not evenly dividable for both dimensions but + // there is much more data over rows so that corner effects are + // mitigated. + (m / n >= 6))))) + return false; + // Wait, or if matrices are just substantially prolonged over the other + // dimension. + if (n / num_threads < 16 * Traits::nr && m > n * 32) return false; + return true; + } + + Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn, + int num_threads, bool shard_by_col) const { + Index gm = 1; + Index gm1 = 1; + Index nm0 = divup(m, bm); + Index nm1 = nm0; + for (;;) { + // Find the next candidate for m grain size. It needs to result in + // different number of blocks. E.g. if we have 10 kernels, we want to try + // 5 and 10, but not 6, 7, 8 and 9. + while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++; + if (gm1 > nm0) break; + // Check the candidate. + int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nm1 = divup(nm0, gm1); + if (res == 0) continue; + // Commit new grain size. 
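+      // Walk-through of this search (illustrative sketch, not executed):
+      // for nm0 = 10 blocks the loop only evaluates grain sizes that change
+      // the task count,
+      //   gm1 = 2, 3, 4, 5, 10  ->  divup(10, gm1) = 5, 4, 3, 2, 1 tasks,
+      // and skips gm1 = 6..9 because divup(10, gm1) stays at 2. checkGrain()
+      // below then keeps a candidate when it improves the parallelism metric
+      //   tasks / (divup(tasks, num_threads) * num_threads);
+      // e.g. for 12 kernels on 4 threads, grain 2 gives 6 tasks and
+      // 6 / (2 * 4) = 0.75, while grain 3 gives 4 tasks and a full
+      // 4 / (1 * 4) = 1.0.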
+ gm = gm1; + } + return gm; + } + + Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + int num_threads, bool shard_by_col) const { + Index gn = 1; + Index gn1 = 1; + Index nn0 = divup(n, bn); + Index nn1 = nn0; + for (;;) { + while (gn1 <= nn0 && nn1 == divup(nn0, gn1)) gn1++; + if (gn1 > nn0) break; + int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nn1 = divup(nn0, gn1); + if (res == 0) continue; + gn = gn1; + } + return gn; + } + + // checkGrain checks whether grain (gm, gn) is suitable and is better than + // (oldgm, oldgn). + int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + Index gn, Index oldgm, Index oldgn, int num_threads, + bool shard_by_col) const { + const TensorOpCost cost = + contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true); + double taskSize = TensorCostModel::taskSize( + static_cast(bm) * gm * bn * gn, cost); + // If the task is too small, then we agree on it regardless of anything + // else. Otherwise synchronization overheads will dominate. + if (taskSize < 1) return 1; + // If it is too large, then we reject it and all larger tasks. + if (taskSize > 2) return -1; + // Now we are in presumably good task size range. + // The main deciding factor here is parallelism. Consider that we have 12 + // kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes. + // But 2/4 yield 6/3 tasks, which gives us parallelism of 0.75 (at most 3/4 + // of cores will be busy). While grain size 3 gives us 4 tasks, which gives + // us parallelism of 1 (we can load all cores). + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index new_tasks = divup(nm0, gm) * divup(nn0, gn); + double new_parallelism = static_cast(new_tasks) / + (divup(new_tasks, num_threads) * num_threads); + Index old_tasks = divup(nm0, oldgm) * divup(nn0, oldgn); + double old_parallelism = static_cast(old_tasks) / + (divup(old_tasks, num_threads) * num_threads); + if (new_parallelism > old_parallelism || new_parallelism == 1) return 1; + return 0; + } + +#else // EIGEN_USE_SIMPLE_THREAD_POOL + + template + void evalProduct(Scalar* buffer) const { + if (this->m_j_size == 1) { + this->template evalGemv(buffer); + return; + } + + evalGemm(buffer); + } + + template + void evalGemm(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar) + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + + + const int lhs_packet_size = internal::unpacket_traits::size; + const int rhs_packet_size = internal::unpacket_traits::size; + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + // TODO: packing could be faster sometimes if we supported row major tensor mappers + typedef internal::gemm_pack_lhs LhsPacker; + typedef internal::gemm_pack_rhs RhsPacker; + + // TODO: replace false, false with conjugate values? 
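+    // For reference, gebp ("general block-panel product") accumulates one
+    // packed lhs block times one packed rhs panel into the output mapper;
+    // per invocation it effectively computes (semantic sketch only):
+    //   C.block(m_start, n_start, mc, nc) += A_packed * B_packed;
+    // with A_packed of size mc x kc and B_packed of size kc x nc. The
+    // Scalar(1) passed at the call sites below is the accumulation alpha.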
+ typedef internal::gebp_kernel GebpKernel; + + typedef internal::packLhsArg packLArg; + typedef internal::packRhsAndKernelArg packRKArg; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + + // compute block sizes (which depend on number of threads) + const Index num_threads = this->m_device.numThreads(); + internal::TensorContractionBlocking blocking(k, m, n, num_threads); + Index mc = blocking.mc(); + Index nc = blocking.nc(); + Index kc = blocking.kc(); + eigen_assert(mc <= m); + eigen_assert(nc <= n); + eigen_assert(kc <= k); + +#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b)) + const Index k_blocks = CEIL_DIV(k, kc); + const Index n_blocks = CEIL_DIV(n, nc); + const Index m_blocks = CEIL_DIV(m, mc); + const Index sizeA = mc * kc; + const Index sizeB = kc * nc; + + /* cout << "m: " << m << " n: " << n << " k: " << k << endl; + cout << "mc: " << mc << " nc: " << nc << " kc: " << kc << endl; + cout << "m_blocks: " << m_blocks << " n_blocks: " << n_blocks << " k_blocks: " << k_blocks << endl; + cout << "num threads: " << num_threads << endl; + */ + + // note: m_device.allocate should return 16 byte aligned pointers, but if blockA and blockB + // aren't 16 byte aligned segfaults will happen due to SIMD instructions + // note: You can get away with allocating just a single blockA and offsets and meet the + // the alignment requirements with the assumption that + // (Traits::mr * sizeof(ResScalar)) % 16 == 0 + const Index numBlockAs = numext::mini(num_threads, m_blocks); + MaxSizeVector blockAs(num_threads); + for (int i = 0; i < num_threads; i++) { + blockAs.push_back(static_cast(this->m_device.allocate(sizeA * sizeof(LhsScalar)))); + } + + // To circumvent alignment issues, I'm just going to separately allocate the memory for each thread + // TODO: is this too much memory to allocate? This simplifies coding a lot, but is wasteful. + // Other options: (1) reuse memory when a thread finishes. con: tricky + // (2) allocate block B memory in each thread. 
con: overhead + MaxSizeVector blockBs(n_blocks); + for (int i = 0; i < n_blocks; i++) { + blockBs.push_back(static_cast(this->m_device.allocate(sizeB * sizeof(RhsScalar)))); + } + + // lhs_notifications starts with all null Notifications + MaxSizeVector lhs_notifications(num_threads, nullptr); + + // this should really be numBlockAs * n_blocks; + const Index num_kernel_notifications = num_threads * n_blocks; + MaxSizeVector kernel_notifications(num_kernel_notifications, + nullptr); + + for (Index k_block_idx = 0; k_block_idx < k_blocks; k_block_idx++) { + const Index k_start = k_block_idx * kc; + // make sure we don't overshoot right edge of left matrix + const Index actual_kc = numext::mini(k_start + kc, k) - k_start; + + for (Index m_block_idx = 0; m_block_idx < m_blocks; m_block_idx += numBlockAs) { + const Index num_blocks = numext::mini(m_blocks-m_block_idx, numBlockAs); + + for (Index mt_block_idx = m_block_idx; mt_block_idx < m_block_idx+num_blocks; mt_block_idx++) { + const Index m_start = mt_block_idx * mc; + const Index actual_mc = numext::mini(m_start + mc, m) - m_start; + eigen_assert(actual_mc > 0); + + Index blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads; + + for (int i = 0; i < n_blocks; ++i) { + Index notification_id = (blockAId * n_blocks + i); + // Wait for any current kernels using this slot to complete + // before using it. + if (kernel_notifications[notification_id]) { + wait_until_ready(kernel_notifications[notification_id]); + delete kernel_notifications[notification_id]; + } + kernel_notifications[notification_id] = new Notification(); + } + const packLArg arg = { + blockAs[blockAId], // blockA + lhs, // lhs + m_start, // m + k_start, // k + actual_mc, // mc + actual_kc, // kc + }; + + // Delete any existing notification since we may be + // replacing it. The algorithm should ensure that there are + // no existing waiters on this notification. + delete lhs_notifications[blockAId]; + lhs_notifications[blockAId] = + this->m_device.enqueue(&Self::packLhs, arg); + } + + // now start kernels. + const Index m_base_start = m_block_idx * mc; + const bool need_to_pack = m_block_idx == 0; + + for (Index n_block_idx = 0; n_block_idx < n_blocks; n_block_idx++) { + const Index n_start = n_block_idx * nc; + const Index actual_nc = numext::mini(n_start + nc, n) - n_start; + + // first make sure the previous kernels are all done before overwriting rhs. Also wait if + // we're going to start new k. In both cases need_to_pack is true. + if (need_to_pack) { + for (Index i = num_blocks; i < num_threads; ++i) { + Index blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads; + Index future_id = (blockAId * n_blocks + n_block_idx); + wait_until_ready(kernel_notifications[future_id]); + } + } + + packRKArg arg = { + &blockAs, // blockA + blockBs[n_block_idx], // blockB + rhs, // rhs + output, // output + m_base_start, // m + k_start, // k + n_start, // n + mc, // mc + actual_kc, // kc + actual_nc, // nc + num_threads, + numBlockAs, + m, + k_block_idx, + m_block_idx, + n_block_idx, // n_block_idx + m_blocks, // m_blocks + n_blocks, // n_blocks + &kernel_notifications, // kernel notifications + &lhs_notifications, // lhs notifications + need_to_pack, // need_to_pack + }; + + // We asynchronously kick off this function, which ends up + // notifying the appropriate kernel_notifications objects, + // which this thread waits on before exiting. 
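+          // Slot layout, for reference: the kernel that consumes lhs block
+          // `blockAId` against rhs block `n_block_idx` signals
+          //   kernel_notifications[blockAId * n_blocks + n_block_idx]
+          // so reusing blockAs[blockAId] first requires draining that row:
+          //   for (Index i = 0; i < n_blocks; ++i)
+          //     wait_until_ready(kernel_notifications[blockAId * n_blocks + i]);
+          // which is exactly what the waits earlier in this loop do.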
+ this->m_device.enqueueNoNotification(&Self::packRhsAndKernel, arg); + } + } + } + + // Make sure all the kernels are done. + for (size_t i = 0; i < kernel_notifications.size(); ++i) { + wait_until_ready(kernel_notifications[i]); + delete kernel_notifications[i]; + } + + // No need to wait for lhs notifications since they should have + // already been waited on. Just clean them up. + for (size_t i = 0; i < lhs_notifications.size(); ++i) { + delete lhs_notifications[i]; + } + + // deallocate all of the memory for both A and B's + for (size_t i = 0; i < blockAs.size(); i++) { + this->m_device.deallocate(blockAs[i]); + } + for (size_t i = 0; i < blockBs.size(); i++) { + this->m_device.deallocate(blockBs[i]); + } + +#undef CEIL_DIV + } + + /* + * Packs a LHS block of size (mt, kc) starting at lhs(m, k). Before packing + * the LHS block, check that all of the kernels that worked on the same + * mt_block_idx in the previous m_block are done. + */ + template + static void packLhs(const packLArg arg) { + // perform actual packing + LhsPacker pack_lhs; + pack_lhs(arg.blockA, arg.lhs.getSubMapper(arg.m_start, arg.k_start), arg.kc, arg.mc); + } + + /* + * Packs a RHS block of size (kc, nc) starting at (k, n) after checking that + * all kernels in the previous block are done. + * Then for each LHS future, we wait on the future and then call GEBP + * on the area packed by the future (which starts at + * blockA + future_idx * mt * kc) on the LHS and with the full packed + * RHS block. + * The output of this GEBP is written to output(m + i * mt, n). + */ + template + static void packRhsAndKernel(packRKArg arg) { + if (arg.need_to_pack) { + RhsPacker pack_rhs; + pack_rhs(arg.blockB, arg.rhs.getSubMapper(arg.k, arg.n), arg.kc, arg.nc); + } + + GebpKernel gebp; + for (Index mt_block_idx = 0; mt_block_idx < arg.num_blockAs; mt_block_idx++) { + const Index m_base_start = arg.m + arg.mc*mt_block_idx; + if (m_base_start < arg.max_m) { + Index blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads; + wait_until_ready((*arg.lhs_notifications)[blockAId]); + const Index actual_mc = numext::mini(m_base_start + arg.mc, arg.max_m) - m_base_start; + gebp(arg.output.getSubMapper(m_base_start, arg.n), + (*arg.blockAs)[blockAId], arg.blockB, + actual_mc, arg.kc, arg.nc, Scalar(1), -1, -1, 0, 0); + + // Notify that the kernel is done. + const Index set_idx = blockAId * arg.n_blocks + arg.n_block_idx; + (*arg.kernel_notifications)[set_idx]->Notify(); + } + } + } +#endif // EIGEN_USE_SIMPLE_THREAD_POOL + + TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk, + bool shard_by_col, bool prepacked) const { + const int packed_size = std::min(PacketType::size, + PacketType::size); + const int output_packet_size = internal::unpacket_traits::size; + const double kd = static_cast(bk); + // Peak VFMA bandwidth is 0.5. However if we have not enough data for + // vectorization bandwidth drops. The 4.0 and 2.0 bandwidth is determined + // experimentally. + double computeBandwidth = bk == 1 ? 4.0 : + (shard_by_col ? bn : bm) < Traits::nr || + (shard_by_col ? bm : bn) < Traits::mr ? 2.0 : 0.5; +#ifndef EIGEN_VECTORIZE_FMA + // Bandwidth of all of VFMA/MULPS/ADDPS is 0.5 on latest Intel processors. + // However for MULPS/ADDPS we have dependent sequence of 2 such instructions, + // so overall bandwidth is 1.0. + if (computeBandwidth == 0.5) computeBandwidth = 1.0; +#endif + // Computations. 
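+    // Worked example (assuming AVX floats, packed_size = 8): with bk = 256
+    // and blocks wide enough to vectorize, computeBandwidth is 0.5, so the
+    // compute term below is kd * computeBandwidth = 256 * 0.5 = 128 cost
+    // units per packet of 8 outputs; without FMA the bandwidth, and hence
+    // the term, doubles to 256.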
+ TensorOpCost cost = TensorOpCost(0, 0, kd * computeBandwidth, true, packed_size); + // Output stores. + cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size); + if (prepacked) { + // Packing and kernels are executed in different tasks. When we calculate + // task grain size we look only at kernel cost assuming that kernel + // is more expensive than packing. + return cost; + } + // Lhs/rhs loads + computations. + TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n); + TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m); + // Lhs packing memory cost does not contribute considerably to overall + // execution time because lhs is prefetched early and accessed sequentially. + if (shard_by_col) + lhsCost.dropMemoryCost(); + else + rhsCost.dropMemoryCost(); + return cost + lhsCost + rhsCost; + } +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_THREADS +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h new file mode 100644 index 000000000..860a6949a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h @@ -0,0 +1,279 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H + +namespace Eigen { + +/** \class TensorConversionOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor conversion class. This class makes it possible to vectorize + * type casting operations when the number of scalars per packet in the source + * and the destination type differ + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
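+  // Typical entry point (sketch, assuming the standard Tensor API): a
+  // TensorConversionOp is what TensorBase::cast<T>() returns, e.g.
+  //   Eigen::Tensor<float, 2> f(64, 64);
+  //   f.setRandom();
+  //   Eigen::Tensor<double, 2> d = f.cast<double>();
+  // The evaluator further down vectorizes this whenever
+  // internal::type_casting_traits<SrcType, TargetType>::VectorizedCast is set.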
+ typedef TargetType Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConversionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConversionOp type; +}; + +} // end namespace internal + + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + return internal::pcast(m_impl.template packet(index)); + } + + private: + const TensorEvaluator& m_impl; +}; + + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + SrcPacket src3 = m_impl.template packet(index + 2 * SrcPacketSize); + SrcPacket src4 = m_impl.template packet(index + 3 * SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2, src3, src4); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + // Only call m_impl.packet() when we have direct access to the underlying data. This + // ensures that we don't compute the subexpression twice. We may however load some + // coefficients twice, but in practice this doesn't negatively impact performance. 
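+  // Ratio cheat-sheet (sketch, assuming SSE packets): double -> float is the
+  // SrcCoeffRatio 2 / TgtCoeffRatio 1 case, two Packet2d feeding one Packet4f:
+  //   Packet4f out = internal::pcast<Packet2d, Packet4f>(src1, src2);
+  // float -> double is the 1 / 2 case handled by this specialization: one
+  // Packet4f covers two Packet2d results, so a packet load at `index` may
+  // reach up to a full SrcPacket ahead, hence the m_maxIndex guard below.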
+ if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) { + // Force unaligned memory loads since we can't ensure alignment anymore + return internal::pcast(m_impl.template packet(index)); + } else { + const int TgtPacketSize = internal::unpacket_traits::size; + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TgtType; + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::unpacket_traits::type values[TgtPacketSize]; + for (int i = 0; i < TgtPacketSize; ++i) { + values[i] = converter(m_impl.coeff(index+i)); + } + TgtPacket rslt = internal::pload(values); + return rslt; + } + } + + private: + const TensorEvaluator& m_impl; + const typename TensorEvaluator::Index m_maxIndex; +}; + +template +class TensorConversionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef Scalar CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr) + : m_xpr(xpr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +template struct ConversionSubExprEval { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar*) { + impl.evalSubExprsIfNeeded(NULL); + return true; + } +}; + +template struct ConversionSubExprEval { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar* data) { + return impl.evalSubExprsIfNeeded(data); + } +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConversionOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef TargetType Scalar; + typedef TargetType CoeffReturnType; + typedef typename internal::remove_all::Scalar>::type SrcType; + typedef typename PacketType::type PacketReturnType; + typedef typename PacketType::type PacketSourceType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, + PacketAccess = true, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) + { + return ConversionSubExprEval::value, TensorEvaluator, Scalar>::run(m_impl, data); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() + { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + internal::scalar_cast_op converter; + return converter(m_impl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const bool Vectorizable = TensorEvaluator::PacketAccess & + internal::type_casting_traits::VectorizedCast; + return PacketConv::run(m_impl, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double cast_cost = TensorOpCost::CastCost(); + if (vectorized) { + const double SrcCoeffRatio = + 
internal::type_casting_traits::SrcCoeffRatio; + const double TgtCoeffRatio = + internal::type_casting_traits::TgtCoeffRatio; + return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) + + TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize)); + } else { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost); + } + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + protected: + template + struct PacketConv { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator& impl, Index index) { + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = converter(impl.coeff(index+i)); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + }; + + template + struct PacketConv { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator& impl, Index index) { + const int SrcCoeffRatio = internal::type_casting_traits::SrcCoeffRatio; + const int TgtCoeffRatio = internal::type_casting_traits::TgtCoeffRatio; + PacketConverter, PacketSourceType, PacketReturnType, + SrcCoeffRatio, TgtCoeffRatio> converter(impl); + return converter.template packet(index); + } + }; + + TensorEvaluator m_impl; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h new file mode 100644 index 000000000..abdf742c6 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h @@ -0,0 +1,1104 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H + +namespace Eigen { + +/** \class TensorConvolution + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor convolution class. 
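+ * Example (sketch): a valid (no padding) 2-D convolution over the first
+ * two dimensions of a rank-3 float tensor:
+ * \code
+ * Eigen::Tensor<float, 3> input(128, 128, 16);
+ * Eigen::Tensor<float, 2> kernel(3, 3);
+ * Eigen::array<ptrdiff_t, 2> dims({0, 1});
+ * // Each convolved dimension shrinks by kernel_dim - 1,
+ * // so the result is 126 x 126 x 16.
+ * Eigen::Tensor<float, 3> output = input.convolve(kernel, dims);
+ * \endcode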
+ * + * + */ +namespace internal { + +template +class IndexMapper { + public: + IndexMapper(const InputDims& input_dims, const array& kernel_dims, + const array& indices) { + + array dimensions = input_dims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = indices[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + dimensions[index] = result_dim; + } + + array inputStrides; + array outputStrides; + if (static_cast(Layout) == static_cast(ColMajor)) { + inputStrides[0] = 1; + outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + inputStrides[i] = inputStrides[i-1] * input_dims[i-1]; + outputStrides[i] = outputStrides[i-1] * dimensions[i-1]; + } + } else { + inputStrides[NumDims - 1] = 1; + outputStrides[NumDims - 1] = 1; + for (int i = static_cast(NumDims) - 2; i >= 0; --i) { + inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1]; + outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1]; + } + } + + array cudaInputDimensions; + array cudaOutputDimensions; + array tmp = dimensions; + array ordering; + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = i + offset; + ordering[index] = indices[i]; + tmp[indices[i]] = -1; + cudaInputDimensions[index] = input_dims[indices[i]]; + cudaOutputDimensions[index] = dimensions[indices[i]]; + } + + int written = static_cast(Layout) == static_cast(ColMajor) + ? NumKernelDims + : 0; + for (int i = 0; i < NumDims; ++i) { + if (tmp[i] >= 0) { + ordering[written] = i; + cudaInputDimensions[written] = input_dims[i]; + cudaOutputDimensions[written] = dimensions[i]; + ++written; + } + } + + for (int i = 0; i < NumDims; ++i) { + m_inputStrides[i] = inputStrides[ordering[i]]; + m_outputStrides[i] = outputStrides[ordering[i]]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (i > NumKernelDims) { + m_cudaInputStrides[i] = + m_cudaInputStrides[i - 1] * cudaInputDimensions[i - 1]; + m_cudaOutputStrides[i] = + m_cudaOutputStrides[i - 1] * cudaOutputDimensions[i - 1]; + } else { + m_cudaInputStrides[i] = 1; + m_cudaOutputStrides[i] = 1; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (i + 1 < offset) { + m_cudaInputStrides[i] = + m_cudaInputStrides[i + 1] * cudaInputDimensions[i + 1]; + m_cudaOutputStrides[i] = + m_cudaOutputStrides[i + 1] * cudaOutputDimensions[i + 1]; + } else { + m_cudaInputStrides[i] = 1; + m_cudaOutputStrides[i] = 1; + } + } + } + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputPlaneToTensorInputOffset(Index p) const { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_cudaInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_cudaInputStrides[d]; + } + inputIndex += p * m_inputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / m_cudaInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_cudaInputStrides[d]; + } + inputIndex += p * m_inputStrides[limit]; + } + return inputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputPlaneToTensorOutputOffset(Index p) const { + Index outputIndex = 0; + if (static_cast(Layout) 
== static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_cudaOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_cudaOutputStrides[d]; + } + outputIndex += p * m_outputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / m_cudaOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_cudaOutputStrides[d]; + } + outputIndex += p * m_outputStrides[limit]; + } + return outputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1] + + k * m_inputStrides[offset + 2]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1] + + k * m_outputStrides[offset + 2]; + } + + private: + static const int NumDims = internal::array_size::value; + array m_inputStrides; + array m_outputStrides; + array m_cudaInputStrides; + array m_cudaOutputStrides; +}; + + + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
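+  // For instance (illustrative intent only; in practice the input and the
+  // kernel almost always share one scalar type): a float input convolved
+  // with a double kernel would promote Scalar below to double, the same
+  // promotion rule the binary coefficient-wise ops rely on.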
+ typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename InputXprType::Nested LhsNested; + typedef typename KernelXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConvolutionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConvolutionOp type; +}; + +} // end namespace internal + + + +template +class TensorConvolutionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConvolutionOp(const InputXprType& input, const KernelXprType& kernel, const Indices& dims) + : m_input_xpr(input), m_kernel_xpr(kernel), m_indices(dims) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + inputExpression() const { return m_input_xpr; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + kernelExpression() const { return m_kernel_xpr; } + + protected: + typename InputXprType::Nested m_input_xpr; + typename KernelXprType::Nested m_kernel_xpr; + const Indices m_indices; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStride[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStride[i] = 
m_inputStride[i - 1] * input_dims[i - 1]; + } + } else { + m_inputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStride[i] = m_inputStride[i + 1] * input_dims[i + 1]; + } + } + + m_dimensions = m_inputImpl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i > 0) { + m_kernelStride[i] = m_kernelStride[i - 1] * kernel_dims[i - 1]; + } else { + m_kernelStride[0] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStride[i] = m_outputStride[i - 1] * m_dimensions[i - 1]; + } + } else { + for (int i = NumKernelDims - 1; i >= 0; --i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i < NumKernelDims - 1) { + m_kernelStride[i] = m_kernelStride[i + 1] * kernel_dims[i + 1]; + } else { + m_kernelStride[NumKernelDims - 1] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStride[i] = m_outputStride[i + 1] * m_dimensions[i + 1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { + m_inputImpl.evalSubExprsIfNeeded(NULL); + preloadKernel(); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + void evalTo(typename XprType::Scalar* buffer) { + evalSubExprsIfNeeded(NULL); + for (int i = 0; i < dimensions().TotalSize(); ++i) { + buffer[i] += coeff(i); + } + cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + CoeffReturnType result = CoeffReturnType(0); + convolve(firstInput(index), 0, NumKernelDims-1, result); + return result; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(const Index index) const + { + Index indices[2] = {index, index+PacketSize-1}; + Index startInputs[2] = {0, 0}; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } + startInputs[0] += indices[0]; + startInputs[1] += indices[1]; + + if (startInputs[1]-startInputs[0] == PacketSize-1) { + PacketReturnType result = internal::pset1(0); + convolvePacket(startInputs[0], 0, NumKernelDims-1, result); + return result; + } else { + EIGEN_ALIGN_MAX Scalar 
data[PacketSize]; + data[0] = Scalar(0); + convolve(startInputs[0], 0, NumKernelDims-1, data[0]); + for (int i = 1; i < PacketSize-1; ++i) { + data[i] = Scalar(0); + convolve(firstInput(index+i), 0, NumKernelDims-1, data[i]); + } + data[PacketSize-1] = Scalar(0); + convolve(startInputs[1], 0, NumKernelDims-1, data[PacketSize-1]); + return internal::pload(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. + const double convolve_compute_cost = + TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, + PacketSize)); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } + startInput += index; + return startInput; + } + + EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolve(input, kernel, DimIndex-1, accum); + } else { + accum += m_inputImpl.coeff(input) * m_kernel[kernel]; + } + } + } + + template + EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolvePacket(input, kernel, DimIndex-1, accum); + } else { + accum = internal::pmadd(m_inputImpl.template packet(input), internal::pset1(m_kernel[kernel]), accum); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. 
it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + array m_inputStride; + array m_outputStride; + + array m_indexStride; + array m_kernelStride; + TensorEvaluator m_inputImpl; + TensorEvaluator m_kernelImpl; + Dimensions m_dimensions; + + KernelArgType m_kernelArg; + const Scalar* m_kernel; + bool m_local_kernel; + const Device& m_device; +}; + + + + +// Use an optimized implementation of the evaluation code for GPUs whenever possible. +#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) + +template +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int /*kernelSize*/) const { + return StaticKernelSize; + } +}; +template <> +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const { + return kernelSize; + } +}; + +template +__global__ void EigenConvolutionKernel1D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int kernelSize, float* buffer) { + extern __shared__ float s[]; + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSize); + const int num_x_output = last_x - first_x + 1; + + const int first_plane = blockIdx.y * blockDim.y; + const int plane_stride = blockDim.y * gridDim.y; + + for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) { + // Load inputs to shared memory + const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.y * num_x_input; + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x); + s[i + plane_kernel_offset] = eval.coeff(tensor_index); + } + + __syncthreads(); + + // Compute the convolution + const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + const int kernel_offset = plane_kernel_offset + i; + float result = 0.0f; + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSize); ++k) { + result += s[k + kernel_offset] * kernel[k]; + } + const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x); + buffer[tensor_index] = result; + } + __syncthreads(); + } +}; + +template +__global__ void EigenConvolutionKernel2D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int numY, const int maxY, const int kernelSizeX, + const int kernelSizeY, float* buffer) { + extern __shared__ float s[]; + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? 
first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSizeX); + const int num_x_output = last_x - first_x + 1; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + GetKernelSize()(kernelSizeY); + const int num_y_output = last_y - first_y + 1; + + const int first_plane = blockIdx.z * blockDim.z; + const int plane_stride = blockDim.z * gridDim.z; + + for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) { + + const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.z * num_y_input; + + // Load inputs to shared memory + #pragma unroll + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + const int input_offset = num_x_input * (j + plane_kernel_offset); + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y); + s[i + input_offset] = eval.coeff(tensor_index); + } + } + + __syncthreads(); + + // Convolution + const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + #pragma unroll + for (int l = 0; l < GetKernelSize()(kernelSizeY); ++l) { + const int kernel_offset = kernelSizeX * l; + const int input_offset = i + num_x_input * (j + l + plane_kernel_offset); + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSizeX); ++k) { + result += s[k + input_offset] * kernel[k + kernel_offset]; + } + } + const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y); + buffer[tensor_index] = result; + } + } + + __syncthreads(); + } +}; + +template +__global__ void EigenConvolutionKernel3D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const size_t numPlanes, const size_t numX, + const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ, + const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY, + const size_t kernelSizeZ, float* buffer) { + extern __shared__ float s[]; + + // Load inputs to shared memory + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + kernelSizeX; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + kernelSizeY; + + const int first_z = blockIdx.z * maxZ; + const int last_z = (first_z + maxZ < numZ ? 
first_z + maxZ : numZ) - 1; + const int num_z_input = last_z - first_z + kernelSizeZ; + + for (int p = 0; p < numPlanes; ++p) { + + const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = 0; + + for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z); + s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index); + } + } + } + + __syncthreads(); + + // Convolution + const int num_z_output = last_z - first_z + 1; + const int num_y_output = last_y - first_y + 1; + const int num_x_output = last_x - first_x + 1; + const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p); + + for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + for (int n = 0; n < kernelSizeZ; ++n) { + for (int m = 0; m < kernelSizeY; ++m) { + for (int l = 0; l < kernelSizeX; ++l) { + result += s[i + l + num_x_input * (j + m + num_y_input * (k + n + plane_kernel_offset))] * kernel[l + kernelSizeX * (m + kernelSizeY * n)]; + } + } + } + const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z); + buffer[tensor_index] = result; + } + } + } + __syncthreads(); + } +}; + + + +template +struct TensorEvaluator, GpuDevice> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename TensorEvaluator::Dimensions KernelDimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device) + : m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + m_dimensions = m_inputImpl.dimensions(); + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + } + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename InputArgType::Scalar Scalar; + static const int PacketSize = internal::unpacket_traits::size; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* 
data) { + preloadKernel(); + m_inputImpl.evalSubExprsIfNeeded(NULL); + if (data) { + executeEval(data); + return false; + } else { + m_buf = (Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)); + executeEval(m_buf); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_buf) { + m_device.deallocate(m_buf); + m_buf = NULL; + } + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + static unsigned int ceil(unsigned int num, unsigned int denom) { + const unsigned int rounded_toward_zero = num / denom; + if (num > rounded_toward_zero * denom) { + return rounded_toward_zero + 1; + } + return rounded_toward_zero; + } + + void executeEval(Scalar* data) const { + typedef typename TensorEvaluator::Dimensions InputDims; + + const int maxSharedMem = m_device.sharedMemPerBlock(); + const int maxThreadsPerBlock = m_device.maxCudaThreadsPerBlock(); + const int maxBlocksPerProcessor = m_device.maxCudaThreadsPerMultiProcessor() / maxThreadsPerBlock; + const int numMultiProcessors = m_device.getNumCudaMultiProcessors(); + const int warpSize = 32; + + switch (NumKernelDims) { + case 1: { + const int kernel_size = m_kernelImpl.dimensions().TotalSize(); + + const int numX = dimensions()[m_indices[0]]; + const int numP = dimensions().TotalSize() / numX; + int maxX; + dim3 block_size; + + const int single_stride_dim = + static_cast(Layout) == static_cast(ColMajor) + ? 
+              0 : m_inputImpl.dimensions().rank() - 1;
+      if (m_indices[0] == single_stride_dim) {
+        // Maximize the reuse
+        const int inner_dim = ((maxSharedMem / (sizeof(Scalar)) - kernel_size + 1 + 31) / 32) * 32;
+        maxX = numext::mini<int>(inner_dim, numX);
+        const int maxP = numext::mini<int>(maxSharedMem / ((kernel_size - 1 + maxX) * sizeof(Scalar)), numP);
+        block_size.x = numext::mini(maxThreadsPerBlock, maxX);
+        block_size.y = numext::mini<int>(maxThreadsPerBlock / block_size.x, maxP);
+      }
+      else {
+        // Read as much as possible along the innermost dimension, that is, the plane
+        const int inner_dim = maxSharedMem / ((warpSize + kernel_size) * sizeof(Scalar));
+        const int maxP = numext::mini<int>(inner_dim, numP);
+        maxX = numext::mini<int>(maxSharedMem / (inner_dim * sizeof(Scalar)) - kernel_size + 1, numX);
+
+        block_size.x = numext::mini(warpSize, maxX);
+        block_size.y = numext::mini<int>(maxThreadsPerBlock/block_size.x, maxP);
+      }
+
+      const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar);
+      assert(shared_mem <= maxSharedMem);
+
+      const int num_x_blocks = ceil(numX, maxX);
+      const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
+      const int num_y_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks);
+
+      dim3 num_blocks(num_x_blocks, numext::mini<int>(num_y_blocks, ceil(numP, block_size.y)));
+
+
+      //cout << "launching 1D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " maxX: " << maxX << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl;
+
+      const array<Index, 1> indices(m_indices[0]);
+      const array<Index, 1> kernel_dims(m_kernelImpl.dimensions()[0]);
+      internal::IndexMapper<Index, InputDims, 1, Layout> indexMapper(
+          m_inputImpl.dimensions(), kernel_dims, indices);
+      switch(kernel_size) {
+        case 4: {
+          LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
+          break;
+        }
+        case 7: {
+          LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
+          break;
+        }
+        default: {
+          LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
+        }
+      }
+      break;
+    }
+
+    case 2: {
+      const int idxX =
+          static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : 1;
+      const int idxY =
+          static_cast<int>(Layout) == static_cast<int>(ColMajor) ?
1 : 0; + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numP = dimensions().TotalSize() / (numX*numY); + + const float scaling_factor = sqrtf(static_cast(maxSharedMem) / (sizeof(Scalar) * kernel_size_y * kernel_size_x)); + + // Snap maxX to warp size + int inner_dim = ((static_cast(scaling_factor * kernel_size_x) - kernel_size_x + 1 + 32) / 32) * 32; + const int maxX = numext::mini(inner_dim, numX); + const int maxY = numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1)) - kernel_size_y + 1, numY); + const int maxP = numext::mini(maxSharedMem / ((kernel_size_x - 1 + maxX) * (kernel_size_y - 1 + maxY) * sizeof(Scalar)), numP); + + dim3 block_size; + block_size.x = numext::mini(1024, maxX); + block_size.y = numext::mini(1024/block_size.x, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxP); + + const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar); + assert(shared_mem <= maxSharedMem); + + const int num_x_blocks = ceil(numX, maxX); + const int num_y_blocks = ceil(numY, maxY); + const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem); + const int num_z_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks * num_y_blocks); + + dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini(num_z_blocks, ceil(numP, block_size.z))); + + + //cout << "launching 2D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " maxX: " << maxX << " maxY: " << maxY << " maxP: " << maxP << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + + const array indices(m_indices[idxX], m_indices[idxY]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + switch (kernel_size_x) { + case 4: { + switch (kernel_size_y) { + case 7: { + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data); + break; + } + default: { + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data); + break; + } + } + break; + } + case 7: { + switch (kernel_size_y) { + case 4: { + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data); + break; + } + default: { + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data); + break; + } + } + break; + } + default: { + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data); + break; 
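// [Editor's note, illustrative, not part of the patch] Each grid dimension
// above is sized with the evaluator's round-up helper `ceil(num, denom)`
// defined earlier; a minimal standalone equivalent and the invariant it gives:
//
//   unsigned divup(unsigned num, unsigned denom) { return (num + denom - 1) / denom; }
//   // divup(1000, 128) == 8 and 8 * 128 >= 1000, so the launched blocks
//   // always cover numX/numY/numP even when they are not exact multiples
//   // of the tile sizes maxX/maxY/block_size.y.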
+ } + } + break; + } + + case 3: { + const int idxX = + static_cast(Layout) == static_cast(ColMajor) ? 0 : 2; + const int idxY = + static_cast(Layout) == static_cast(ColMajor) ? 1 : 1; + const int idxZ = + static_cast(Layout) == static_cast(ColMajor) ? 2 : 0; + + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + const int kernel_size_z = m_kernelImpl.dimensions()[idxZ]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numZ = dimensions()[m_indices[idxZ]]; + const int numP = dimensions().TotalSize() / (numX*numY*numZ); + + const int maxX = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * kernel_size_y * kernel_size_z) - kernel_size_x + 1, numX)); + const int maxY = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * kernel_size_z) - kernel_size_y + 1, numY)); + const int maxZ = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1)) - kernel_size_z + 1, numZ)); + + dim3 block_size; + block_size.x = numext::mini(32, maxX); + block_size.y = numext::mini(32, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxZ); + dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ)); + + const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar); + assert(shared_mem <= maxSharedMem); + + //cout << "launching 3D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + const array indices(m_indices[idxX], m_indices[idxY], + m_indices[idxZ]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY], + m_kernelImpl.dimensions()[idxZ]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + + LAUNCH_CUDA_KERNEL((EigenConvolutionKernel3D, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data); + break; + } + + default: { + EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return m_buf[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(const Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return internal::ploadt(m_buf+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost + // model. + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. 
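// [Editor's note, illustrative, not part of the patch] Concretely, for
// Scalar = float both AddCost and MulCost below evaluate to 1 (the
// NumTraits<float> defaults), so convolve_compute_cost is 2 cycles per
// kernel tap; a 3x3 kernel therefore contributes 9 * 2 = 18 compute cycles
// per output coefficient on top of the index-computation term.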
+    const double convolve_compute_cost =
+        TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
+    const double firstIndex_compute_cost =
+        NumDims *
+        (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
+         TensorOpCost::DivCost<Index>());
+    return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
+           kernel_size * (m_inputImpl.costPerCoeff(vectorized) +
+                          m_kernelImpl.costPerCoeff(vectorized) +
+                          TensorOpCost(0, 0, convolve_compute_cost, vectorized,
+                                       PacketSize));
+  }
+
+ private:
+  // No assignment (copies are needed by the kernels)
+  TensorEvaluator& operator = (const TensorEvaluator&);
+
+  TensorEvaluator<InputArgType, GpuDevice> m_inputImpl;
+  TensorEvaluator<KernelArgType, GpuDevice> m_kernelImpl;
+  KernelArgType m_kernelArg;
+  Indices m_indices;
+  Dimensions m_dimensions;
+  Scalar* m_buf;
+  const Scalar* m_kernel;
+  bool m_local_kernel;
+
+  const GpuDevice& m_device;
+};
+#endif
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
new file mode 100644
index 000000000..83c449cf1
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -0,0 +1,212 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
+#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
+
+namespace Eigen {
+
+/** \class TensorCostModel
+  * \ingroup CXX11_Tensor_Module
+  *
+  * \brief A cost model used to limit the number of threads used for evaluating
+  * tensor expressions.
+  *
+  */
+
+// Class storing the cost of evaluating a tensor expression in terms of the
+// estimated number of operand bytes loaded, bytes stored, and compute cycles.
+class TensorOpCost {
+ public:
+  // TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple
+  // model based on minimal reciprocal throughput numbers from Intel or
+  // Agner Fog's tables would be better than what is there now.
+  template <typename ArgType>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() {
+    return internal::functor_traits<
+        internal::scalar_product_op<ArgType, ArgType> >::Cost;
+  }
+  template <typename ArgType>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() {
+    return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
+  }
+  template <typename ArgType>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() {
+    return internal::functor_traits<
+        internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
+  }
+  template <typename ArgType>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() {
+    return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
+  }
+  template <typename SrcType, typename TargetType>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() {
+    return internal::functor_traits<
+        internal::scalar_cast_op<SrcType, TargetType> >::Cost;
+  }
+
+  EIGEN_DEVICE_FUNC
+  TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
+  EIGEN_DEVICE_FUNC
+  TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles)
+      : bytes_loaded_(bytes_loaded),
+        bytes_stored_(bytes_stored),
+        compute_cycles_(compute_cycles) {}
+
+  EIGEN_DEVICE_FUNC
+  TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles,
+               bool vectorized, double packet_size)
+      : bytes_loaded_(bytes_loaded),
+        bytes_stored_(bytes_stored),
+        compute_cycles_(vectorized ? compute_cycles / packet_size
+                                   : compute_cycles) {
+    eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded));
+    eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored));
+    eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles));
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const {
+    return bytes_loaded_;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const {
+    return bytes_stored_;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const {
+    return compute_cycles_;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost(
+      double load_cost, double store_cost, double compute_cost) const {
+    return load_cost * bytes_loaded_ + store_cost * bytes_stored_ +
+           compute_cost * compute_cycles_;
+  }
+
+  // Drop memory access component. Intended for cases when memory accesses are
+  // sequential or are completely masked by computations.
+  EIGEN_DEVICE_FUNC void dropMemoryCost() {
+    bytes_loaded_ = 0;
+    bytes_stored_ = 0;
+  }
+
+  // TODO(rmlarsen): Define min in terms of total cost, not elementwise.
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin(
+      const TensorOpCost& rhs) const {
+    double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded());
+    double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored());
+    double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles());
+    return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
+  }
+
+  // TODO(rmlarsen): Define max in terms of total cost, not elementwise.
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax(
+      const TensorOpCost& rhs) const {
+    double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded());
+    double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored());
+    double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles());
+    return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=(
+      const TensorOpCost& rhs) {
+    bytes_loaded_ += rhs.bytes_loaded();
+    bytes_stored_ += rhs.bytes_stored();
+    compute_cycles_ += rhs.compute_cycles();
+    return *this;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) {
+    bytes_loaded_ *= rhs;
+    bytes_stored_ *= rhs;
+    compute_cycles_ *= rhs;
+    return *this;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+(
+      TensorOpCost lhs, const TensorOpCost& rhs) {
+    lhs += rhs;
+    return lhs;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
+      TensorOpCost lhs, double rhs) {
+    lhs *= rhs;
+    return lhs;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
+      double lhs, TensorOpCost rhs) {
+    rhs *= lhs;
+    return rhs;
+  }
+
+  friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) {
+    return os << "[bytes_loaded = " << tc.bytes_loaded()
+              << ", bytes_stored = " << tc.bytes_stored()
+              << ", compute_cycles = " << tc.compute_cycles() << "]";
+  }
+
+ private:
+  double bytes_loaded_;
+  double bytes_stored_;
+  double compute_cycles_;
+};
+
+// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of
+// threads in [1:max_threads] instead of just switching multi-threading off
+// for small work units.
+template <typename Device>
+class TensorCostModel {
+ public:
+  // Scaling from Eigen compute cost to device cycles.
+  static const int kDeviceCyclesPerComputeCycle = 1;
+
+  // Costs in device cycles.
+  static const int kStartupCycles = 100000;
+  static const int kPerThreadCycles = 100000;
+  static const int kTaskSize = 40000;
+
+  // Returns the number of threads in [1:max_threads] to use for
+  // evaluating an expression with the given output size and cost per
+  // coefficient.
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads(
+      double output_size, const TensorOpCost& cost_per_coeff, int max_threads) {
+    double cost = totalCost(output_size, cost_per_coeff);
+    int threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9;
+    return numext::mini(max_threads, numext::maxi(1, threads));
+  }
+
+  // taskSize assesses parallel task size.
+  // Value of 1.0 means ideal parallel task size. Values < 1.0 mean that task
+  // granularity needs to be increased to mitigate parallelization overheads.
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize(
+      double output_size, const TensorOpCost& cost_per_coeff) {
+    return totalCost(output_size, cost_per_coeff) / kTaskSize;
+  }
+
+ private:
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost(
+      double output_size, const TensorOpCost& cost_per_coeff) {
+    // Cost of memory fetches from L2 cache. 64 is typical cache line size.
+    // 11 is L2 cache latency on Haswell.
+    // We don't know whether data is in L1, L2 or L3. But we are most interested
+    // in single-threaded computational time around 100us-10ms (smaller time
+    // is too small for parallelization, larger time is not interesting
+    // either because we are probably using all available threads already).
+    // And for the target time range, L2 seems to be what matters.
Data set + // fitting into L1 is too small to take noticeable time. Data set fitting + // only into L3 presumably will take more than 10ms to load and process. + const double kLoadCycles = 1.0 / 64 * 11; + const double kStoreCycles = 1.0 / 64 * 11; + // Scaling from Eigen compute cost to device cycles. + return output_size * + cost_per_coeff.total_cost(kLoadCycles, kStoreCycles, + kDeviceCyclesPerComputeCycle); + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h new file mode 100644 index 000000000..e020d076f --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h @@ -0,0 +1,313 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H +#define EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H + +namespace Eigen { + +/** \class TensorCustomUnaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. + * + * + */ +namespace internal { +template +struct traits > +{ + typedef typename XprType::Scalar Scalar; + typedef typename XprType::StorageKind StorageKind; + typedef typename XprType::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomUnaryOp& type; +}; + +template +struct nested > +{ + typedef TensorCustomUnaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCustomUnaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomUnaryOp(const XprType& expr, const CustomUnaryFunc& func) + : m_expr(expr), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomUnaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_expr; } + + protected: + typename XprType::Nested m_expr; + const CustomUnaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomUnaryOp ArgType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, 
+ PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const ArgType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.expression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast( + m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalTo(m_result); + return true; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + if (m_result != NULL) { + m_device.deallocate(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; } + + protected: + EIGEN_DEVICE_FUNC void evalTo(Scalar* data) { + TensorMap > result( + data, m_dimensions); + m_op.func().eval(m_op.expression(), result, m_device); + } + + Dimensions m_dimensions; + const ArgType m_op; + const Device& m_device; + CoeffReturnType* m_result; +}; + + + +/** \class TensorCustomBinaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. 
+ * + * + */ +namespace internal { +template +struct traits > +{ + typedef typename internal::promote_storage_type::ret Scalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomBinaryOp& type; +}; + +template +struct nested > +{ + typedef TensorCustomBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCustomBinaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::traits::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const CustomBinaryFunc& func) + + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomBinaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const CustomBinaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomBinaryOp XprType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, + PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.lhsExpression(), op.rhsExpression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalTo(m_result); + return true; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + if (m_result != NULL) { + m_device.deallocate(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + 
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; } + + protected: + EIGEN_DEVICE_FUNC void evalTo(Scalar* data) { + TensorMap > result(data, m_dimensions); + m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device); + } + + Dimensions m_dimensions; + const XprType m_op; + const Device& m_device; + CoeffReturnType* m_result; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h new file mode 100644 index 000000000..29e50a3b2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h @@ -0,0 +1,68 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H + +namespace Eigen { + +/** \class TensorDevice + * \ingroup CXX11_Tensor_Module + * + * \brief Pseudo expression providing an operator = that will evaluate its argument + * on the specified computing 'device' (GPU, thread pool, ...) + * + * Example: + * C.device(EIGEN_GPU) = A + B; + * + * Todo: operator *= and /=. 
+ */ + +template class TensorDevice { + public: + TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {} + + template + EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Sum; + Sum sum(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, sum); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Difference; + Difference difference(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, difference); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + protected: + const DeviceType& m_device; + ExpressionType& m_expression; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h new file mode 100644 index 000000000..4f5767bc7 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h @@ -0,0 +1,337 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H + +namespace Eigen { + +static const int kCudaScratchSize = 1024; + +// This defines an interface that GPUDevice can take to use +// CUDA streams underneath. +class StreamInterface { + public: + virtual ~StreamInterface() {} + + virtual const cudaStream_t& stream() const = 0; + virtual const cudaDeviceProp& deviceProperties() const = 0; + + // Allocate memory on the actual device where the computation will run + virtual void* allocate(size_t num_bytes) const = 0; + virtual void deallocate(void* buffer) const = 0; + + // Return a scratchpad buffer of size 1k + virtual void* scratchpad() const = 0; + + // Return a semaphore. The semaphore is initially initialized to 0, and + // each kernel using it is responsible for resetting to 0 upon completion + // to maintain the invariant that the semaphore is always equal to 0 upon + // each kernel start. 
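// [Editor's note, illustrative, not part of the patch] A hedged usage sketch
// for the TensorDevice wrapper introduced above; it assumes the thread-pool
// device added later in this patch and the EIGEN_USE_THREADS configuration:
//
//   #define EIGEN_USE_THREADS
//   #include <unsupported/Eigen/CXX11/Tensor>
//
//   int main() {
//     Eigen::Tensor<float, 2> a(256, 256), b(256, 256), c(256, 256);
//     a.setRandom(); b.setRandom();
//
//     Eigen::ThreadPool pool(4);
//     Eigen::ThreadPoolDevice dev(&pool, /*num_cores=*/4);
//
//     c.device(dev) = a + b;  // operator=: TensorAssignOp run by TensorExecutor
//     c.device(dev) += a;     // operator+=: builds a scalar_sum_op expression first
//     return 0;
//   }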
+  virtual unsigned int* semaphore() const = 0;
+};
+
+static cudaDeviceProp* m_deviceProperties;
+static bool m_devicePropInitialized = false;
+
+static void initializeDeviceProp() {
+  if (!m_devicePropInitialized) {
+    // Attempts to ensure proper behavior in the case of multiple threads
+    // calling this function simultaneously. This would be trivial to
+    // implement if we could use std::mutex, but unfortunately mutexes don't
+    // compile with nvcc, so we resort to atomics and thread fences instead.
+    // Note that if the caller uses a compiler that doesn't support c++11 we
+    // can't ensure that the initialization is thread safe.
+#if __cplusplus >= 201103L
+    static std::atomic<bool> first(true);
+    if (first.exchange(false)) {
+#else
+    static bool first = true;
+    if (first) {
+      first = false;
+#endif
+      // We're the first thread to reach this point.
+      int num_devices;
+      cudaError_t status = cudaGetDeviceCount(&num_devices);
+      if (status != cudaSuccess) {
+        std::cerr << "Failed to get the number of CUDA devices: "
+                  << cudaGetErrorString(status)
+                  << std::endl;
+        assert(status == cudaSuccess);
+      }
+      m_deviceProperties = new cudaDeviceProp[num_devices];
+      for (int i = 0; i < num_devices; ++i) {
+        status = cudaGetDeviceProperties(&m_deviceProperties[i], i);
+        if (status != cudaSuccess) {
+          std::cerr << "Failed to initialize CUDA device #"
+                    << i
+                    << ": "
+                    << cudaGetErrorString(status)
+                    << std::endl;
+          assert(status == cudaSuccess);
+        }
+      }
+
+#if __cplusplus >= 201103L
+      std::atomic_thread_fence(std::memory_order_release);
+#endif
+      m_devicePropInitialized = true;
+    } else {
+      // Wait for the other thread to initialize the properties.
+      while (!m_devicePropInitialized) {
+#if __cplusplus >= 201103L
+        std::atomic_thread_fence(std::memory_order_acquire);
+#endif
+        sleep(1);
+      }
+    }
+  }
+}
+
+static const cudaStream_t default_stream = cudaStreamDefault;
+
+class CudaStreamDevice : public StreamInterface {
+ public:
+  // Use the default stream on the current device
+  CudaStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
+    cudaGetDevice(&device_);
+    initializeDeviceProp();
+  }
+  // Use the default stream on the specified device
+  CudaStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {
+    initializeDeviceProp();
+  }
+  // Use the specified stream. Note that it's the
+  // caller's responsibility to ensure that the stream can run on
+  // the specified device. If no device is specified the code
+  // assumes that the stream is associated with the current gpu device.
+ CudaStreamDevice(const cudaStream_t* stream, int device = -1) + : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) { + if (device < 0) { + cudaGetDevice(&device_); + } else { + int num_devices; + cudaError_t err = cudaGetDeviceCount(&num_devices); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + assert(device < num_devices); + device_ = device; + } + initializeDeviceProp(); + } + + virtual ~CudaStreamDevice() { + if (scratch_) { + deallocate(scratch_); + } + } + + const cudaStream_t& stream() const { return *stream_; } + const cudaDeviceProp& deviceProperties() const { + return m_deviceProperties[device_]; + } + virtual void* allocate(size_t num_bytes) const { + cudaError_t err = cudaSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + void* result; + err = cudaMalloc(&result, num_bytes); + assert(err == cudaSuccess); + assert(result != NULL); + return result; + } + virtual void deallocate(void* buffer) const { + cudaError_t err = cudaSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + assert(buffer != NULL); + err = cudaFree(buffer); + assert(err == cudaSuccess); + } + + virtual void* scratchpad() const { + if (scratch_ == NULL) { + scratch_ = allocate(kCudaScratchSize + sizeof(unsigned int)); + } + return scratch_; + } + + virtual unsigned int* semaphore() const { + if (semaphore_ == NULL) { + char* scratch = static_cast(scratchpad()) + kCudaScratchSize; + semaphore_ = reinterpret_cast(scratch); + cudaError_t err = cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + } + return semaphore_; + } + + private: + const cudaStream_t* stream_; + int device_; + mutable void* scratch_; + mutable unsigned int* semaphore_; +}; + +struct GpuDevice { + // The StreamInterface is not owned: the caller is + // responsible for its initialization and eventual destruction. + explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) { + eigen_assert(stream); + } + explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) { + eigen_assert(stream); + } + // TODO(bsteiner): This is an internal API, we should not expose it. 
+ EIGEN_STRONG_INLINE const cudaStream_t& stream() const { + return stream_->stream(); + } + + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return stream_->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + stream_->deallocate(buffer); + } + + EIGEN_STRONG_INLINE void* scratchpad() const { + return stream_->scratchpad(); + } + + EIGEN_STRONG_INLINE unsigned int* semaphore() const { + return stream_->semaphore(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { +#ifndef __CUDA_ARCH__ + cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice, + stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); +#else + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + cudaError_t err = + cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + } + + EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + cudaError_t err = + cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { +#ifndef __CUDA_ARCH__ + cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + assert(err == cudaSuccess); +#else + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE size_t numThreads() const { + // FIXME + return 32; + } + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { + // FIXME + return 48*1024; + } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // We won't try to take advantage of the l2 cache for the time being, and + // there is no l3 cache on cuda devices. + return firstLevelCacheSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const { +#if defined(__CUDACC__) && !defined(__CUDA_ARCH__) + cudaError_t err = cudaStreamSynchronize(stream_->stream()); + if (err != cudaSuccess) { + std::cerr << "Error detected in CUDA stream: " + << cudaGetErrorString(err) + << std::endl; + assert(err == cudaSuccess); + } +#else + assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE int getNumCudaMultiProcessors() const { + return stream_->deviceProperties().multiProcessorCount; + } + EIGEN_STRONG_INLINE int maxCudaThreadsPerBlock() const { + return stream_->deviceProperties().maxThreadsPerBlock; + } + EIGEN_STRONG_INLINE int maxCudaThreadsPerMultiProcessor() const { + return stream_->deviceProperties().maxThreadsPerMultiProcessor; + } + EIGEN_STRONG_INLINE int sharedMemPerBlock() const { + return stream_->deviceProperties().sharedMemPerBlock; + } + EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return stream_->deviceProperties().major; + } + EIGEN_STRONG_INLINE int minorDeviceVersion() const { + return stream_->deviceProperties().minor; + } + + EIGEN_STRONG_INLINE int maxBlocks() const { + return max_blocks_; + } + + // This function checks if the CUDA runtime recorded an error for the + // underlying stream device. 
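// [Editor's note, illustrative, not part of the patch] A hedged end-to-end
// sketch of the device defined above (compiled with nvcc; `in`/`out` are
// assumed to be device pointers):
//
//   #define EIGEN_USE_GPU
//   #include <unsupported/Eigen/CXX11/Tensor>
//
//   void double_on_gpu(float* in, float* out, int n) {
//     Eigen::CudaStreamDevice stream;      // default stream, current device
//     Eigen::GpuDevice gpu(&stream);
//     Eigen::TensorMap<Eigen::Tensor<float, 1> > t_in(in, n), t_out(out, n);
//     t_out.device(gpu) = t_in * 2.0f;     // asynchronous kernel launch
//     gpu.synchronize();                   // wait on the underlying stream
//   }
//
// The memcpyHostToDevice/memcpyDeviceToHost members above are likewise
// asynchronous on the stream, so a synchronize() is needed before the host
// reads the result.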
+ inline bool ok() const { +#ifdef __CUDACC__ + cudaError_t error = cudaStreamQuery(stream_->stream()); + return (error == cudaSuccess) || (error == cudaErrorNotReady); +#else + return false; +#endif + } + + private: + const StreamInterface* stream_; + int max_blocks_; +}; + +#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ + (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ + assert(cudaGetLastError() == cudaSuccess); + + +// FIXME: Should be device and kernel specific. +#ifdef __CUDACC__ +static EIGEN_DEVICE_FUNC inline void setCudaSharedMemConfig(cudaSharedMemConfig config) { +#ifndef __CUDA_ARCH__ + cudaError_t status = cudaDeviceSetSharedMemConfig(config); + EIGEN_UNUSED_VARIABLE(status) + assert(status == cudaSuccess); +#else + EIGEN_UNUSED_VARIABLE(config) +#endif +} +#endif + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h new file mode 100644 index 000000000..9d141395b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h @@ -0,0 +1,81 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H + + +namespace Eigen { + +// Default device for the machine (typically a single cpu core) +struct DefaultDevice { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return internal::aligned_malloc(num_bytes); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + internal::aligned_free(buffer); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { + ::memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const { +#ifndef __CUDA_ARCH__ + // Running on the host CPU + return 1; +#else + // Running on a CUDA device + return 32; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { +#ifndef __CUDA_ARCH__ + // Running on the host CPU + return l1CacheSize(); +#else + // Running on a CUDA device, return the amount of shared memory available. 
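// [Editor's note, not part of the patch] The 48KB constant below is the
// per-multiprocessor shared-memory budget this snapshot assumes when code
// runs under __CUDA_ARCH__; the GpuDevice::sharedMemPerBlock() accessor above
// reports the real value from cudaDeviceProp at run time.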
+ return 48*1024; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { +#ifndef __CUDA_ARCH__ + // Running single threaded on the host CPU + return l3CacheSize(); +#else + // Running on a CUDA device + return firstLevelCacheSize(); +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { +#ifndef __CUDA_ARCH__ + // Running single threaded on the host CPU + // Should return an enum that encodes the ISA supported by the CPU + return 1; +#else + // Running on a CUDA device + return __CUDA_ARCH__ / 100; +#endif + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h new file mode 100644 index 000000000..7c039890e --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h @@ -0,0 +1,122 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Copyright (C) 2016 Benoit Steiner + +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H + +namespace Eigen { +struct SyclDevice { + /// class members + /// sycl queue + mutable cl::sycl::queue m_queue; + /// std::map is the container used to make sure that we create only one buffer + /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice. + /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it. + mutable std::map> buffer_map; + /// creating device by using selector + template SyclDevice(dev_Selector s) + : +#ifdef EIGEN_EXCEPTIONS + m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) { + for (const auto& e : l) { + try { + std::rethrow_exception(e); + } catch (cl::sycl::exception e) { + std::cout << e.what() << std::endl; + } + } + })) +#else + m_queue(cl::sycl::queue(s)) +#endif + {} + // destructor + ~SyclDevice() { deallocate_all(); } + + template void deallocate(T *p) const { + auto it = buffer_map.find(p); + if (it != buffer_map.end()) { + buffer_map.erase(it); + internal::aligned_free(p); + } + } + void deallocate_all() const { + std::map>::iterator it=buffer_map.begin(); + while (it!=buffer_map.end()) { + auto p=it->first; + buffer_map.erase(it); + internal::aligned_free(const_cast(p)); + it=buffer_map.begin(); + } + buffer_map.clear(); + } + + /// creation of sycl accessor for a buffer. This function first tries to find + /// the buffer in the buffer_map. If found it gets the accessor from it, if not, + ///the function then adds an entry by creating a sycl buffer for that particular pointer. 
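// [Editor's note, illustrative, not part of the patch] The pointer-keyed
// ownership scheme described above, reduced to plain C++ with the SYCL
// buffer stood in by a hypothetical DeviceObj (all names are ours):
//
//   std::map<const void*, std::shared_ptr<void> > registry;
//   void* p = Eigen::internal::aligned_malloc(n);
//   registry.emplace(p, std::shared_ptr<void>(
//       new DeviceObj(n),
//       [](void* q) { delete static_cast<DeviceObj*>(q); }));
//   registry.erase(p);  // dropping the entry releases the device-side object,
//                       // mirroring SyclDevice::deallocate below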
+ template inline cl::sycl::accessor + get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const { + return (get_sycl_buffer(num_bytes, ptr)->template get_access(cgh)); + } + + template inline std::pair>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const { + using Type = cl::sycl::buffer; + std::pair>::iterator,bool> ret = buffer_map.insert(std::pair>(ptr, std::shared_ptr(new Type(cl::sycl::range<1>(num_bytes)), + [](void *dataMem) { delete static_cast(dataMem); }))); + (static_cast(buffer_map.at(ptr).get()))->set_final_data(nullptr); + return ret; + } + + template inline cl::sycl::buffer* get_sycl_buffer(size_t num_bytes,const T * ptr) const { + return static_cast*>(add_sycl_buffer(ptr, num_bytes).first->second.get()); + } + + /// allocating memory on the cpu + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const { + return internal::aligned_malloc(8); + } + + // some runtime conditions that can be applied here + bool isDeviceSuitable() const { return true; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const { + ::memcpy(dst, src, n); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const { + auto host_acc= (static_cast*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access(); + memcpy(host_acc.get_pointer(), src, n); + } + /// whith the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const { + auto it = buffer_map.find(src); + if (it != buffer_map.end()) { + auto host_acc= (static_cast*>(it->second.get()))-> template get_access(); + memcpy(dst,host_acc.get_pointer(), n); + } else{ + eigen_assert("no device memory found. The memory might be destroyed before creation"); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return 1; + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h new file mode 100644 index 000000000..17f04665a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h @@ -0,0 +1,282 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H + +namespace Eigen { + +// Use the SimpleThreadPool by default. We'll switch to the new non blocking +// thread pool later. 
+#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
+template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>;
+typedef NonBlockingThreadPool ThreadPool;
+#else
+template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>;
+typedef SimpleThreadPool ThreadPool;
+#endif
+
+
+// Barrier is an object that allows one or more threads to wait until
+// Notify has been called a specified number of times.
+class Barrier {
+ public:
+  Barrier(unsigned int count) : state_(count << 1), notified_(false) {
+    eigen_assert(((count << 1) >> 1) == count);
+  }
+  ~Barrier() {
+    eigen_assert((state_>>1) == 0);
+  }
+
+  void Notify() {
+    unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
+    if (v != 1) {
+      eigen_assert(((v + 2) & ~1) != 0);
+      return;  // either count has not dropped to 0, or waiter is not waiting
+    }
+    std::unique_lock<std::mutex> l(mu_);
+    eigen_assert(!notified_);
+    notified_ = true;
+    cv_.notify_all();
+  }
+
+  void Wait() {
+    unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
+    if ((v >> 1) == 0) return;
+    std::unique_lock<std::mutex> l(mu_);
+    while (!notified_) {
+      cv_.wait(l);
+    }
+  }
+
+ private:
+  std::mutex mu_;
+  std::condition_variable cv_;
+  std::atomic<unsigned int> state_;  // low bit is waiter flag
+  bool notified_;
+};
+
+
+// Notification is an object that allows a user to wait for another
+// thread to signal a notification that an event has occurred.
+//
+// Multiple threads can wait on the same Notification object,
+// but only one caller must call Notify() on the object.
+struct Notification : Barrier {
+  Notification() : Barrier(1) {};
+};
+
+
+// Runs an arbitrary function and then calls Notify() on the passed in
+// Notification.
+template <typename Function, typename... Args> struct FunctionWrapperWithNotification
+{
+  static void run(Notification* n, Function f, Args... args) {
+    f(args...);
+    if (n) {
+      n->Notify();
+    }
+  }
+};
+
+template <typename Function, typename... Args> struct FunctionWrapperWithBarrier
+{
+  static void run(Barrier* b, Function f, Args... args) {
+    f(args...);
+    if (b) {
+      b->Notify();
+    }
+  }
+};
+
+template <typename SyncType>
+static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) {
+  if (n) {
+    n->Wait();
+  }
+}
+
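A hedged usage sketch for the synchronization pieces above together with the ThreadPoolDevice defined just below (the sizes and names are ours; the work size is deliberately large because the cost model keeps small loops single-threaded):

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include <vector>

int main() {
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice dev(&pool, /*num_cores=*/4);

  // Barrier: block until all eight enqueued closures have run.
  Eigen::Barrier barrier(8);
  for (int i = 0; i < 8; ++i) dev.enqueue_with_barrier(&barrier, [] {});
  barrier.Wait();

  // parallelFor: the block size is derived from the per-coefficient cost;
  // with kStartupCycles = kPerThreadCycles = 100000, loops totalling much
  // less than ~10^5 device cycles stay on the calling thread.
  std::vector<float> v(1 << 22, 1.0f);
  Eigen::TensorOpCost cost(sizeof(float), sizeof(float), /*compute_cycles=*/1);
  dev.parallelFor(static_cast<Eigen::Index>(v.size()), cost,
                  [&](Eigen::Index first, Eigen::Index last) {
                    for (Eigen::Index i = first; i < last; ++i) v[i] *= 2.0f;
                  });
  return 0;
}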
+
+// Build a thread pool device on top of an existing pool of threads.
+struct ThreadPoolDevice {
+  // The ownership of the thread pool remains with the caller.
+  ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores) : pool_(pool), num_threads_(num_cores) { }
+
+  EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+    return internal::aligned_malloc(num_bytes);
+  }
+
+  EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
+    internal::aligned_free(buffer);
+  }
+
+  EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
+    ::memcpy(dst, src, n);
+  }
+  EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
+    memcpy(dst, src, n);
+  }
+  EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
+    memcpy(dst, src, n);
+  }
+
+  EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
+    ::memset(buffer, c, n);
+  }
+
+  EIGEN_STRONG_INLINE int numThreads() const {
+    return num_threads_;
+  }
+
+  EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
+    return l1CacheSize();
+  }
+
+  EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
+    // The l3 cache size is shared between all the cores.
+    return l3CacheSize() / num_threads_;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+    // Should return an enum that encodes the ISA supported by the CPU
+    return 1;
+  }
+
+  template <class Function, class... Args>
+  EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, Args&&... args) const {
+    Notification* n = new Notification();
+    pool_->Schedule(std::bind(&FunctionWrapperWithNotification<Function, Args...>::run, n, f, args...));
+    return n;
+  }
+
+  template <class Function, class... Args>
+  EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b,
+                                                Function&& f,
+                                                Args&&... args) const {
+    pool_->Schedule(std::bind(
+        &FunctionWrapperWithBarrier<Function, Args...>::run, b, f, args...));
+  }
+
+  template <class Function, class... Args>
+  EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, Args&&... args) const {
+    pool_->Schedule(std::bind(f, args...));
+  }
+
+  // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if
+  // called from one of the threads in pool_. Returns -1 otherwise.
+  EIGEN_STRONG_INLINE int currentThreadId() const {
+    return pool_->CurrentThreadId();
+  }
+
+  // parallelFor executes f with [0, n) arguments in parallel and waits for
+  // completion. F accepts a half-open interval [first, last).
+  // Block size is chosen based on the iteration cost and resulting parallel
+  // efficiency. If block_align is not nullptr, it is called to round up the
+  // block size.
+  void parallelFor(Index n, const TensorOpCost& cost,
+                   std::function<Index(Index)> block_align,
+                   std::function<void(Index, Index)> f) const {
+    typedef TensorCostModel<ThreadPoolDevice> CostModel;
+    if (n <= 1 || numThreads() == 1 ||
+        CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
+      f(0, n);
+      return;
+    }
+
+    // Calculate block size based on (1) the iteration cost and (2) parallel
+    // efficiency. We want blocks to be not too small to mitigate
+    // parallelization overheads; not too large to mitigate tail
+    // effect and potential load imbalance and we also want number
+    // of blocks to be evenly divisible across threads.
+
+    double block_size_f = 1.0 / CostModel::taskSize(1, cost);
+    const Index max_oversharding_factor = 4;
+    Index block_size = numext::mini(
+        n, numext::maxi<Index>(divup<Index>(n, max_oversharding_factor * numThreads()),
+                               block_size_f));
+    const Index max_block_size = numext::mini(n, 2 * block_size);
+    if (block_align) {
+      Index new_block_size = block_align(block_size);
+      eigen_assert(new_block_size >= block_size);
+      block_size = numext::mini(n, new_block_size);
+    }
+    Index block_count = divup(n, block_size);
+    // Calculate parallel efficiency as fraction of total CPU time used for
+    // computations:
+    double max_efficiency =
+        static_cast<double>(block_count) /
+        (divup<int>(block_count, numThreads()) * numThreads());
+    // Now try to increase block size up to max_block_size as long as it
+    // doesn't decrease parallel efficiency.
+    for (Index prev_block_count = block_count;
+         max_efficiency < 1.0 && prev_block_count > 1;) {
+      // This is the next block size that divides size into a smaller number
+      // of blocks than the current block_size.
+      Index coarser_block_size = divup(n, prev_block_count - 1);
+      if (block_align) {
+        Index new_block_size = block_align(coarser_block_size);
+        eigen_assert(new_block_size >= coarser_block_size);
+        coarser_block_size = numext::mini(n, new_block_size);
+      }
+      if (coarser_block_size > max_block_size) {
+        break;  // Reached max block size. Stop.
+      }
+      // Recalculate parallel efficiency.
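// [Editor's note, not part of the patch] "Parallel efficiency" here is
// block_count / (block_count rounded up to a multiple of numThreads()):
// e.g. 5 blocks on 4 threads give 5 / 8 = 0.625. The loop below accepts a
// coarser block size only while this ratio does not drop by more than 0.01.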
+
+  // parallelFor executes f with [0, n) arguments in parallel and waits for
+  // completion. F accepts a half-open interval [first, last).
+  // Block size is chosen based on the iteration cost and resulting parallel
+  // efficiency. If block_align is not nullptr, it is called to round up the
+  // block size.
+  void parallelFor(Index n, const TensorOpCost& cost,
+                   std::function<Index(Index)> block_align,
+                   std::function<void(Index, Index)> f) const {
+    typedef TensorCostModel<ThreadPoolDevice> CostModel;
+    if (n <= 1 || numThreads() == 1 ||
+        CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
+      f(0, n);
+      return;
+    }
+
+    // Calculate block size based on (1) the iteration cost and (2) parallel
+    // efficiency. We want blocks to be not too small to mitigate
+    // parallelization overheads; not too large to mitigate tail
+    // effect and potential load imbalance and we also want number
+    // of blocks to be evenly dividable across threads.
+
+    double block_size_f = 1.0 / CostModel::taskSize(1, cost);
+    const Index max_oversharding_factor = 4;
+    Index block_size = numext::mini(
+        n, numext::maxi<Index>(divup<Index>(n, max_oversharding_factor * numThreads()),
+                               block_size_f));
+    const Index max_block_size = numext::mini(n, 2 * block_size);
+    if (block_align) {
+      Index new_block_size = block_align(block_size);
+      eigen_assert(new_block_size >= block_size);
+      block_size = numext::mini(n, new_block_size);
+    }
+    Index block_count = divup(n, block_size);
+    // Calculate parallel efficiency as fraction of total CPU time used for
+    // computations:
+    double max_efficiency =
+        static_cast<double>(block_count) /
+        (divup<int>(block_count, numThreads()) * numThreads());
+    // Now try to increase block size up to max_block_size as long as it
+    // doesn't decrease parallel efficiency.
+    for (Index prev_block_count = block_count;
+         max_efficiency < 1.0 && prev_block_count > 1;) {
+      // This is the next block size that divides size into a smaller number
+      // of blocks than the current block_size.
+      Index coarser_block_size = divup(n, prev_block_count - 1);
+      if (block_align) {
+        Index new_block_size = block_align(coarser_block_size);
+        eigen_assert(new_block_size >= coarser_block_size);
+        coarser_block_size = numext::mini(n, new_block_size);
+      }
+      if (coarser_block_size > max_block_size) {
+        break;  // Reached max block size. Stop.
+      }
+      // Recalculate parallel efficiency.
+      const Index coarser_block_count = divup(n, coarser_block_size);
+      eigen_assert(coarser_block_count < prev_block_count);
+      prev_block_count = coarser_block_count;
+      const double coarser_efficiency =
+          static_cast<double>(coarser_block_count) /
+          (divup<int>(coarser_block_count, numThreads()) * numThreads());
+      if (coarser_efficiency + 0.01 >= max_efficiency) {
+        // Taking it.
+        block_size = coarser_block_size;
+        block_count = coarser_block_count;
+        if (max_efficiency < coarser_efficiency) {
+          max_efficiency = coarser_efficiency;
+        }
+      }
+    }
+
+    // Recursively divide size into halves until we reach block_size.
+    // Division code rounds mid to block_size, so we are guaranteed to get
+    // block_count leaves that do actual computations.
+    Barrier barrier(static_cast<unsigned int>(block_count));
+    std::function<void(Index, Index)> handleRange;
+    handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) {
+      if (last - first <= block_size) {
+        // Single block or less, execute directly.
+        f(first, last);
+        barrier.Notify();
+        return;
+      }
+      // Split into halves and submit to the pool.
+      Index mid = first + divup((last - first) / 2, block_size) * block_size;
+      pool_->Schedule([=, &handleRange]() { handleRange(mid, last); });
+      pool_->Schedule([=, &handleRange]() { handleRange(first, mid); });
+    };
+    handleRange(0, n);
+    barrier.Wait();
+  }
+
+  // Convenience wrapper for parallelFor that does not align blocks.
+  void parallelFor(Index n, const TensorOpCost& cost,
+                   std::function<void(Index, Index)> f) const {
+    parallelFor(n, cost, nullptr, std::move(f));
+  }
+
+ private:
+  ThreadPoolInterface* pool_;
+  int num_threads_;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
new file mode 100644
index 000000000..1a30e45fb
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorDimensionList
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n.
+ *
+ * \sa Tensor
+ */
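+
+// NOTE: illustrative sketch, not part of the upstream Eigen sources.
+// DimensionList is an identity index list: operator[](i) simply returns i,
+// so an instance stands for "all dimensions of a rank-N tensor, in order",
+// e.g. as the dimension list of a full reduction:
+//
+//   DimensionList<DenseIndex, 3> all_dims;
+//   // all_dims[0] == 0, all_dims[1] == 1, all_dims[2] == 2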
+
+template <typename Index, std::size_t Rank> struct DimensionList {
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+  const Index operator[] (const Index i) const { return i; }
+};
+
+namespace internal {
+
+template<typename Index, std::size_t Rank> struct array_size<DimensionList<Index, Rank> > {
+  static const size_t value = Rank;
+};
+template<typename Index, std::size_t Rank> struct array_size<const DimensionList<Index, Rank> > {
+  static const size_t value = Rank;
+};
+
+template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(DimensionList<Index, Rank>&) {
+  return n;
+}
+template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(const DimensionList<Index, Rank>&) {
+  return n;
+}
+
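+
+// NOTE: illustrative summary, not part of the upstream Eigen sources. The
+// specializations below answer compile-time queries about an index list. For
+// DimensionList every index is statically known (it equals its position), so
+// with constexpr support the comparison predicates fold to constants, e.g.
+//   index_statically_eq_impl<DimensionList<DenseIndex, 3> >::run(1, 1)
+// yields true; the non-constexpr fallback conservatively answers false for
+// the comparisons while still reporting the indices as known and increasing.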
+#if EIGEN_HAS_CONSTEXPR
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run() {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run() {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run() {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run() {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<DimensionList<Index, Rank> > {
+  static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i == value;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i == value;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i != value;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
+  static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i != value;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i > value;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i > value;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i < value;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+    return i < value;
+  }
+};
+
+#else
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
+  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+    return true;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+    return true;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+    return false;
+  }
+};
+#endif
+
+} // end namespace internal
+} // end namespace Eigen
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
new file mode 100644
index 000000000..451940de3
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -0,0 +1,428 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
+
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorDimensions
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Set of classes used to encode and store the dimensions of a Tensor.
+ *
+ * The Sizes class encodes as part of the type the number of dimensions and the
+ * sizes corresponding to each dimension. It uses no storage space since it is
+ * entirely known at compile time.
+ * The DSizes class is its dynamic sibling: the number of dimensions is known
+ * at compile time but the sizes are set during execution.
+ * + * \sa Tensor + */ + +// Boilerplate code +namespace internal { + +template struct dget { + static const std::size_t value = get::value; +}; + + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static inline Index run(array const& indices, + const Dimensions& dimensions) + { + return array_get(indices) + + dget::value * + fixed_size_tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static inline Index run(array const&, const Dimensions&) + { + return 0; + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static inline Index run(const Index index, + const Dimensions& dimensions) + { + const Index mult = (index == n-1) ? 1 : 0; + return array_get(dimensions) * mult + + fixed_size_tensor_index_extraction_helper::run(index, dimensions); + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static inline Index run(const Index, + const Dimensions&) + { + return 0; + } + }; + +} // end namespace internal + + +// Fixed size +#ifndef EIGEN_EMULATE_CXX11_META_H +template +struct Sizes : internal::numeric_list { + typedef internal::numeric_list Base; + static const std::ptrdiff_t total_size = internal::arg_prod(Indices...); + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const { + return Base::count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t TotalSize() { + return internal::arg_prod(Indices...); + } + + EIGEN_DEVICE_FUNC Sizes() { } + template + explicit EIGEN_DEVICE_FUNC Sizes(const array& /*indices*/) { + // todo: add assertion + } +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC Sizes(DenseIndex...) 
{ } + explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list /*l*/) { + // todo: add assertion + } +#endif + + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const { + return internal::fixed_size_tensor_index_extraction_helper::run(index, *this); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + size_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *static_cast(this)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + size_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *static_cast(this)); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#else + +template +struct non_zero_size { + typedef internal::type2val type; +}; +template <> +struct non_zero_size<0> { + typedef internal::null_type type; +}; + +template struct Sizes { + typedef typename internal::make_type_list::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type >::type Base; + static const size_t count = Base::count; + static const std::size_t total_size = internal::arg_prod::value; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const { + return count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() { + return internal::arg_prod::value; + } + + Sizes() { } + template + explicit Sizes(const array& /*indices*/) { + // todo: add assertion + } + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template Sizes(DenseIndex... 
/*indices*/) { } + explicit Sizes(std::initializer_list) { + // todo: add assertion + } +#else + EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index operator[] (const Index index) const { + switch (index) { + case 0: + return internal::get<0, Base>::value; + case 1: + return internal::get<1, Base>::value; + case 2: + return internal::get<2, Base>::value; + case 3: + return internal::get<3, Base>::value; + case 4: + return internal::get<4, Base>::value; + default: + eigen_assert(false && "index overflow"); + return static_cast(-1); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + size_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + size_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#endif + +// Boilerplate +namespace internal { +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, array const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, array const&) + { + return array_get(indices); + } +}; +} // end namespace internal + + + +// Dynamic size +template +struct DSizes : array { + typedef array Base; + static const int count = NumDims; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const { + return NumDims; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const { + return (NumDims == 0) ? 1 : internal::array_prod(*static_cast(this)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = 0; + } + } + EIGEN_DEVICE_FUNC explicit DSizes(const array& a) : Base(a) { } + + EIGEN_DEVICE_FUNC explicit DSizes(const DenseIndex i0) { + eigen_assert(NumDims == 1); + (*this)[0] = i0; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... 
otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) { + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) { + eigen_assert(NumDims == 2); + (*this)[0] = i0; + (*this)[1] = i1; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) { + eigen_assert(NumDims == 3); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) { + eigen_assert(NumDims == 4); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) { + eigen_assert(NumDims == 5); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + (*this)[4] = i4; + } +#endif + + EIGEN_DEVICE_FUNC DSizes& operator = (const array& other) { + *static_cast(this) = other; + return *this; + } + + // A constexpr would be so much better here + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfColMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } +}; + + + + +// Boilerplate +namespace internal { +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_vsize_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const&) + { + return array_get(indices); + } +}; +} // end namespace internal + + +namespace internal { + +template struct array_size > { + static const size_t value = NumDims; +}; +template struct array_size > { + static const size_t value = NumDims; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template struct array_size > { +static const std::ptrdiff_t value = Sizes::count; +}; +template struct array_size > { +static const std::ptrdiff_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) { + return get >::value; +} +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) { + eigen_assert(false && "should never be called"); + return -1; +} +#else +template struct array_size > { + static const size_t value = Sizes::count; +}; +template struct array_size > { + static const size_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes&) { + return get::Base>::value; +} + +#endif + + +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) { + return false; + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC inline bool run(Dims1& dims1, Dims2& dims2) { + return (array_get(dims1) == array_get(dims2)) & + sizes_match_below_dim::run(dims1, dims2); + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) { + 
return true; + } +}; + +} // end namespace internal + + +template +EIGEN_DEVICE_FUNC bool dimensions_match(Dims1& dims1, Dims2& dims2) { + return internal::sizes_match_below_dim::value, internal::array_size::value>::run(dims1, dims2); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h new file mode 100644 index 000000000..06987132b --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -0,0 +1,181 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H + +namespace Eigen { + +/** \class TensorForcedEval + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + * + * + */ +namespace internal { +template class MakePointer_> +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + + enum { + Flags = 0 + }; + template + struct MakePointer { + // Intermediate typedef to workaround MSVC issue. 
+ typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorEvalToOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorEvalToOp type; +}; + +} // end namespace internal + + + + +template class MakePointer_> +class TensorEvalToOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename MakePointer_::Type PointerType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(PointerType buffer, const XprType& expr) + : m_xpr(expr), m_buffer(buffer) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC PointerType buffer() const { return m_buffer; } + + protected: + typename XprType::Nested m_xpr; + PointerType m_buffer; +}; + + + +template class MakePointer_> +struct TensorEvaluator, Device> +{ + typedef TensorEvalToOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_device(device), + m_buffer(op.buffer()), m_op(op), m_expression(op.expression()) + { } + + // Used for accessor extraction in SYCL Managed TensorMap: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& op() const { + return m_op; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~TensorEvaluator() { + } + + typedef typename internal::traits >::template MakePointer::Type DevicePointer; + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(DevicePointer scalar) { + EIGEN_UNUSED_VARIABLE(scalar); + eigen_assert(scalar == NULL); + return m_impl.evalSubExprsIfNeeded(m_buffer); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_buffer[i] = m_impl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + internal::pstoret(m_buffer + i, m_impl.template packet::IsAligned ? 
Aligned : Unaligned>(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here. + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC DevicePointer data() const { return m_buffer; } + ArgType expression() const { return m_expression; } + + /// required by sycl in order to extract the accessor + const TensorEvaluator& impl() const { return m_impl; } + /// added for sycl in order to construct the buffer from the sycl device + const Device& device() const{return m_device;} + + private: + TensorEvaluator m_impl; + const Device& m_device; + DevicePointer m_buffer; + const XprType& m_op; + const ArgType m_expression; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h new file mode 100644 index 000000000..834ce07df --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -0,0 +1,633 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H + +namespace Eigen { + +/** \class TensorEvaluator + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor evaluator classes. + * + * These classes are responsible for the evaluation of the tensor expression. + * + * TODO: add support for more types of expressions, in particular expressions + * leading to lvalues (slicing, reshaping, etc...) + */ + +// Generic evaluator +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? 
+ internal::traits::NumDimensions : 0; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (internal::unpacket_traits::size > 1), + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(const_cast::template MakePointer::Type>(m.data())), m_dims(m.dimensions()), m_device(device), m_impl(m) + { } + + // Used for accessor extraction in SYCL Managed TensorMap: + const Derived& derived() const { return m_impl; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* dest) { + if (dest) { + m_device.memcpy((void*)dest, m_data, sizeof(Scalar) * m_dims.TotalSize()); + return false; + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data); + return m_data[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + eigen_assert(m_data); + return m_data[index]; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt(m_data + index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + return internal::pstoret(m_data + index, x); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array& coords) { + eigen_assert(m_data); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + internal::unpacket_traits::size); + } + + EIGEN_DEVICE_FUNC typename internal::traits::template MakePointer::Type data() const { return m_data; } + + /// required by sycl in order to construct sycl buffer from raw pointer + const Device& device() const{return m_device;} + + protected: + typename internal::traits::template MakePointer::Type m_data; + Dimensions m_dims; + const Device& m_device; + const Derived& m_impl; +}; + +namespace { +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T loadConstant(const T* address) { + return *address; +} +// Use the texture cache on CUDA devices whenever possible +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float loadConstant(const float* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double loadConstant(const double* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +Eigen::half loadConstant(const Eigen::half* address) { + return Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x))); +} +#endif +} + + +// Default evaluator for rvalues +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + 
typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? + internal::traits::NumDimensions : 0; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (internal::unpacket_traits::size > 1), + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + // Used for accessor extraction in SYCL Managed TensorMap: + const Derived& derived() const { return m_impl; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(m.data()), m_dims(m.dimensions()), m_device(device), m_impl(m) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + if (!NumTraits::type>::RequireInitialization && data) { + m_device.memcpy((void*)data, m_data, m_dims.TotalSize() * sizeof(Scalar)); + return false; + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data); + return loadConstant(m_data+index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt_ro(m_data + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data); + const Index index = (static_cast(Layout) == static_cast(ColMajor)) ? m_dims.IndexOfColMajor(coords) + : m_dims.IndexOfRowMajor(coords); + return loadConstant(m_data+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + internal::unpacket_traits::size); + } + + EIGEN_DEVICE_FUNC typename internal::traits::template MakePointer::Type data() const { return m_data; } + + /// added for sycl in order to construct the buffer from the sycl device + const Device& device() const{return m_device;} + + protected: + typename internal::traits::template MakePointer::Type m_data; + Dimensions m_dims; + const Device& m_device; + const Derived& m_impl; +}; + + + + +// -------------------- CwiseNullaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseNullaryOp XprType; + + enum { + IsAligned = true, + PacketAccess = internal::functor_traits::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC + TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper() + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { return true; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_wrapper(m_functor, index); 
+ } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_wrapper.template packetOp(m_functor, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + internal::unpacket_traits::size); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; } + + /// required by sycl in order to extract the accessor + const TensorEvaluator& impl() const { return m_argImpl; } + /// required by sycl in order to extract the accessor + NullaryOp functor() const { return m_functor; } + + + private: + const NullaryOp m_functor; + TensorEvaluator m_argImpl; + const internal::nullary_wrapper m_wrapper; +}; + + + +// -------------------- CwiseUnaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseUnaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & internal::functor_traits::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), + m_argImpl(op.nestedExpression(), device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { + m_argImpl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_argImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_argImpl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_argImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_argImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; } + + /// required by sycl in order to extract the accessor + const TensorEvaluator & impl() const { return m_argImpl; } + /// added for sycl in order to construct the buffer from sycl device + UnaryOp functor() const { return m_functor; } + + + private: + const UnaryOp m_functor; + TensorEvaluator m_argImpl; +}; + + +// -------------------- CwiseBinaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseBinaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & + internal::functor_traits::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), 
device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use right impl instead if right impl dimensions are known at compile time. + return m_leftImpl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_leftImpl.template packet(index), m_rightImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_leftImpl.costPerCoeff(vectorized) + + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& left_impl() const { return m_leftImpl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& right_impl() const { return m_rightImpl; } + /// required by sycl in order to extract the accessor + BinaryOp functor() const { return m_functor; } + + private: + const BinaryOp m_functor; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +// -------------------- CwiseTernaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseTernaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & + internal::functor_traits::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), + m_arg1Impl(op.arg1Expression(), device), + m_arg2Impl(op.arg2Expression(), device), + m_arg3Impl(op.arg3Expression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename 
internal::traits::Index>::value), + STORAGE_INDEX_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename internal::traits::Index>::value), + STORAGE_INDEX_MUST_MATCH) + + eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) && dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use arg2 or arg3 dimensions if they are known at compile time. + return m_arg1Impl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { + m_arg1Impl.evalSubExprsIfNeeded(NULL); + m_arg2Impl.evalSubExprsIfNeeded(NULL); + m_arg3Impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_arg1Impl.cleanup(); + m_arg2Impl.cleanup(); + m_arg3Impl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_arg1Impl.template packet(index), + m_arg2Impl.template packet(index), + m_arg3Impl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_arg1Impl.costPerCoeff(vectorized) + + m_arg2Impl.costPerCoeff(vectorized) + + m_arg3Impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; } + + /// required by sycl in order to extract the accessor + const TensorEvaluator & arg1Impl() const { return m_arg1Impl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& arg2Impl() const { return m_arg2Impl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& arg3Impl() const { return m_arg3Impl; } + + private: + const TernaryOp m_functor; + TensorEvaluator m_arg1Impl; + TensorEvaluator m_arg2Impl; + TensorEvaluator m_arg3Impl; +}; + + +// -------------------- SelectOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorSelectOp XprType; + typedef typename XprType::Scalar Scalar; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & + internal::packet_traits::HasBlend, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + : m_condImpl(op.ifExpression(), device), + m_thenImpl(op.thenExpression(), device), + m_elseImpl(op.elseExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + 
eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions())); + eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use then or else impl instead if they happen to be known at compile time. + return m_condImpl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { + m_condImpl.evalSubExprsIfNeeded(NULL); + m_thenImpl.evalSubExprsIfNeeded(NULL); + m_elseImpl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_condImpl.cleanup(); + m_thenImpl.cleanup(); + m_elseImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + internal::Selector select; + for (Index i = 0; i < PacketSize; ++i) { + select.select[i] = m_condImpl.coeff(index+i); + } + return internal::pblend(select, + m_thenImpl.template packet(index), + m_elseImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_condImpl.costPerCoeff(vectorized) + + m_thenImpl.costPerCoeff(vectorized) + .cwiseMax(m_elseImpl.costPerCoeff(vectorized)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return NULL; } + /// required by sycl in order to extract the accessor + const TensorEvaluator & cond_impl() const { return m_condImpl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& then_impl() const { return m_thenImpl; } + /// required by sycl in order to extract the accessor + const TensorEvaluator& else_impl() const { return m_elseImpl; } + + private: + TensorEvaluator m_condImpl; + TensorEvaluator m_thenImpl; + TensorEvaluator m_elseImpl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h new file mode 100644 index 000000000..f01d77c0a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -0,0 +1,288 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H + +namespace Eigen { + +/** \class TensorExecutor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor executor class. 
+ *
+ * This class is responsible for launching the evaluation of the expression on
+ * the specified computing device.
+ */
+namespace internal {
+
+// Default strategy: the expression is evaluated with a single cpu thread.
+template <typename Expression, typename Device, bool Vectorizable>
+class TensorExecutor
+{
+ public:
+  typedef typename Expression::Index Index;
+  EIGEN_DEVICE_FUNC
+  static inline void run(const Expression& expr, const Device& device = Device())
+  {
+    TensorEvaluator<Expression, Device> evaluator(expr, device);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+    if (needs_assign)
+    {
+      const Index size = array_prod(evaluator.dimensions());
+      for (Index i = 0; i < size; ++i) {
+        evaluator.evalScalar(i);
+      }
+    }
+    evaluator.cleanup();
+  }
+};
+
+
+template <typename Expression>
+class TensorExecutor<Expression, DefaultDevice, true>
+{
+ public:
+  typedef typename Expression::Index Index;
+  EIGEN_DEVICE_FUNC
+  static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice())
+  {
+    TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+    if (needs_assign)
+    {
+      const Index size = array_prod(evaluator.dimensions());
+      const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size;
+      // Give the compiler a strong hint to unroll the loop. But don't insist
+      // on unrolling, because if the function is expensive the compiler should not
+      // unroll the loop at the expense of inlining.
+      const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
+      for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) {
+        for (Index j = 0; j < 4; j++) {
+          evaluator.evalPacket(i + j * PacketSize);
+        }
+      }
+      const Index VectorizedSize = (size / PacketSize) * PacketSize;
+      for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
+        evaluator.evalPacket(i);
+      }
+      for (Index i = VectorizedSize; i < size; ++i) {
+        evaluator.evalScalar(i);
+      }
+    }
+    evaluator.cleanup();
+  }
+};
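+
+// NOTE: illustrative worked example with assumed numbers, not part of the
+// upstream Eigen sources, for the vectorized executor above: with size = 103
+// and PacketSize = 4, UnrolledSize = (103 / 16) * 16 = 96 and
+// VectorizedSize = (103 / 4) * 4 = 100, so indices [0, 96) run through the
+// 4x-unrolled packet loop, [96, 100) through the plain packet loop, and the
+// tail [100, 103) through evalScalar.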
+
+
+// Multicore strategy: the index space is partitioned and each partition is executed on a single core
+#ifdef EIGEN_USE_THREADS
+template <typename Evaluator, typename Index, bool Vectorizable>
+struct EvalRange {
+  static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+    Evaluator evaluator = *evaluator_in;
+    eigen_assert(last >= first);
+    for (Index i = first; i < last; ++i) {
+      evaluator.evalScalar(i);
+    }
+  }
+
+  static Index alignBlockSize(Index size) {
+    return size;
+  }
+};
+
+template <typename Evaluator, typename Index>
+struct EvalRange<Evaluator, Index, true> {
+  static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+
+  static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+    Evaluator evaluator = *evaluator_in;
+    eigen_assert(last >= first);
+    Index i = first;
+    if (last - first >= PacketSize) {
+      eigen_assert(first % PacketSize == 0);
+      Index last_chunk_offset = last - 4 * PacketSize;
+      // Give the compiler a strong hint to unroll the loop. But don't insist
+      // on unrolling, because if the function is expensive the compiler should not
+      // unroll the loop at the expense of inlining.
+      for (; i <= last_chunk_offset; i += 4*PacketSize) {
+        for (Index j = 0; j < 4; j++) {
+          evaluator.evalPacket(i + j * PacketSize);
+        }
+      }
+      last_chunk_offset = last - PacketSize;
+      for (; i <= last_chunk_offset; i += PacketSize) {
+        evaluator.evalPacket(i);
+      }
+    }
+    for (; i < last; ++i) {
+      evaluator.evalScalar(i);
+    }
+  }
+
+  static Index alignBlockSize(Index size) {
+    // Align block size to packet size and account for unrolling in run above.
+    if (size >= 16 * PacketSize) {
+      return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1);
+    }
+    // Aligning to 4 * PacketSize would increase block size by more than 25%.
+    return (size + PacketSize - 1) & ~(PacketSize - 1);
+  }
+};
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> {
+ public:
+  typedef typename Expression::Index Index;
+  static inline void run(const Expression& expr, const ThreadPoolDevice& device)
+  {
+    typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
+    Evaluator evaluator(expr, device);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+    if (needs_assign)
+    {
+      const Index size = array_prod(evaluator.dimensions());
+#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
+      device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
+                         EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize,
+                         [&evaluator](Index first, Index last) {
+                           EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, first, last);
+                         });
+#else
+      size_t num_threads = device.numThreads();
+      if (num_threads > 1) {
+        num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
+            size, evaluator.costPerCoeff(Vectorizable), num_threads);
+      }
+      if (num_threads == 1) {
+        EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size);
+      } else {
+        const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
+        Index blocksz = std::ceil<Index>(static_cast<float>(size)/num_threads) + PacketSize - 1;
+        const Index blocksize = numext::maxi<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
+        const Index numblocks = size / blocksize;
+
+        Barrier barrier(numblocks);
+        for (int i = 0; i < numblocks; ++i) {
+          device.enqueue_with_barrier(
+              &barrier, &EvalRange<Evaluator, Index, Vectorizable>::run,
+              &evaluator, i * blocksize, (i + 1) * blocksize);
+        }
+        if (numblocks * blocksize < size) {
+          EvalRange<Evaluator, Index, Vectorizable>::run(
+              &evaluator, numblocks * blocksize, size);
+        }
+        barrier.Wait();
+      }
+#endif // defined(!EIGEN_USE_SIMPLE_THREAD_POOL)
+    }
+    evaluator.cleanup();
+  }
+};
+#endif // EIGEN_USE_THREADS
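+
+// NOTE: illustrative sketch, not part of the upstream Eigen sources. The
+// executor is what an expression assignment ultimately dispatches to; for
+// example (assuming float tensors out, in and a ThreadPoolDevice device):
+//
+//   out.device(device) = in.square() + in;
+//
+// evaluates the right-hand side through
+// TensorExecutor<assign-expression, ThreadPoolDevice, Vectorizable>::run.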
+
+
+// GPU: the evaluation of the expression is offloaded to a GPU.
+#if defined(EIGEN_USE_GPU)
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, GpuDevice, Vectorizable> {
+ public:
+  typedef typename Expression::Index Index;
+  static void run(const Expression& expr, const GpuDevice& device);
+};
+
+
+#if defined(__CUDACC__)
+template <typename Evaluator, typename Index, bool Vectorizable>
+struct EigenMetaKernelEval {
+  static __device__ EIGEN_ALWAYS_INLINE
+  void run(Evaluator& eval, Index first, Index last, Index step_size) {
+    for (Index i = first; i < last; i += step_size) {
+      eval.evalScalar(i);
+    }
+  }
+};
+
+template <typename Evaluator, typename Index>
+struct EigenMetaKernelEval<Evaluator, Index, true> {
+  static __device__ EIGEN_ALWAYS_INLINE
+  void run(Evaluator& eval, Index first, Index last, Index step_size) {
+    const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+    const Index vectorized_size = (last / PacketSize) * PacketSize;
+    const Index vectorized_step_size = step_size * PacketSize;
+
+    // Use the vector path
+    for (Index i = first * PacketSize; i < vectorized_size;
+         i += vectorized_step_size) {
+      eval.evalPacket(i);
+    }
+    for (Index i = vectorized_size + first; i < last; i += step_size) {
+      eval.evalScalar(i);
+    }
+  }
+};
+
+template <typename Evaluator, typename Index>
+__global__ void
+__launch_bounds__(1024)
+EigenMetaKernel(Evaluator eval, Index size) {
+
+  const Index first_index = blockIdx.x * blockDim.x + threadIdx.x;
+  const Index step_size = blockDim.x * gridDim.x;
+
+  const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned;
+  EigenMetaKernelEval<Evaluator, Index, vectorizable>::run(eval, first_index, size, step_size);
+}
+
+/*static*/
+template <typename Expression, bool Vectorizable>
+inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run(
+    const Expression& expr, const GpuDevice& device) {
+  TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
+  const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+  if (needs_assign) {
+    const int block_size = device.maxCudaThreadsPerBlock();
+    const int max_blocks = device.getNumCudaMultiProcessors() *
+                           device.maxCudaThreadsPerMultiProcessor() / block_size;
+    const Index size = array_prod(evaluator.dimensions());
+    // Create at least one block to ensure we won't crash when tensorflow calls with tensors of size 0.
+    const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1);
+
+    LAUNCH_CUDA_KERNEL(
+        (EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>),
+        num_blocks, block_size, 0, device, evaluator, size);
+  }
+  evaluator.cleanup();
+}
+
+#endif // __CUDACC__
+#endif // EIGEN_USE_GPU
+
+// SYCL Executor policy
+#ifdef EIGEN_USE_SYCL
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, SyclDevice, Vectorizable> {
+public:
+  static inline void run(const Expression &expr, const SyclDevice &device) {
+    // call TensorSYCL module
+    TensorSycl::run(expr, device);
+  }
+};
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
new file mode 100644
index 000000000..85dfc7a69
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -0,0 +1,371 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H + +namespace Eigen { + +/** \class TensorExpr + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor expression classes. + * + * The TensorCwiseNullaryOp class applies a nullary operators to an expression. + * This is typically used to generate constants. + * + * The TensorCwiseUnaryOp class represents an expression where a unary operator + * (e.g. cwiseSqrt) is applied to an expression. + * + * The TensorCwiseBinaryOp class represents an expression where a binary + * operator (e.g. addition) is applied to a lhs and a rhs expression. + * + */ +namespace internal { +template +struct traits > + : traits +{ + typedef traits XprTraits; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + + enum { + Flags = 0 + }; +}; + +} // end namespace internal + + + +template +class TensorCwiseNullaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef TensorCwiseNullaryOp Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseNullaryOp(const XprType& xpr, const NullaryOp& func = NullaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const NullaryOp& functor() const { return m_functor; } + + protected: + typename XprType::Nested m_xpr; + const NullaryOp m_functor; +}; + + + +namespace internal { +template +struct traits > + : traits +{ + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. + typedef typename result_of::type Scalar; + typedef traits XprTraits; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseUnaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseUnaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseUnaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. 
+ typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const UnaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs + // are different. + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. + typedef typename result_of< + BinaryOp(typename LhsXprType::Scalar, + typename RhsXprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type< + typename traits::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type< + typename traits::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseBinaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseBinaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const BinaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const BinaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the args are different. 
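  // The result_of-based promotion used here (and in the binary traits above)
  // means the expression's Scalar is whatever the functor returns, not either
  // operand type. A minimal sketch of the same idea with decltype (PlusOp is
  // illustrative, not Eigen API):
  //
  //   struct PlusOp { double operator()(int a, double b) const { return a + b; } };
  //   typedef decltype(std::declval<PlusOp>()(std::declval<int>(),
  //                                           std::declval<double>())) promoted_scalar_t;
  //   static_assert(std::is_same<promoted_scalar_t, double>::value,
  //                 "the functor, not the operands, decides Scalar");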
+ typedef typename result_of< + TernaryOp(typename Arg1XprType::Scalar, + typename Arg2XprType::Scalar, + typename Arg3XprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename Arg1XprType::Nested Arg1Nested; + typedef typename Arg2XprType::Nested Arg2Nested; + typedef typename Arg3XprType::Nested Arg3Nested; + typedef typename remove_reference::type _Arg1Nested; + typedef typename remove_reference::type _Arg2Nested; + typedef typename remove_reference::type _Arg3Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseTernaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseTernaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseTernaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseTernaryOp(const Arg1XprType& arg1, const Arg2XprType& arg2, const Arg3XprType& arg3, const TernaryOp& func = TernaryOp()) + : m_arg1_xpr(arg1), m_arg2_xpr(arg2), m_arg3_xpr(arg3), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const TernaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg1Expression() const { return m_arg1_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg2Expression() const { return m_arg2_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg3Expression() const { return m_arg3_xpr; } + + protected: + typename Arg1XprType::Nested m_arg1_xpr; + typename Arg2XprType::Nested m_arg2_xpr; + typename Arg3XprType::Nested m_arg3_xpr; + const TernaryOp m_functor; +}; + + +namespace internal { +template +struct traits > + : traits +{ + typedef typename traits::Scalar Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename IfXprType::Nested IfNested; + typedef typename ThenXprType::Nested ThenNested; + typedef typename ElseXprType::Nested ElseNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSelectOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSelectOp type; +}; + +} // end namespace internal + + +template +class TensorSelectOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC + 
TensorSelectOp(const IfXprType& a_condition, + const ThenXprType& a_then, + const ElseXprType& a_else) + : m_condition(a_condition), m_then(a_then), m_else(a_else) + { } + + EIGEN_DEVICE_FUNC + const IfXprType& ifExpression() const { return m_condition; } + + EIGEN_DEVICE_FUNC + const ThenXprType& thenExpression() const { return m_then; } + + EIGEN_DEVICE_FUNC + const ElseXprType& elseExpression() const { return m_else; } + + protected: + typename IfXprType::Nested m_condition; + typename ThenXprType::Nested m_then; + typename ElseXprType::Nested m_else; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h new file mode 100644 index 000000000..08eb5595a --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h @@ -0,0 +1,651 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Jianwei Cui +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H +#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H + +// This code requires the ability to initialize arrays of constant +// values directly inside a class. +#if __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900 + +namespace Eigen { + +/** \class TensorFFT + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor FFT class. 
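 *
 * A usage sketch (the shapes, the transformed dimension, and the
 * BothParts / FFT_FORWARD template arguments are illustrative):
 *   Eigen::Tensor<float, 2> input(16, 32);
 *   input.setRandom();
 *   Eigen::array<int, 1> fft_dims = {{1}};  // transform along dimension 1 only
 *   Eigen::Tensor<std::complex<float>, 2> spectrum =
 *       input.fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft_dims);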
+ * + * TODO: + * Vectorize the Cooley Tukey and the Bluestein algorithm + * Add support for multithreaded evaluation + * Improve the performance on GPU + */ + +template struct MakeComplex { + template + EIGEN_DEVICE_FUNC + T operator() (const T& val) const { return val; } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const T& val) const { return std::complex(val, 0); } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const std::complex& val) const { return val; } +}; + +template struct PartOf { + template T operator() (const T& val) const { return val; } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.real(); } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.imag(); } +}; + +namespace internal { +template +struct traits > : public traits { + typedef traits XprTraits; + typedef typename NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename XprTraits::Scalar InputScalar; + typedef typename conditional::type OutputScalar; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> { + typedef const TensorFFTOp& type; +}; + +template +struct nested, 1, typename eval >::type> { + typedef TensorFFTOp type; +}; + +} // end namespace internal + +template +class TensorFFTOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft) + : m_xpr(expr), m_fft(fft) {} + + EIGEN_DEVICE_FUNC + const FFT& fft() const { return m_fft; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& expression() const { + return m_xpr; + } + + protected: + typename XprType::Nested m_xpr; + const FFT m_fft; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> { + typedef TensorFFTOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef internal::traits XprTraits; + typedef typename XprTraits::Scalar InputScalar; + typedef typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, + PacketAccess = true, + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
TensorEvaluator(const XprType& op, const Device& device) : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) { + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + } + } else { + m_strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + } + } + m_size = m_dimensions.TotalSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_dimensions; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(OutputScalar* data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (data) { + evalToBuf(data); + return false; + } else { + m_data = (CoeffReturnType*)m_device.allocate(sizeof(CoeffReturnType) * m_size); + evalToBuf(m_data); + return true; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + if (m_data) { + m_device.deallocate(m_data); + m_data = NULL; + } + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const { + return m_data[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType + packet(Index index) const { + return internal::ploadt(m_data + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return m_data; } + + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalToBuf(OutputScalar* data) { + const bool write_to_out = internal::is_same::value; + ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size); + + for (Index i = 0; i < m_size; ++i) { + buf[i] = MakeComplex::value>()(m_impl.coeff(i)); + } + + for (size_t i = 0; i < m_fft.size(); ++i) { + Index dim = m_fft[i]; + eigen_assert(dim >= 0 && dim < NumDims); + Index line_len = m_dimensions[dim]; + eigen_assert(line_len >= 1); + ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len); + const bool is_power_of_two = isPowerOfTwo(line_len); + const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len); + const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite); + + ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1)); + if (!is_power_of_two) { + // Compute twiddle factors + // t_n = exp(sqrt(-1) * pi * n^2 / line_len) + // for n = 0, 1,..., line_len-1. 
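      // In closed form each factor is std::polar(1.0, pi * n * n / line_len);
      // the recurrence described next avoids a sin/cos per element. A sketch
      // checking one against the other (assuming std::complex<double> and a
      // hypothetical small line_len):
      //
      //   const double pi = std::acos(-1.0);
      //   std::complex<double> prev(1, 0), cur = std::polar(1.0, pi / line_len);
      //   const std::complex<double> t1_sq = cur * cur;
      //   for (int n = 2; n < line_len; ++n) {
      //     std::complex<double> next = cur * cur / prev * t1_sq;  // t_n
      //     assert(std::abs(next - std::polar(1.0, pi * n * n / double(line_len))) < 1e-9);
      //     prev = cur; cur = next;
      //   }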
+ // For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2 + pos_j_base_powered[0] = ComplexScalar(1, 0); + if (line_len > 1) { + const RealScalar pi_over_len(EIGEN_PI / line_len); + const ComplexScalar pos_j_base = ComplexScalar( + std::cos(pi_over_len), std::sin(pi_over_len)); + pos_j_base_powered[1] = pos_j_base; + if (line_len > 2) { + const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base; + for (int j = 2; j < line_len + 1; ++j) { + pos_j_base_powered[j] = pos_j_base_powered[j - 1] * + pos_j_base_powered[j - 1] / + pos_j_base_powered[j - 2] * pos_j_base_sq; + } + } + } + } + + for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) { + const Index base_offset = getBaseOffsetFromIndex(partial_index, dim); + + // get data into line_buf + const Index stride = m_strides[dim]; + if (stride == 1) { + memcpy(line_buf, &buf[base_offset], line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + for (int j = 0; j < line_len; ++j, offset += stride) { + line_buf[j] = buf[offset]; + } + } + + // processs the line + if (is_power_of_two) { + processDataLineCooleyTukey(line_buf, line_len, log_len); + } + else { + processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered); + } + + // write back + if (FFTDir == FFT_FORWARD && stride == 1) { + memcpy(&buf[base_offset], line_buf, line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0); + for (int j = 0; j < line_len; ++j, offset += stride) { + buf[offset] = (FFTDir == FFT_FORWARD) ? line_buf[j] : line_buf[j] * div_factor; + } + } + } + m_device.deallocate(line_buf); + if (!is_power_of_two) { + m_device.deallocate(a); + m_device.deallocate(b); + m_device.deallocate(pos_j_base_powered); + } + } + + if(!write_to_out) { + for (Index i = 0; i < m_size; ++i) { + data[i] = PartOf()(buf[i]); + } + m_device.deallocate(buf); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) { + eigen_assert(x > 0); + return !(x & (x - 1)); + } + + // The composite number for padding, used in Bluestein's FFT algorithm + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) { + Index i = 2; + while (i < 2 * n - 1) i *= 2; + return i; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) { + Index log2m = 0; + while (m >>= 1) log2m++; + return log2m; + } + + // Call Cooley Tukey algorithm directly, data length must be power of 2 + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) { + eigen_assert(isPowerOfTwo(line_len)); + scramble_FFT(line_buf, line_len); + compute_1D_Butterfly(line_buf, line_len, log_len); + } + + // Call Bluestein's FFT algorithm, m is a good composite number greater than (2 * n - 1), used as the padding length + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) { + Index n = line_len; + Index m = good_composite; + ComplexScalar* data = line_buf; + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + a[i] = data[i] * numext::conj(pos_j_base_powered[i]); + } + else { + a[i] = data[i] * pos_j_base_powered[i]; + } + } + for (Index i = n; i < m; ++i) { + a[i] = ComplexScalar(0, 0); + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { 
+ b[i] = pos_j_base_powered[i]; + } + else { + b[i] = numext::conj(pos_j_base_powered[i]); + } + } + for (Index i = n; i < m - n; ++i) { + b[i] = ComplexScalar(0, 0); + } + for (Index i = m - n; i < m; ++i) { + if(FFTDir == FFT_FORWARD) { + b[i] = pos_j_base_powered[m-i]; + } + else { + b[i] = numext::conj(pos_j_base_powered[m-i]); + } + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + scramble_FFT(b, m); + compute_1D_Butterfly(b, m, log_len); + + for (Index i = 0; i < m; ++i) { + a[i] *= b[i]; + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + //Do the scaling after ifft + for (Index i = 0; i < m; ++i) { + a[i] /= m; + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + data[i] = a[i] * numext::conj(pos_j_base_powered[i]); + } + else { + data[i] = a[i] * pos_j_base_powered[i]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) { + eigen_assert(isPowerOfTwo(n)); + Index j = 1; + for (Index i = 1; i < n; ++i){ + if (j > i) { + std::swap(data[j-1], data[i-1]); + } + Index m = n >> 1; + while (m >= 2 && j > m) { + j -= m; + m >>= 1; + } + j += m; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_2(ComplexScalar* data) { + ComplexScalar tmp = data[1]; + data[1] = data[0] - data[1]; + data[0] += tmp; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_4(ComplexScalar* data) { + ComplexScalar tmp[4]; + tmp[0] = data[0] + data[1]; + tmp[1] = data[0] - data[1]; + tmp[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]); + } else { + tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]); + } + data[0] = tmp[0] + tmp[2]; + data[1] = tmp[1] + tmp[3]; + data[2] = tmp[0] - tmp[2]; + data[3] = tmp[1] - tmp[3]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_8(ComplexScalar* data) { + ComplexScalar tmp_1[8]; + ComplexScalar tmp_2[8]; + + tmp_1[0] = data[0] + data[1]; + tmp_1[1] = data[0] - data[1]; + tmp_1[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1); + } else { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1); + } + tmp_1[4] = data[4] + data[5]; + tmp_1[5] = data[4] - data[5]; + tmp_1[6] = data[6] + data[7]; + if (Dir == FFT_FORWARD) { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1); + } else { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1); + } + tmp_2[0] = tmp_1[0] + tmp_1[2]; + tmp_2[1] = tmp_1[1] + tmp_1[3]; + tmp_2[2] = tmp_1[0] - tmp_1[2]; + tmp_2[3] = tmp_1[1] - tmp_1[3]; + tmp_2[4] = tmp_1[4] + tmp_1[6]; +// SQRT2DIV2 = sqrt(2)/2 +#define SQRT2DIV2 0.7071067811865476 + if (Dir == FFT_FORWARD) { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2); + } else { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2); + } + data[0] = tmp_2[0] + tmp_2[4]; + data[1] = tmp_2[1] + tmp_2[5]; + data[2] = tmp_2[2] + tmp_2[6]; + data[3] = tmp_2[3] + tmp_2[7]; + data[4] = tmp_2[0] - tmp_2[4]; + data[5] = tmp_2[1] - tmp_2[5]; + data[6] = tmp_2[2] - tmp_2[6]; + data[7] = tmp_2[3] - tmp_2[7]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_1D_merge( + 
ComplexScalar* data, Index n, Index n_power_of_2) { + // Original code: + // RealScalar wtemp = std::sin(M_PI/n); + // RealScalar wpi = -std::sin(2 * M_PI/n); + const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2]; + const RealScalar wpi = (Dir == FFT_FORWARD) + ? m_minus_sin_2_PI_div_n_LUT[n_power_of_2] + : -m_minus_sin_2_PI_div_n_LUT[n_power_of_2]; + + const ComplexScalar wp(wtemp, wpi); + const ComplexScalar wp_one = wp + ComplexScalar(1, 0); + const ComplexScalar wp_one_2 = wp_one * wp_one; + const ComplexScalar wp_one_3 = wp_one_2 * wp_one; + const ComplexScalar wp_one_4 = wp_one_3 * wp_one; + const Index n2 = n / 2; + ComplexScalar w(1.0, 0.0); + for (Index i = 0; i < n2; i += 4) { + ComplexScalar temp0(data[i + n2] * w); + ComplexScalar temp1(data[i + 1 + n2] * w * wp_one); + ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2); + ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3); + w = w * wp_one_4; + + data[i + n2] = data[i] - temp0; + data[i] += temp0; + + data[i + 1 + n2] = data[i + 1] - temp1; + data[i + 1] += temp1; + + data[i + 2 + n2] = data[i + 2] - temp2; + data[i + 2] += temp2; + + data[i + 3 + n2] = data[i + 3] - temp3; + data[i + 3] += temp3; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly( + ComplexScalar* data, Index n, Index n_power_of_2) { + eigen_assert(isPowerOfTwo(n)); + if (n > 8) { + compute_1D_Butterfly(data, n / 2, n_power_of_2 - 1); + compute_1D_Butterfly(data + n / 2, n / 2, n_power_of_2 - 1); + butterfly_1D_merge(data, n, n_power_of_2); + } else if (n == 8) { + butterfly_8(data); + } else if (n == 4) { + butterfly_4(data); + } else if (n == 2) { + butterfly_2(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const { + Index result = 0; + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > omitted_dim; --i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + else { + for (Index i = 0; i < omitted_dim; ++i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + // Value of index_coords[omitted_dim] is not determined to this step + return result; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const { + Index result = base + offset * m_strides[omitted_dim] ; + return result; + } + + protected: + Index m_size; + const FFT& m_fft; + Dimensions m_dimensions; + array m_strides; + TensorEvaluator m_impl; + CoeffReturnType* m_data; + const Device& m_device; + + // This will support a maximum FFT size of 2^32 for each dimension + // m_sin_PI_div_n_LUT[i] = (-2) * std::sin(M_PI / std::pow(2,i)) ^ 2; + const RealScalar m_sin_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(-2), + RealScalar(-0.999999999999999), + RealScalar(-0.292893218813453), + RealScalar(-0.0761204674887130), + RealScalar(-0.0192147195967696), + RealScalar(-0.00481527332780311), + RealScalar(-0.00120454379482761), + RealScalar(-3.01181303795779e-04), + RealScalar(-7.52981608554592e-05), + RealScalar(-1.88247173988574e-05), + RealScalar(-4.70619042382852e-06), + RealScalar(-1.17654829809007e-06), + RealScalar(-2.94137117780840e-07), + 
RealScalar(-7.35342821488550e-08), + RealScalar(-1.83835707061916e-08), + RealScalar(-4.59589268710903e-09), + RealScalar(-1.14897317243732e-09), + RealScalar(-2.87243293150586e-10), + RealScalar( -7.18108232902250e-11), + RealScalar(-1.79527058227174e-11), + RealScalar(-4.48817645568941e-12), + RealScalar(-1.12204411392298e-12), + RealScalar(-2.80511028480785e-13), + RealScalar(-7.01277571201985e-14), + RealScalar(-1.75319392800498e-14), + RealScalar(-4.38298482001247e-15), + RealScalar(-1.09574620500312e-15), + RealScalar(-2.73936551250781e-16), + RealScalar(-6.84841378126949e-17), + RealScalar(-1.71210344531737e-17), + RealScalar(-4.28025861329343e-18) + }; + + // m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2,i)); + const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(0.0), + RealScalar(-1.00000000000000e+00), + RealScalar(-7.07106781186547e-01), + RealScalar(-3.82683432365090e-01), + RealScalar(-1.95090322016128e-01), + RealScalar(-9.80171403295606e-02), + RealScalar(-4.90676743274180e-02), + RealScalar(-2.45412285229123e-02), + RealScalar(-1.22715382857199e-02), + RealScalar(-6.13588464915448e-03), + RealScalar(-3.06795676296598e-03), + RealScalar(-1.53398018628477e-03), + RealScalar(-7.66990318742704e-04), + RealScalar(-3.83495187571396e-04), + RealScalar(-1.91747597310703e-04), + RealScalar(-9.58737990959773e-05), + RealScalar(-4.79368996030669e-05), + RealScalar(-2.39684498084182e-05), + RealScalar(-1.19842249050697e-05), + RealScalar(-5.99211245264243e-06), + RealScalar(-2.99605622633466e-06), + RealScalar(-1.49802811316901e-06), + RealScalar(-7.49014056584716e-07), + RealScalar(-3.74507028292384e-07), + RealScalar(-1.87253514146195e-07), + RealScalar(-9.36267570730981e-08), + RealScalar(-4.68133785365491e-08), + RealScalar(-2.34066892682746e-08), + RealScalar(-1.17033446341373e-08), + RealScalar(-5.85167231706864e-09), + RealScalar(-2.92583615853432e-09) + }; +}; + +} // end namespace Eigen + +#endif // EIGEN_HAS_CONSTEXPR + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h new file mode 100644 index 000000000..fcee5f60d --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h @@ -0,0 +1,389 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H +#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H + +namespace Eigen { + +/** \class TensorFixedSize + * \ingroup CXX11_Tensor_Module + * + * \brief The fixed sized version of the tensor class. 
+ * + * The fixed sized equivalent of + * Eigen::Tensor t(3, 5, 7); + * is + * Eigen::TensorFixedSize> t; + */ + +template +class TensorFixedSize : public TensorBase > +{ + public: + typedef TensorFixedSize Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + static const int Options = Options_; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0), + Layout = Options_ & RowMajor ? RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + typedef Dimensions_ Dimensions; + static const std::size_t NumIndices = Dimensions::count; + + protected: + TensorStorage m_storage; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
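    // (Aside: once the rank check passes, the index pack is linearized against
    // the stored dimensions, exactly as in the operator() overloads above: the
    // first index varies fastest for ColMajor, the last for RowMajor. For an
    // illustrative 2x3x4 extent, element (1, 2, 3) lands at the same flat
    // offset either way:
    //   ColMajor: 1 + 2*(2 + 3*3) = 23
    //   RowMajor: 3 + 4*(2 + 3*1) = 23
    // )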
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + eigen_assert(checkIndexRange(indices)); + return coeff(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator[](Index index) 
const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + eigen_assert(checkIndexRange(indices)); + return coeffRef(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const Self& other) + : m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other) + : m_storage(other.m_storage) + { + } +#endif + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE 
TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other) + { + // FIXME: check that the dimensions of other match the dimensions of *this. + // Unfortunately this isn't possible yet when the rhs is an expression. + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other) + { + // FIXME: check that the dimensions of other match the dimensions of *this. + // Unfortunately this isn't possible yet when the rhs is an expression. + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + protected: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE bool checkIndexRange(const array& /*indices*/) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return true; + // check whether the indices are all >= 0 + /* array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions());*/ + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h new file mode 100644 index 000000000..8bece4e65 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h @@ -0,0 +1,169 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H + +namespace Eigen { + +namespace internal { +template class MakePointer_> +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + + enum { + Flags = 0 + }; + template struct MakePointer { + // Intermediate typedef to workaround MSVC issue. + typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorForcedEvalOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorForcedEvalOp type; +}; + +} // end namespace internal + + + +// FIXME use proper doxygen documentation (e.g. \tparam MakePointer_) + +/** \class TensorForcedEvalOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + * + * + */ +/// `template class MakePointer_` is added to convert the host pointer to the device pointer. +/// It is added due to the fact that for our device compiler `T*` is not allowed. +/// If we wanted to use the same Evaluator functions we have to convert that type to our pointer `T`. +/// This is done through our `MakePointer_` class. By default the Type in the `MakePointer_` is `T*` . +/// Therefore, by adding the default value, we managed to convert the type and it does not break any +/// existing code as its default value is `T*`. +template class MakePointer_> +class TensorForcedEvalOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + + +template class MakePointer_> +struct TensorEvaluator, Device> +{ + typedef TensorForcedEvalOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = true, + PacketAccess = (PacketSize > 1), + Layout = TensorEvaluator::Layout, + RawAccess = true + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + /// op_ is used for sycl + : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) + { } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { + const Index numValues = internal::array_prod(m_impl.dimensions()); + m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType)); + // Should initialize the memory in case we're dealing with non POD types. 
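    // (Placement new runs the element constructors inside the raw allocation.
    // A standalone sketch of the allocate-construct-destroy pattern, with
    // Coeff standing in for a hypothetical non-POD element type:
    //
    //   void* raw = std::malloc(n * sizeof(Coeff));          // device.allocate() stand-in
    //   Coeff* buf = static_cast<Coeff*>(raw);
    //   for (std::size_t i = 0; i < n; ++i) new (buf + i) Coeff();  // construct in place
    //   for (std::size_t i = 0; i < n; ++i) buf[i].~Coeff();        // destroy before freeing
    //   std::free(raw);
    // )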
+    if (NumTraits<CoeffReturnType>::RequireInitialization) {
+      for (Index i = 0; i < numValues; ++i) {
+        new(m_buffer+i) CoeffReturnType();
+      }
+    }
+    typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo;
+    EvalTo evalToTmp(m_buffer, m_op);
+    const bool PacketAccess = internal::IsVectorizable<Device, const ArgType>::value;
+    internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, PacketAccess>::run(evalToTmp, m_device);
+    return true;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_device.deallocate(m_buffer);
+    m_buffer = NULL;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+  {
+    return m_buffer[index];
+  }
+
+  template<int LoadMode>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+  {
+    return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+    return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+  }
+
+  EIGEN_DEVICE_FUNC typename MakePointer<CoeffReturnType>::Type data() const { return m_buffer; }
+
+  /// required by sycl in order to extract the sycl accessor
+  const TensorEvaluator<ArgType, Device>& impl() { return m_impl; }
+  /// used by sycl in order to build the sycl buffer
+  const Device& device() const{return m_device;}
+ private:
+  TensorEvaluator<ArgType, Device> m_impl;
+  const ArgType m_op;
+  const Device& m_device;
+  typename MakePointer<CoeffReturnType>::Type m_buffer;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
new file mode 100644
index 000000000..52b803d7f
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+
+namespace Eigen {
+
+// MakePointer class is used as a container of the address space of the pointer
+// on the host and on the device. From the host side it generates the T* pointer
+// and when EIGEN_USE_SYCL is used it constructs a buffer with a map_allocator to
+// T* m_data on the host. It is always called on the device.
+// Specialisation of MakePointer class for creating the sycl buffer with
+// map_allocator.
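The default trait, defined immediately below, just maps T to T*. A self-contained sketch of the indirection it enables, where an evaluator-style holder never spells T* directly so a device back end could rebind the trait to a wrapper type (MakePointerSketch and BufferHolder are hypothetical names, not Eigen API):

#include <cstddef>

template <typename T> struct MakePointerSketch { typedef T* Type; };

template <typename Scalar, template <typename> class MakePtr = MakePointerSketch>
struct BufferHolder {
  typename MakePtr<Scalar>::Type data;  // T* by default; a back end can rebind
};

int main() {
  float storage[4] = {0.0f, 1.0f, 2.0f, 3.0f};
  BufferHolder<float> h = {storage};
  return h.data[2] == 2.0f ? 0 : 1;
}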
+template struct MakePointer { + typedef T* Type; +}; + +template class MakePointer_ = MakePointer> class TensorMap; +template class Tensor; +template class TensorFixedSize; +template class TensorRef; +template class TensorBase; + +template class TensorCwiseNullaryOp; +template class TensorCwiseUnaryOp; +template class TensorCwiseBinaryOp; +template class TensorCwiseTernaryOp; +template class TensorSelectOp; +template class MakePointer_ = MakePointer > class TensorReductionOp; +template class TensorIndexTupleOp; +template class TensorTupleReducerOp; +template class TensorConcatenationOp; +template class TensorContractionOp; +template class TensorConversionOp; +template class TensorConvolutionOp; +template class TensorFFTOp; +template class TensorPatchOp; +template class TensorImagePatchOp; +template class TensorVolumePatchOp; +template class TensorBroadcastingOp; +template class TensorChippingOp; +template class TensorReshapingOp; +template class TensorLayoutSwapOp; +template class TensorSlicingOp; +template class TensorReverseOp; +template class TensorPaddingOp; +template class TensorShufflingOp; +template class TensorStridingOp; +template class TensorStridingSlicingOp; +template class TensorInflationOp; +template class TensorGeneratorOp; +template class TensorAssignOp; +template class TensorScanOp; + +template class TensorCustomUnaryOp; +template class TensorCustomBinaryOp; + +template class MakePointer_ = MakePointer> class TensorEvalToOp; +template class MakePointer_ = MakePointer> class TensorForcedEvalOp; + +template class TensorDevice; +template struct TensorEvaluator; + +struct DefaultDevice; +struct ThreadPoolDevice; +struct GpuDevice; +struct SyclDevice; + +enum FFTResultType { + RealPart = 0, + ImagPart = 1, + BothParts = 2 +}; + +enum FFTDirection { + FFT_FORWARD = 0, + FFT_REVERSE = 1 +}; + + +namespace internal { + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess; +}; + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess && + TensorEvaluator::IsAligned; +}; + +template ::value> +class TensorExecutor; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h new file mode 100644 index 000000000..d73f6dc68 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h @@ -0,0 +1,489 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H + +namespace Eigen { +namespace internal { + + +/** \internal + * \brief Template functor to compute the modulo between an array and a scalar. 
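 * For example, scalar_mod_op<int>(7) maps each coefficient a to a % 7
 * (a sketch; an integer Scalar is assumed, since the body uses operator%).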
+ */ +template +struct scalar_mod_op { + EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {} + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a % m_divisor; } + const Scalar m_divisor; +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + + +/** \internal + * \brief Template functor to compute the modulo between 2 arrays. + */ +template +struct scalar_mod2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op); + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; } +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + +template +struct scalar_fmod_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op); + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar + operator()(const Scalar& a, const Scalar& b) const { + return numext::fmod(a, b); + } +}; +template +struct functor_traits > { + enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell. + PacketAccess = false }; +}; + + +/** \internal + * \brief Template functor to compute the sigmoid of a scalar + * \sa class CwiseUnaryOp, ArrayBase::sigmoid() + */ +template +struct scalar_sigmoid_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { + const T one = T(1); + return one / (one + numext::exp(-x)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(const Packet& x) const { + const Packet one = pset1(T(1)); + return pdiv(one, padd(one, pexp(pnegate(x)))); + } +}; + +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost * 2 + NumTraits::MulCost * 6, + PacketAccess = packet_traits::HasAdd && packet_traits::HasDiv && + packet_traits::HasNegate && packet_traits::HasExp + }; +}; + + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false + }; +}; + +// Standard reduction functors +template struct SumReducer +{ + static const bool PacketAccess = packet_traits::HasAdd; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_sum_op sum_op; + *accum = sum_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = padd(*accum, p); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd + }; +}; + + +template struct MeanReducer +{ + static const bool PacketAccess = packet_traits::HasAdd && !NumTraits::IsInteger; + static const bool IsStateful = true; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + MeanReducer() : scalarCount_(0), packetCount_(0) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) { + internal::scalar_sum_op sum_op; 
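    // (Reducers follow a fixed protocol that the evaluators drive:
    // initialize() the accumulator, reduce() each coefficient into it, then
    // finalize(). A sketch of a generic scalar fold over that protocol,
    // assuming a hypothetical reducer r and input array of length n:
    //
    //   T accum = r.initialize();
    //   for (Index i = 0; i < n; ++i) r.reduce(input[i], &accum);
    //   T result = r.finalize(accum);
    // )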
+ *accum = sum_op(*accum, t); + scalarCount_++; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) { + (*accum) = padd(*accum, p); + packetCount_++; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum / scalarCount_; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return pdiv(vaccum, pset1(packetCount_)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits::size); + } + + protected: + DenseIndex scalarCount_; + DenseIndex packetCount_; +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd + }; +}; + + +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::lowest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return -Eigen::NumTraits::infinity(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::highest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::infinity(); + } +}; + + +template struct MaxReducer +{ + static const bool PacketAccess = packet_traits::HasMax; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + if (t > *accum) { *accum = t; } + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = pmax(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + return numext::maxi(saccum, predux_max(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMax + }; +}; + + +template struct MinReducer +{ + static const bool PacketAccess = packet_traits::HasMin; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + if (t < *accum) { *accum = t; } + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = pmin(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + 
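MeanReducer is the one stateful reducer in this file: it counts how many coefficients went through the scalar path and how many packets went through the vector path, and finalizeBoth() divides by the combined total. A sketch of that accounting, with hypothetical names and an assumed packet size of 4:

#include <cstddef>
#include <iostream>

// Hypothetical helper mirroring MeanReducer::finalizeBoth: coefficients fed
// one at a time are tallied in scalar_count, while each packet stands for
// packet_size coefficients, so the mean divides by the combined total.
float mean_finalize(float scalar_accum, float packet_accum_sum,
                    std::size_t scalar_count, std::size_t packet_count,
                    std::size_t packet_size) {
  return (scalar_accum + packet_accum_sum) /
         static_cast<float>(scalar_count + packet_count * packet_size);
}

int main() {
  // {1,2,3,4} went through one packet, {5,6} through the scalar path.
  std::cout << mean_finalize(11.0f, 10.0f, 2, 1, 4) << "\n";  // prints 3.5
}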
} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + return numext::mini(saccum, predux_min(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMin + }; +}; + + +template struct ProdReducer +{ + static const bool PacketAccess = packet_traits::HasMul; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_product_op prod_op; + (*accum) = prod_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = pmul(*accum, p); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(1); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_product_op prod_op; + return prod_op(saccum, predux_mul(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::MulCost, + PacketAccess = PacketType::HasMul + }; +}; + + +struct AndReducer +{ + static const bool PacketAccess = false; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum && t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false + }; +}; + + +struct OrReducer { + static const bool PacketAccess = false; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum || t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return false; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false + }; +}; + + +// Argmin/Argmax reducers +template struct ArgMaxTupleReducer +{ + static const bool PacketAccess = false; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + if (t.second > accum->second) { *accum = t; } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::lowest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + + +template struct ArgMinTupleReducer +{ + static const bool PacketAccess = false; + static const bool IsStateful = false; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const { + if 
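The argmax/argmin reducers in this file work on (index, value) tuples rather than bare scalars, keeping the tuple whose value wins; the winning index is then read out of the tuple's first component. A plain-C++ sketch of the same idea (argmax is an illustrative helper, not the Eigen functor):

#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>

// Illustrative scalar version of ArgMaxTupleReducer: reduce over
// (index, value) pairs, starting from the type's bottom value, and keep the
// pair with the largest value.
std::size_t argmax(const float* data, std::size_t n) {
  std::pair<std::size_t, float> accum(0, -std::numeric_limits<float>::infinity());
  for (std::size_t i = 0; i < n; ++i)
    if (data[i] > accum.second) accum = std::make_pair(i, data[i]);
  return accum.first;
}

int main() {
  const float v[5] = {0.5f, 3.0f, -1.0f, 3.5f, 2.0f};
  std::cout << argmax(v, 5) << "\n";  // prints 3
}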
(t.second < accum->second) { *accum = t; } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::highest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + + +template +class GaussianGenerator { + public: + static const bool PacketAccess = false; + + EIGEN_DEVICE_FUNC GaussianGenerator(const array& means, + const array& std_devs) + : m_means(means) + { + for (size_t i = 0; i < NumDims; ++i) { + m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2; + } + } + + EIGEN_DEVICE_FUNC T operator()(const array& coordinates) const { + T tmp = T(0); + for (size_t i = 0; i < NumDims; ++i) { + T offset = coordinates[i] - m_means[i]; + tmp += offset * offset / m_two_sigmas[i]; + } + return numext::exp(-tmp); + } + + private: + array m_means; + array m_two_sigmas; +}; + +template +struct functor_traits > { + enum { + Cost = NumDims * (2 * NumTraits::AddCost + NumTraits::MulCost + + functor_traits >::Cost) + + functor_traits >::Cost, + PacketAccess = GaussianGenerator::PacketAccess + }; +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h new file mode 100644 index 000000000..e27753b19 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h @@ -0,0 +1,185 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H + +namespace Eigen { + +/** \class TensorGeneratorOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor generator class. 
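GaussianGenerator above precomputes 2*sigma^2 per dimension in its constructor and evaluates exp(-sum_i (x_i - mean_i)^2 / (2*sigma_i^2)) at each coordinate. A standalone sketch of that formula (gaussian_at is a hypothetical helper):

#include <array>
#include <cmath>
#include <cstddef>
#include <iostream>

// Evaluates an unnormalized NumDims-dimensional Gaussian at one coordinate,
// exactly the arithmetic GaussianGenerator::operator() performs.
template <std::size_t NumDims>
float gaussian_at(const std::array<float, NumDims>& coords,
                  const std::array<float, NumDims>& means,
                  const std::array<float, NumDims>& std_devs) {
  float tmp = 0.0f;
  for (std::size_t i = 0; i < NumDims; ++i) {
    const float offset = coords[i] - means[i];
    tmp += offset * offset / (2.0f * std_devs[i] * std_devs[i]);
  }
  return std::exp(-tmp);
}

int main() {
  std::array<float, 2> c{{5.0f, 5.0f}}, m{{5.0f, 5.0f}}, s{{1.0f, 1.0f}};
  std::cout << gaussian_at<2>(c, m, s) << "\n";  // peak of the bump: 1
}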
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorGeneratorOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorGeneratorOp type; +}; + +} // end namespace internal + + + +template +class TensorGeneratorOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator) + : m_xpr(expr), m_generator(generator) {} + + EIGEN_DEVICE_FUNC + const Generator& generator() const { return m_generator; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Generator m_generator; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorGeneratorOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + enum { + IsAligned = false, + PacketAccess = (internal::unpacket_traits::size > 1), + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_generator(op.generator()) + { + TensorEvaluator impl(op.expression(), device); + m_dimensions = impl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + } + } else { + m_strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + array coords; + extract_coordinates(index, coords); + return m_generator(coords); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[packetSize]; + for 
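The generator evaluator converts a flat coefficient index back into per-dimension coordinates using the stride table built in its constructor. A minimal sketch of the column-major case, matching extract_coordinates below (col_major_coords is illustrative):

#include <array>
#include <cstddef>
#include <iostream>

// strides[i] is the product of all dimensions before i; a flat index is
// peeled back into coordinates by repeated division, largest stride first.
template <std::size_t NumDims>
std::array<std::size_t, NumDims>
col_major_coords(std::size_t index, const std::array<std::size_t, NumDims>& dims) {
  std::array<std::size_t, NumDims> strides, coords;
  strides[0] = 1;
  for (std::size_t i = 1; i < NumDims; ++i) strides[i] = strides[i - 1] * dims[i - 1];
  for (std::size_t i = NumDims - 1; i > 0; --i) {
    coords[i] = index / strides[i];
    index -= coords[i] * strides[i];
  }
  coords[0] = index;
  return coords;
}

int main() {
  auto c = col_major_coords<3>(17, {{3, 4, 5}});            // 17 = 2 + 1*3 + 1*12
  std::cout << c[0] << " " << c[1] << " " << c[2] << "\n";  // prints 2 1 1
}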
(int i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool) const { + // TODO(rmlarsen): This is just a placeholder. Define interface to make + // generators return their cost. + return TensorOpCost(0, 0, TensorOpCost::AddCost() + + TensorOpCost::MulCost()); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void extract_coordinates(Index index, array& coords) const { + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[NumDims-1] = index; + } + } + + Dimensions m_dimensions; + array m_strides; + Generator m_generator; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h new file mode 100644 index 000000000..665b861cf --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H + +namespace Eigen { + +/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors. + * + * This function computes the regularized incomplete beta function (integral). + * + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const + TensorCwiseTernaryOp, + const ADerived, const BDerived, const XDerived> + betainc(const ADerived& a, const BDerived& b, const XDerived& x) { + return TensorCwiseTernaryOp< + internal::scalar_betainc_op, const ADerived, + const BDerived, const XDerived>( + a, b, x, internal::scalar_betainc_op()); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h new file mode 100644 index 000000000..a901c5dd4 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H
+#define EIGEN_CXX11_TENSOR_TENSOR_IO_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Print the tensor as a 2d matrix
+template <typename Tensor, int Rank>
+struct TensorPrinter {
+  static void run (std::ostream& os, const Tensor& tensor) {
+    typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
+    typedef typename Tensor::Index Index;
+    const Index total_size = internal::array_prod(tensor.dimensions());
+    if (total_size > 0) {
+      const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions());
+      static const int layout = Tensor::Layout;
+      Map<const Array<Scalar, Dynamic, Dynamic, layout> > matrix(const_cast<Scalar*>(tensor.data()), first_dim, total_size/first_dim);
+      os << matrix;
+    }
+  }
+};
+
+
+// Print the tensor as a vector
+template <typename Tensor>
+struct TensorPrinter<Tensor, 1> {
+  static void run (std::ostream& os, const Tensor& tensor) {
+    typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
+    typedef typename Tensor::Index Index;
+    const Index total_size = internal::array_prod(tensor.dimensions());
+    if (total_size > 0) {
+      Map<const Array<Scalar, Dynamic, 1> > array(const_cast<Scalar*>(tensor.data()), total_size);
+      os << array;
+    }
+  }
+};
+
+
+// Print the tensor as a scalar
+template <typename Tensor>
+struct TensorPrinter<Tensor, 0> {
+  static void run (std::ostream& os, const Tensor& tensor) {
+    os << tensor.coeff(0);
+  }
+};
+}
+
+template <typename T>
+std::ostream& operator << (std::ostream& os, const TensorBase<T, ReadOnlyAccessors>& expr) {
+  typedef TensorEvaluator<const TensorForcedEvalOp<const T>, DefaultDevice> Evaluator;
+  typedef typename Evaluator::Dimensions Dimensions;
+
+  // Evaluate the expression if needed
+  TensorForcedEvalOp<const T> eval = expr.eval();
+  Evaluator tensor(eval, DefaultDevice());
+  tensor.evalSubExprsIfNeeded(NULL);
+
+  // Print the result
+  static const int rank = internal::array_size<Dimensions>::value;
+  internal::TensorPrinter<Evaluator, rank>::run(os, tensor);
+
+  // Cleanup.
+  tensor.cleanup();
+  return os;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
new file mode 100644
index 000000000..566856ed2
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
+#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
+
+namespace Eigen {
+
+/** \class TensorImagePatch
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Patch extraction specialized for image processing.
+ * This assumes that the input has at least 3 dimensions ordered as follows:
+ * 1st dimension: channels (of size d)
+ * 2nd dimension: rows (of size r)
+ * 3rd dimension: columns (of size c)
+ * There can be additional dimensions such as time (for video) or batch (for
+ * bulk processing) after the first 3.
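As an aside on the TensorPrinter above: it relies on the fact that a dense rank-N tensor can be viewed, without copying, as a first_dim x (total_size / first_dim) 2-D array. A small sketch using only Eigen/Core's Map (the shape 2x3x2 is an arbitrary example):

#include <Eigen/Core>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // A rank-3 tensor of shape 2x3x2, stored as one contiguous buffer.
  std::vector<float> data(2 * 3 * 2);
  for (std::size_t i = 0; i < data.size(); ++i) data[i] = static_cast<float>(i);

  // View it as a 2 x 6 array, exactly as TensorPrinter does, and stream it.
  const Eigen::Index first_dim = 2;
  const Eigen::Index rest = static_cast<Eigen::Index>(data.size()) / first_dim;
  Eigen::Map<const Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> >
      matrix(data.data(), first_dim, rest);
  std::cout << matrix << "\n";  // 2 rows, 6 columns
}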
+ * Calling the image patch code with patch_rows and patch_cols is equivalent + * to calling the regular patch extraction code with parameters d, patch_rows, + * patch_cols, and 1 for all the additional dimensions. + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename internal::remove_const::type Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorImagePatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorImagePatchOp type; +}; + +} // end namespace internal + +template +class TensorImagePatchOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + PaddingType padding_type, Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0), + m_padding_type(padding_type), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + DenseIndex padding_top, DenseIndex padding_bottom, + DenseIndex padding_left, DenseIndex padding_right, + Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom), + m_padding_left(padding_left), m_padding_right(padding_right), + m_padding_type(PADDING_VALID), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC + DenseIndex patch_rows() const { return m_patch_rows; } + EIGEN_DEVICE_FUNC + DenseIndex patch_cols() const { return m_patch_cols; } + EIGEN_DEVICE_FUNC + DenseIndex row_strides() const { return m_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_strides() const { return m_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_row_strides() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_col_strides() const { 
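A quick sketch of the output-size arithmetic the evaluator below performs: VALID padding keeps only fully-inside patch positions, while SAME padding keeps ceil(input / stride) positions and derives the implicit top/left padding from that. output_size is an illustrative helper, not part of the class:

#include <cmath>
#include <iostream>

// Number of patch positions along one dimension, for the two padding modes
// handled in the evaluator's constructor.
long output_size(long input, long patch, long stride, bool same_padding) {
  if (same_padding)
    return static_cast<long>(std::ceil(input / static_cast<double>(stride)));
  return static_cast<long>(std::ceil((input - patch + 1) / static_cast<double>(stride)));
}

int main() {
  std::cout << output_size(10, 3, 1, false) << "\n";  // VALID: 8 patch rows
  std::cout << output_size(10, 3, 2, true)  << "\n";  // SAME: 5 patch rows
}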
return m_in_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_inflate_strides() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_inflate_strides() const { return m_col_inflate_strides; } + EIGEN_DEVICE_FUNC + bool padding_explicit() const { return m_padding_explicit; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top() const { return m_padding_top; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom() const { return m_padding_bottom; } + EIGEN_DEVICE_FUNC + DenseIndex padding_left() const { return m_padding_left; } + EIGEN_DEVICE_FUNC + DenseIndex padding_right() const { return m_padding_right; } + EIGEN_DEVICE_FUNC + PaddingType padding_type() const { return m_padding_type; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const DenseIndex m_patch_rows; + const DenseIndex m_patch_cols; + const DenseIndex m_row_strides; + const DenseIndex m_col_strides; + const DenseIndex m_in_row_strides; + const DenseIndex m_in_col_strides; + const DenseIndex m_row_inflate_strides; + const DenseIndex m_col_inflate_strides; + const bool m_padding_explicit; + const DenseIndex m_padding_top; + const DenseIndex m_padding_bottom; + const DenseIndex m_padding_left; + const DenseIndex m_padding_right; + const PaddingType m_padding_type; + const Scalar m_padding_value; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorImagePatchOp XprType; + typedef typename XprType::Index Index; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims + 1; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef TensorEvaluator, + Device> Self; + typedef TensorEvaluator Impl; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE); + + m_paddingValue = op.padding_value(); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + // Caches a few variables. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputDepth = input_dims[0]; + m_inputRows = input_dims[1]; + m_inputCols = input_dims[2]; + } else { + m_inputDepth = input_dims[NumInputDims-1]; + m_inputRows = input_dims[NumInputDims-2]; + m_inputCols = input_dims[NumInputDims-3]; + } + + m_row_strides = op.row_strides(); + m_col_strides = op.col_strides(); + + // Input strides and effective input/patch size + m_in_row_strides = op.in_row_strides(); + m_in_col_strides = op.in_col_strides(); + m_row_inflate_strides = op.row_inflate_strides(); + m_col_inflate_strides = op.col_inflate_strides(); + // The "effective" input rows and input cols are the input rows and cols + // after inflating them with zeros. + // For examples, a 2x3 matrix with row_inflate_strides and + // col_inflate_strides of 2 comes from: + // A B C + // D E F + // + // to a matrix is 3 x 5: + // + // A . B . C + // . . . . . + // D . E . 
F + + m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1; + m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1; + m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1); + m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1); + + if (op.padding_explicit()) { + m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_rowPaddingTop = op.padding_top(); + m_colPaddingLeft = op.padding_left(); + } else { + // Computing padding from the type + switch (op.padding_type()) { + case PADDING_VALID: + m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = numext::maxi(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2); + m_colPaddingLeft = numext::maxi(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2); + break; + case PADDING_SAME: + m_outputRows = numext::ceil(m_input_rows_eff / static_cast(m_row_strides)); + m_outputCols = numext::ceil(m_input_cols_eff / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2; + m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2; + break; + default: + eigen_assert(false && "unexpected padding"); + } + } + eigen_assert(m_outputRows > 0); + eigen_assert(m_outputCols > 0); + + // Dimensions for result of extraction. + if (static_cast(Layout) == static_cast(ColMajor)) { + // ColMajor + // 0: depth + // 1: patch_rows + // 2: patch_cols + // 3: number of patches + // 4 and beyond: anything else (such as batch). + m_dimensions[0] = input_dims[0]; + m_dimensions[1] = op.patch_rows(); + m_dimensions[2] = op.patch_cols(); + m_dimensions[3] = m_outputRows * m_outputCols; + for (int i = 4; i < NumDims; ++i) { + m_dimensions[i] = input_dims[i-1]; + } + } else { + // RowMajor + // NumDims-1: depth + // NumDims-2: patch_rows + // NumDims-3: patch_cols + // NumDims-4: number of patches + // NumDims-5 and beyond: anything else (such as batch). + m_dimensions[NumDims-1] = input_dims[NumInputDims-1]; + m_dimensions[NumDims-2] = op.patch_rows(); + m_dimensions[NumDims-3] = op.patch_cols(); + m_dimensions[NumDims-4] = m_outputRows * m_outputCols; + for (int i = NumDims-5; i >= 0; --i) { + m_dimensions[i] = input_dims[i]; + } + } + + // Strides for moving the patch in various dimensions. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_colStride = m_dimensions[1]; + m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0]; + m_otherStride = m_patchStride * m_dimensions[3]; + } else { + m_colStride = m_dimensions[NumDims-2]; + m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1]; + m_otherStride = m_patchStride * m_dimensions[NumDims-4]; + } + + // Strides for navigating through the input tensor. + m_rowInputStride = m_inputDepth; + m_colInputStride = m_inputDepth * m_inputRows; + m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols; + + // Fast representations of different variables. 
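The "effective" sizes above follow two small formulas: inflating a length-n dimension by stride s inserts s-1 zeros between samples, giving (n-1)*s + 1 entries, and dilating a length-p patch by in-stride d makes it span p + (p-1)*(d-1) input entries. A sketch that reproduces the 2x3 -> 3x5 example from the comment (helper names are illustrative):

#include <iostream>

long inflated_size(long n, long s) { return (n - 1) * s + 1; }
long dilated_patch(long p, long d) { return p + (p - 1) * (d - 1); }

int main() {
  std::cout << inflated_size(3, 2) << "\n";  // the 2x3 example: 3 cols -> 5
  std::cout << inflated_size(2, 2) << "\n";  // 2 rows -> 3
  std::cout << dilated_patch(3, 2) << "\n";  // a 3-wide patch with dilation 2 spans 5
}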
+ m_fastOtherStride = internal::TensorIntDivisor(m_otherStride); + m_fastPatchStride = internal::TensorIntDivisor(m_patchStride); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastInflateRowStride = internal::TensorIntDivisor(m_row_inflate_strides); + m_fastInflateColStride = internal::TensorIntDivisor(m_col_inflate_strides); + m_fastInputColsEff = internal::TensorIntDivisor(m_input_cols_eff); + + // Number of patches in the width dimension. + m_fastOutputRows = internal::TensorIntDivisor(m_outputRows); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[0]); + } else { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[NumDims-1]); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Patch index corresponding to the passed in index. + const Index patchIndex = index / m_fastPatchStride; + // Find the offset of the element wrt the location of the first element. + const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth; + + // Other ways to index this element. + const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride; + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; + + // Calculate col index in the input original tensor. + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft; + const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0); + if (inputCol < 0 || inputCol >= m_input_cols_eff || + ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate row index in the original input tensor. + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop; + const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0); + if (inputRow < 0 || inputRow >= m_input_rows_eff || + ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) { + return Scalar(m_paddingValue); + } + + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
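coeff() above maps an output coordinate back into the input and substitutes the padding value whenever the mapped position falls outside the image. A stripped-down sketch of that bounds test (patch_read is hypothetical; depth, batch, and inflation are omitted):

#include <iostream>

// Reads one pixel of a column-major image, returning the padding value for
// positions that the patch stride/offset arithmetic mapped out of bounds.
float patch_read(const float* input, long input_rows, long input_cols,
                 long row, long col, float padding_value) {
  if (row < 0 || row >= input_rows || col < 0 || col >= input_cols)
    return padding_value;                // out of bounds: padded region
  return input[row + col * input_rows]; // column-major layout
}

int main() {
  const float img[4] = {1, 2, 3, 4};    // a 2x2 image, column-major
  std::cout << patch_read(img, 2, 2, -1, 0, 0.0f) << "\n";  // 0 (padding)
  std::cout << patch_read(img, 2, 2, 1, 1, 0.0f) << "\n";   // 4
}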
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + + const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) { + return packetWithPossibleZero(index); + } + + const Index indices[2] = {index, index + PacketSize - 1}; + const Index patchIndex = indices[0] / m_fastPatchStride; + if (patchIndex != indices[1] / m_fastPatchStride) { + return packetWithPossibleZero(index); + } + const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride; + eigen_assert(otherIndex == indices[1] / m_fastOtherStride); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth, + (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth}; + + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; + eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); + + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride}; + + // Calculate col indices in the original input tensor. + const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - + m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft}; + if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputCols[0] == inputCols[1]) { + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + // Calculate col indices in the original input tensor. + const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - + m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop}; + + if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) { + // no padding + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.template packet(inputIndex); + } + } + + return packetWithPossibleZero(index); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + Index rowPaddingTop() const { return m_rowPaddingTop; } + Index colPaddingLeft() const { return m_colPaddingLeft; } + Index outputRows() const { return m_outputRows; } + Index outputCols() const { return m_outputCols; } + Index userRowStride() const { return m_row_strides; } + Index userColStride() const { return m_col_strides; } + Index userInRowStride() const { return m_in_row_strides; } + Index userInColStride() const { return m_in_col_strides; } + Index rowInflateStride() const { return m_row_inflate_strides; } + Index colInflateStride() const { return m_col_inflate_strides; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We conservatively estimate the cost for the code path where the computed + // index is inside the original image and + // TensorEvaluator::CoordAccess is false. + const double compute_cost = 3 * TensorOpCost::DivCost() + + 6 * TensorOpCost::MulCost() + + 8 * TensorOpCost::MulCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + + Index m_otherStride; + Index m_patchStride; + Index m_colStride; + Index m_row_strides; + Index m_col_strides; + + Index m_in_row_strides; + Index m_in_col_strides; + Index m_row_inflate_strides; + Index m_col_inflate_strides; + + Index m_input_rows_eff; + Index m_input_cols_eff; + Index m_patch_rows_eff; + Index m_patch_cols_eff; + + internal::TensorIntDivisor m_fastOtherStride; + internal::TensorIntDivisor m_fastPatchStride; + internal::TensorIntDivisor m_fastColStride; + internal::TensorIntDivisor m_fastInflateRowStride; + internal::TensorIntDivisor m_fastInflateColStride; + internal::TensorIntDivisor m_fastInputColsEff; + + Index m_rowInputStride; + Index m_colInputStride; + Index m_patchInputStride; + + Index m_inputDepth; + Index m_inputRows; + Index m_inputCols; + + Index m_outputRows; + Index m_outputCols; + + Index m_rowPaddingTop; + Index m_colPaddingLeft; + + internal::TensorIntDivisor m_fastOutputRows; + internal::TensorIntDivisor m_fastOutputDepth; + + Scalar m_paddingValue; + + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h new file mode 100644 index 000000000..3209fecd3 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h @@ 
-0,0 +1,725 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H +#define EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H + + +#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES + +#define EIGEN_HAS_INDEX_LIST + +namespace Eigen { + +/** \internal + * + * \class TensorIndexList + * \ingroup CXX11_Tensor_Module + * + * \brief Set of classes used to encode a set of Tensor dimensions/indices. + * + * The indices in the list can be known at compile time or at runtime. A mix + * of static and dynamic indices can also be provided if needed. The tensor + * code will attempt to take advantage of the indices that are known at + * compile time to optimize the code it generates. + * + * This functionality requires a c++11 compliant compiler. If your compiler + * is older you need to use arrays of indices instead. + * + * Several examples are provided in the cxx11_tensor_index_list.cpp file. + * + * \sa Tensor + */ + +template +struct type2index { + static const DenseIndex value = n; + EIGEN_DEVICE_FUNC constexpr operator DenseIndex() const { return n; } + EIGEN_DEVICE_FUNC void set(DenseIndex val) { + eigen_assert(val == n); + } +}; + +// This can be used with IndexPairList to get compile-time constant pairs, +// such as IndexPairList, type2indexpair<3,4>>(). +template +struct type2indexpair { + static const DenseIndex first = f; + static const DenseIndex second = s; + + constexpr EIGEN_DEVICE_FUNC operator IndexPair() const { + return IndexPair(f, s); + } + + EIGEN_DEVICE_FUNC void set(const IndexPair& val) { + eigen_assert(val.first == f); + eigen_assert(val.second == s); + } +}; + + +template struct NumTraits > +{ + typedef DenseIndex Real; + enum { + IsComplex = 0, + RequireInitialization = false, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; + + EIGEN_DEVICE_FUNC static inline Real epsilon() { return 0; } + EIGEN_DEVICE_FUNC static inline Real dummy_precision() { return 0; } + EIGEN_DEVICE_FUNC static inline Real highest() { return n; } + EIGEN_DEVICE_FUNC static inline Real lowest() { return n; } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) { + val = new_val; +} +template +EIGEN_DEVICE_FUNC void update_value(type2index& val, DenseIndex new_val) { + val.set(new_val); +} + +template +EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair new_val) { + val = new_val; +} +template +EIGEN_DEVICE_FUNC void update_value(type2indexpair& val, IndexPair new_val) { + val.set(new_val); +} + + +template +struct is_compile_time_constant { + static constexpr bool value = false; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; +template +struct 
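type2index above encodes an index in the type itself, so it occupies no storage and the compiler can fold it, while still converting implicitly to a plain index where one is expected. A minimal sketch of the trick (StaticIndex is illustrative, not the Eigen class):

#include <iostream>

// A dimension fixed at compile time: the value lives in the template
// argument, and the conversion operator lets it be used like a runtime index.
template <long n>
struct StaticIndex {
  static const long value = n;
  constexpr operator long() const { return n; }
};

int main() {
  StaticIndex<2> dim;  // "the axis is 2", known statically
  static_assert(StaticIndex<2>::value == 2, "known at compile time");
  std::cout << dim + 1 << "\n";  // prints 3
}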
is_compile_time_constant& > { + static constexpr bool value = true; +}; + + +template +struct IndexTuple; + +template +struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head(), others() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v, const O... o) : head(v), others(o...) { } + + constexpr static int count = 1 + sizeof...(O); + T head; + IndexTuple others; + typedef T Head; + typedef IndexTuple Other; +}; + +template + struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v) : head(v) { } + + constexpr static int count = 1; + T head; + typedef T Head; +}; + + +template +struct IndexTupleExtractor; + +template +struct IndexTupleExtractor { + + typedef typename IndexTupleExtractor::ValType ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + IndexTupleExtractor::set_val(val.others, new_val); + } + +}; + +template + struct IndexTupleExtractor<0, T, O...> { + + typedef T ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return val.head; + } + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return val.head; + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + val.head = new_val; + } +}; + + + +template +EIGEN_DEVICE_FUNC constexpr typename IndexTupleExtractor::ValType& array_get(IndexTuple& tuple) { + return IndexTupleExtractor::get_val(tuple); +} +template +EIGEN_DEVICE_FUNC constexpr const typename IndexTupleExtractor::ValType& array_get(const IndexTuple& tuple) { + return IndexTupleExtractor::get_val(tuple); +} +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; + + + + +template +struct tuple_coeff { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex i, const IndexTuple& t) { + // return array_get(t) * (i == Idx) + tuple_coeff::get(i, t) * (i != Idx); + return (i == Idx ? 
array_get(t) : tuple_coeff::get(i, t)); + } + template + EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple& t, const ValueT& value) { + if (i == Idx) { + update_value(array_get(t), value); + } else { + tuple_coeff::set(i, t, value); + } + } + + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple& t) { + return ((i == Idx) & is_compile_time_constant::ValType>::value) || + tuple_coeff::value_known_statically(i, t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + tuple_coeff::values_up_to_known_statically(t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + is_compile_time_constant::ValType>::value && + array_get(t) > array_get(t) && + tuple_coeff::values_up_to_statically_known_to_increase(t); + } +}; + +template +struct tuple_coeff<0, ValueT> { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex /*i*/, const IndexTuple& t) { + // eigen_assert (i == 0); // gcc fails to compile assertions in constexpr + return array_get<0>(t)/* * (i == 0)*/; + } + template + EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple& t, const ValueT value) { + eigen_assert (i == 0); + update_value(array_get<0>(t), value); + } + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple&) { + return is_compile_time_constant::ValType>::value & (i == 0); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple&) { + return is_compile_time_constant::ValType>::value; + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple&) { + return true; + } +}; +} // namespace internal + + + +template +struct IndexList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex operator[] (const DenseIndex i) const { + return internal::tuple_coeff >::value-1, DenseIndex>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex get(const DenseIndex i) const { + return internal::tuple_coeff >::value-1, DenseIndex>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const DenseIndex value) { + return internal::tuple_coeff >::value-1, DenseIndex>::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple(first, other...) { } + EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const { + return internal::tuple_coeff >::value-1, DenseIndex>::value_known_statically(i, *this); + } + EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const { + return internal::tuple_coeff >::value-1, DenseIndex>::values_up_to_known_statically(*this); + } + + EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const { + return internal::tuple_coeff >::value-1, DenseIndex>::values_up_to_statically_known_to_increase(*this); + } +}; + + +template +constexpr IndexList make_index_list(FirstType val1, OtherTypes... 
other_vals) { + return IndexList(val1, other_vals...); +} + + +template +struct IndexPairList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair operator[] (const DenseIndex i) const { + return internal::tuple_coeff >::value-1, IndexPair>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const IndexPair value) { + return internal::tuple_coeff>::value-1, IndexPair >::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const { + return internal::tuple_coeff >::value-1, DenseIndex>::value_known_statically(i, *this); + } +}; + +namespace internal { + +template size_t array_prod(const IndexList& sizes) { + size_t result = 1; + for (int i = 0; i < array_size >::value; ++i) { + result *= sizes[i]; + } + return result; +} + +template struct array_size > { + static const size_t value = array_size >::value; +}; +template struct array_size > { + static const size_t value = array_size >::value; +}; + +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; + +template EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(IndexList& a) { + return IndexTupleExtractor::get_val(a); +} +template EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(const IndexList& a) { + return IndexTupleExtractor::get_val(a); +} + +template +struct index_known_statically_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { + return false; + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) { + return IndexList().value_known_statically(i); + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) { + return IndexList().value_known_statically(i); + } +}; + + +template +struct all_indices_known_statically_impl { + static constexpr bool run() { + return false; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + + +template +struct indices_statically_known_to_increase_impl { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return false; + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return Eigen::IndexList().values_statically_known_to_increase(); + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return Eigen::IndexList().values_statically_known_to_increase(); + } +}; + + +template +struct index_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) == value); + } +}; + +template +struct index_statically_eq_impl > { + 
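The index_statically_* machinery in this file answers compile-time questions conservatively: true only when the index is both statically known and has the queried property, false for anything runtime. A tiny sketch of that policy (StaticDim is hypothetical):

#include <iostream>

// Mirrors the shape of index_statically_eq: a query succeeds only when the
// value is known at compile time AND equals the requested one; a runtime
// index always answers false, so callers can only rely on positive answers.
template <bool Known, long Value>
struct StaticDim {
  static constexpr bool statically_eq(long v) { return Known && Value == v; }
};

int main() {
  std::cout << StaticDim<true, 2>::statically_eq(2) << "\n";   // 1: known and equal
  std::cout << StaticDim<false, 0>::statically_eq(2) << "\n";  // 0: unknown at compile time
}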
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) == value); + } +}; + + +template +struct index_statically_ne_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) != value); + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) != value); + } +}; + + +template +struct index_statically_gt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) > value); + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) > value); + } +}; + + + +template +struct index_statically_lt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) < value); + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) < value); + } +}; + + + +template +struct index_pair_first_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).first == value); + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).first == value); + } +}; + + + +template +struct index_pair_second_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).second == value); + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).second == value); + } +}; + + +} // end namespace internal +} // end namespace Eigen + +#else + +namespace Eigen { +namespace internal { + +template +struct index_known_statically_impl { + static 
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { + return false; + } +}; + +template +struct all_indices_known_statically_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct indices_statically_known_to_increase_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct index_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_ne_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_gt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_statically_lt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) { + return false; + } +}; + + + +} // end namespace internal +} // end namespace Eigen + +#endif + + +namespace Eigen { +namespace internal { +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(DenseIndex i) { + return index_known_statically_impl::run(i); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool all_indices_known_statically() { + return all_indices_known_statically_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increase() { + return indices_statically_known_to_increase_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(DenseIndex i, DenseIndex value) { + return index_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(DenseIndex i, DenseIndex value) { + return index_statically_ne_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(DenseIndex i, DenseIndex value) { + return index_statically_gt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(DenseIndex i, DenseIndex value) { + return index_statically_lt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(DenseIndex i, DenseIndex value) { + return index_pair_first_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(DenseIndex i, DenseIndex value) { + return index_pair_second_statically_eq_impl::run(i, value); +} + +} // end namespace internal +} // end namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h new file mode 100644 index 000000000..f391fb9ee --- /dev/null +++ 
b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h @@ -0,0 +1,229 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Ke Yang +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H + +namespace Eigen { + +/** \class TensorInflation + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor inflation class. + * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorInflationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorInflationOp type; +}; + +} // end namespace internal + +template +class TensorInflationOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides) + : m_xpr(expr), m_strides(strides) {} + + EIGEN_DEVICE_FUNC + const Strides& strides() const { return m_strides; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Strides m_strides; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorInflationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_strides(op.strides()) + { + m_dimensions = m_impl.dimensions(); + // Expand each dimension to the inflated dimension. + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1; + } + + // Remember the strides for fast division. 
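+    // Illustrative semantics (hypothetical 1-D sketch): inflating [a, b, c]
+    // with stride 2 yields [a, 0, b, 0, c]; each dimension d expands to
+    // (d - 1) * stride + 1 and the in-between "holes" read as zero.
+    //   Eigen::Tensor<float, 1> in(3);
+    //   in.setValues({1.f, 2.f, 3.f});
+    //   Eigen::array<Eigen::DenseIndex, 1> strides{{2}};
+    //   Eigen::Tensor<float, 1> out = in.inflate(strides);  // {1, 0, 2, 0, 3}
+    // The divisors cached below serve the per-coefficient stride divisions
+    // in getInputIndex().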
+ for (int i = 0; i < NumDims; ++i) { + m_fastStrides[i] = internal::TensorIntDivisor(m_strides[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + } else { // RowMajor + m_outputStrides[NumDims-1] = 1; + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + // Computes the input index given the output index. Returns true if the output + // index doesn't fall into a hole. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const + { + eigen_assert(index < dimensions().TotalSize()); + *inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[0] * m_strides[0]) { + return false; + } + *inputIndex += index / m_strides[0]; + return true; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) { + return false; + } + *inputIndex += index / m_strides[NumDims - 1]; + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + Index inputIndex = 0; + if (getInputIndex(index, &inputIndex)) { + return m_impl.coeff(inputIndex); + } else { + return Scalar(0); + } + } + + // TODO(yangke): optimize this function so that we can detect and produce + // all-zero packets + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (3 * TensorOpCost::DivCost() + + 3 * TensorOpCost::MulCost() + + 2 * TensorOpCost::AddCost()); + const double input_size = m_impl.dimensions().TotalSize(); + const double output_size = m_dimensions.TotalSize(); + if (output_size == 0) + return TensorOpCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0, + compute_cost, vectorized, 
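+                          // (first TensorOpCost argument: average bytes
+                          // loaded per output coefficient; only
+                          // input_size / output_size of the outputs hit a
+                          // real input, the holes cost no memory traffic)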
PacketSize); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Strides m_strides; + array, NumDims> m_fastStrides; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h new file mode 100644 index 000000000..33edc49e3 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h @@ -0,0 +1,82 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H +#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H + +#if EIGEN_HAS_VARIADIC_TEMPLATES + +#include + +namespace Eigen { + +/** \class TensorInitializer + * \ingroup CXX11_Tensor_Module + * + * \brief Helper template to initialize Tensors from std::initializer_lists. + */ +namespace internal { + +template +struct Initializer { + typedef std::initializer_list< + typename Initializer::InitList> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + for (auto v : vals) { + (*indices)[traits::NumDimensions - N] = i++; + Initializer::run(tensor, indices, v); + } + } +}; + +template +struct Initializer { + typedef std::initializer_list::Scalar> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + // There is likely a faster way to do that than iterating. 
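+    // The end-user entry point is Tensor::setValues, which forwards to
+    // initialize_tensor() below. Illustrative sketch (hypothetical values):
+    //   Eigen::Tensor<int, 2> t(2, 3);
+    //   t.setValues({{1, 2, 3}, {4, 5, 6}});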
+ for (auto v : vals) { + (*indices)[traits::NumDimensions - 1] = i++; + tensor.coeffRef(*indices) = v; + } + } +}; + +template +struct Initializer { + typedef typename traits::Scalar InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>*, + const InitList& v) { + tensor.coeffRef(0) = v; + } +}; + + +template +void initialize_tensor(TensorEvaluator& tensor, + const typename Initializer::NumDimensions>::InitList& vals) { + Eigen::array::Index, traits::NumDimensions> indices; + Initializer::NumDimensions>::run(tensor, &indices, vals); +} + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h new file mode 100644 index 000000000..ede3939c2 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h @@ -0,0 +1,253 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H +#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H + + +namespace Eigen { + +/** \internal + * + * \class TensorIntDiv + * \ingroup CXX11_Tensor_Module + * + * \brief Fast integer division by a constant. + * + * See the paper from Granlund and Montgomery for explanation. + * (at http://dx.doi.org/10.1145/773473.178249) + * + * \sa Tensor + */ + +namespace internal { + +namespace { + + // Note: result is undefined if val == 0 + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef __CUDA_ARCH__ + return __clz(val); +#elif EIGEN_COMP_MSVC + unsigned long index; + _BitScanReverse(&index, val); + return 31 - index; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clz(static_cast(val)); +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef __CUDA_ARCH__ + return __clzll(val); +#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64 + unsigned long index; + _BitScanReverse64(&index, val); + return 63 - index; +#elif EIGEN_COMP_MSVC + // MSVC's _BitScanReverse64 is not available for 32bits builds. 
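+    // Fall back to two 32-bit scans: if the high word is zero, the result
+    // is 32 plus the count of the low word; otherwise it is the count of
+    // the high word alone.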
+ unsigned int lo = (unsigned int)(val&0xffffffff); + unsigned int hi = (unsigned int)((val>>32)&0xffffffff); + int n; + if(hi==0) + n = 32 + count_leading_zeros(lo); + else + n = count_leading_zeros(hi); + return n; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clzll(static_cast(val)); +#endif + } + + template + struct UnsignedTraits { + typedef typename conditional::type type; + }; + + template + struct DividerTraits { + typedef typename UnsignedTraits::type type; + static const int N = sizeof(T) * 8; + }; + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) { +#if defined(__CUDA_ARCH__) + return __umulhi(a, b); +#else + return (static_cast(a) * b) >> 32; +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) { +#if defined(__CUDA_ARCH__) + return __umul64hi(a, b); +#elif defined(__SIZEOF_INT128__) + __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b); + return static_cast(v >> 64); +#else + return (TensorUInt128, uint64_t>(a) * TensorUInt128, uint64_t>(b)).upper(); +#endif + } + + template + struct DividerHelper { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) { + EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE); + return static_cast((static_cast(1) << (N+log_div)) / divider - (static_cast(1) << N) + 1); + } + }; + + template + struct DividerHelper<64, T> { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) { +#if defined(__SIZEOF_INT128__) && !defined(__CUDA_ARCH__) + return static_cast((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1); +#else + const uint64_t shift = 1ULL << log_div; + TensorUInt128 result = TensorUInt128 >(shift, 0) / TensorUInt128, uint64_t>(divider) + - TensorUInt128, static_val<0> >(1, 0) + + TensorUInt128, static_val<1> >(1); + return static_cast(result); +#endif + } + }; +} + + +template +struct TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + multiplier = 0; + shift1 = 0; + shift2 = 0; + } + + // Must have 0 < divider < 2^31. This is relaxed to + // 0 < divider < 2^63 when using 64-bit indices on platforms that support + // the __uint128_t type. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) { + const int N = DividerTraits::N; + eigen_assert(static_cast::type>(divider) < NumTraits::highest()/2); + eigen_assert(divider > 0); + + // fast ln2 + const int leading_zeros = count_leading_zeros(static_cast(divider)); + int log_div = N - leading_zeros; + // if divider is a power of two then log_div is 1 more than it should be. + if ((static_cast::type>(1) << (log_div-1)) == static_cast::type>(divider)) + log_div--; + + multiplier = DividerHelper::computeMultiplier(log_div, divider); + shift1 = log_div > 1 ? 1 : log_div; + shift2 = log_div > 1 ? log_div-1 : 0; + } + + // Must have 0 <= numerator. On platforms that dont support the __uint128_t + // type numerator should also be less than 2^32-1. 
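+  // Illustrative use (hypothetical values): the divisor is preprocessed
+  // once into a multiplier plus two shifts, so each divide() costs a
+  // widening multiply instead of a hardware division:
+  //   Eigen::internal::TensorIntDivisor<int> div7(7);
+  //   int q = 100 / div7;  // 14, via the operator/ overload at the end of this file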
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const { + eigen_assert(static_cast::type>(numerator) < NumTraits::highest()/2); + //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above + + UnsignedType t1 = muluh(multiplier, numerator); + UnsignedType t = (static_cast(numerator) - t1) >> shift1; + return (t1 + t) >> shift2; + } + + private: + typedef typename DividerTraits::type UnsignedType; + UnsignedType multiplier; + int32_t shift1; + int32_t shift2; +}; + + +// Optimized version for signed 32 bit integers. +// Derived from Hacker's Delight. +// Only works for divisors strictly greater than one +template <> +class TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + magic = 0; + shift = 0; + } + // Must have 2 <= divider + EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) { + eigen_assert(divider >= 2); + calcMagic(divider); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const { +#ifdef __CUDA_ARCH__ + return (__umulhi(magic, n) >> shift); +#else + uint64_t v = static_cast(magic) * static_cast(n); + return (static_cast(v >> 32) >> shift); +#endif + } + +private: + // Compute the magic numbers. See Hacker's Delight section 10 for an in + // depth explanation. + EIGEN_DEVICE_FUNC void calcMagic(int32_t d) { + const unsigned two31 = 0x80000000; // 2**31. + unsigned ad = d; + unsigned t = two31 + (ad >> 31); + unsigned anc = t - 1 - t%ad; // Absolute value of nc. + int p = 31; // Init. p. + unsigned q1 = two31/anc; // Init. q1 = 2**p/|nc|. + unsigned r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|). + unsigned q2 = two31/ad; // Init. q2 = 2**p/|d|. + unsigned r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|). + unsigned delta = 0; + do { + p = p + 1; + q1 = 2*q1; // Update q1 = 2**p/|nc|. + r1 = 2*r1; // Update r1 = rem(2**p, |nc|). + if (r1 >= anc) { // (Must be an unsigned + q1 = q1 + 1; // comparison here). + r1 = r1 - anc;} + q2 = 2*q2; // Update q2 = 2**p/|d|. + r2 = 2*r2; // Update r2 = rem(2**p, |d|). + if (r2 >= ad) { // (Must be an unsigned + q2 = q2 + 1; // comparison here). + r2 = r2 - ad;} + delta = ad - r2; + } while (q1 < delta || (q1 == delta && r1 == 0)); + + magic = (unsigned)(q2 + 1); + shift = p - 32; + } + + uint32_t magic; + int32_t shift; +}; + + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor& divisor) { + return divisor.divide(numerator); +} + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h new file mode 100644 index 000000000..cd0109ef4 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h @@ -0,0 +1,209 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H + +namespace Eigen { + +/** \class TensorLayoutSwap + * \ingroup CXX11_Tensor_Module + * + * \brief Swap the layout from col-major to row-major, or row-major + * to col-major, and invert the order of the dimensions. + * + * Beware: the dimensions are reversed by this operation. If you want to + * preserve the ordering of the dimensions, you need to combine this + * operation with a shuffle. + * + * \example: + * Tensor input(2, 4); + * Tensor output = input.swap_layout(); + * eigen_assert(output.dimension(0) == 4); + * eigen_assert(output.dimension(1) == 2); + * + * array shuffle(1, 0); + * output = input.swap_layout().shuffle(shuffle); + * eigen_assert(output.dimension(0) == 2); + * eigen_assert(output.dimension(1) == 4); + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = (traits::Layout == ColMajor) ? RowMajor : ColMajor; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorLayoutSwapOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorLayoutSwapOp type; +}; + +} // end namespace internal + + + +template +class TensorLayoutSwapOp : public TensorBase, WriteAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const TensorLayoutSwapOp& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + protected: + typename XprType::Nested m_xpr; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorLayoutSwapOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? 
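+             // (the evaluator advertises the flipped layout; coefficient
+             // and packet accesses below pass linear indices straight
+             // through, since reversing the dimension order together with
+             // the layout leaves the memory order unchanged)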
RowMajor : ColMajor, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + for(int i = 0; i < NumDims; ++i) { + m_dimensions[i] = m_impl.dimensions()[NumDims-1-i]; + } + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return m_impl.data(); } + + const TensorEvaluator& impl() const { return m_impl; } + + protected: + TensorEvaluator m_impl; + Dimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorLayoutSwapOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? RowMajor : ColMajor, + CoordAccess = false // to be implemented + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h new file mode 100644 index 000000000..ee0078bbc --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h @@ -0,0 +1,54 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H + + +/** use this macro in sfinae selection in templated functions + * + * template::value , int >::type = 0 + * > + * void foo(){} + * + * becomes => + * + * template::value ) + * > + * void foo(){} + */ + +// SFINAE requires variadic templates +#ifndef __CUDACC__ +#if EIGEN_HAS_VARIADIC_TEMPLATES + // SFINAE doesn't work for gcc <= 4.7 + #ifdef EIGEN_COMP_GNUC + #if EIGEN_GNUC_AT_LEAST(4,8) + #define EIGEN_HAS_SFINAE + #endif + #else + #define EIGEN_HAS_SFINAE + #endif +#endif +#endif + +#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \ + typename internal::enable_if< ( __condition__ ) , int >::type = 0 + + +#if EIGEN_HAS_CONSTEXPR +#define EIGEN_CONSTEXPR constexpr +#else +#define EIGEN_CONSTEXPR +#endif + + +#endif diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h new file mode 100644 index 000000000..e4fc86a40 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h @@ -0,0 +1,323 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H + +namespace Eigen { + +// FIXME use proper doxygen documentation (e.g. \tparam MakePointer_) + +/** \class TensorMap + * \ingroup CXX11_Tensor_Module + * + * \brief A tensor expression mapping an existing array of data. + * + */ +/// `template class MakePointer_` is added to convert the host pointer to the device pointer. +/// It is added due to the fact that for our device compiler `T*` is not allowed. +/// If we wanted to use the same Evaluator functions we have to convert that type to our pointer `T`. +/// This is done through our `MakePointer_` class. By default the Type in the `MakePointer_` is `T*` . +/// Therefore, by adding the default value, we managed to convert the type and it does not break any +/// existing code as its default value is `T*`. 
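+/// Illustrative use (hypothetical buffer): a TensorMap views memory the
+/// caller already owns, without copying it:
+///   float data[6] = {0, 1, 2, 3, 4, 5};
+///   Eigen::TensorMap<Eigen::Tensor<float, 2> > m(data, 2, 3);
+///   float v = m(1, 2);  // reads data[5] under the default column-major layout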
+template class MakePointer_> class TensorMap : public TensorBase > +{ + public: + typedef TensorMap Self; + typedef typename PlainObjectType::Base Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + /* typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, + const Scalar *>::type + PointerType;*/ + typedef typename MakePointer_::Type PointerType; + typedef PointerType PointerArgType; + + static const int Options = Options_; + + static const Index NumIndices = PlainObjectType::NumIndices; + typedef typename PlainObjectType::Dimensions Dimensions; + + enum { + IsAligned = ((int(Options_)&Aligned)==Aligned), + Layout = PlainObjectType::Layout, + CoordAccess = true, + RawAccess = true + }; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr) : m_data(dataPtr), m_dimensions() { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) { + EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) { + EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) { + EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) { + EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const array& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const Dimensions& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PlainObjectType& tensor) + : m_data(tensor.data()), m_dimensions(tensor.dimensions()) + { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rank() const { return m_dimensions.rank(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PointerType data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const PointerType data() const { return m_data; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) const + { + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) + { + static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + const std::size_t NumDims = sizeof...(otherIndices) + 2; + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Self& operator=(const Self& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Self& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + private: + typename MakePointer_::Type m_data; + Dimensions m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h new file mode 100644 index 000000000..615559d44 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h @@ -0,0 +1,218 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_H + +namespace Eigen { + +template struct Cond {}; + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T1& choose(Cond, const T1& first, const T2&) { + return first; +} + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T2& choose(Cond, const T1&, const T2& second) { + return second; +} + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const X x, const Y y) { + return static_cast((x + y - 1) / y); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const T x, const T y) { + return static_cast((x + y - 1) / y); +} + +template struct max_n_1 { + static const size_t size = n; +}; +template <> struct max_n_1<0> { + static const size_t size = 1; +}; + + +// Default packet types +template +struct PacketType : internal::packet_traits { + typedef typename internal::packet_traits::type type; +}; + +// For CUDA packet types when using a GpuDevice +#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) && defined(EIGEN_HAS_CUDA_FP16) +template <> +struct PacketType { + typedef half2 type; + static const int size = 2; + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0, + + HasDiv = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasExp = 1, + HasLog = 1, + HasLog1p = 0, + HasLog10 = 0, + HasPow = 1, + }; +}; +#endif + +#if defined(EIGEN_USE_SYCL) +template + struct PacketType { + typedef T type; + static const int size = 1; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasArg = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0 + }; +}; +#endif + + +// Tuple mimics std::pair but works on e.g. nvcc. +template struct Tuple { + public: + U first; + V second; + + typedef U first_type; + typedef V second_type; + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple() : first(), second() {} + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple(const U& f, const V& s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple& operator= (const Tuple& rhs) { + if (&rhs == this) return *this; + first = rhs.first; + second = rhs.second; + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void swap(Tuple& rhs) { + using numext::swap; + swap(first, rhs.first); + swap(second, rhs.second); + } +}; + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator==(const Tuple& x, const Tuple& y) { + return (x.first == y.first && x.second == y.second); +} + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator!=(const Tuple& x, const Tuple& y) { + return !(x == y); +} + + +// Can't use std::pairs on cuda devices +template struct IndexPair { + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {} + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC void set(IndexPair val) { + first = val.first; + second = val.second; + } + + Idx first; + Idx second; +}; + + +#ifdef EIGEN_HAS_SFINAE +namespace internal { + + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx, numeric_list) { + return { idx[Is]... 
}; + } + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType&, numeric_list) { + return array(); + } + + /** Make an array (for index/dimensions) out of a custom index */ + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx) { + return customIndices2Array(idx, typename gen_numeric_list::type{}); + } + + + template + struct is_base_of + { + + typedef char (&yes)[1]; + typedef char (&no)[2]; + + template + struct Host + { + operator BB*() const; + operator DD*(); + }; + + template + static yes check(D*, T); + static no check(B*, int); + + static const bool value = sizeof(check(Host(), int())) == sizeof(yes); + }; + +} +#endif + + + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_META_H diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h new file mode 100644 index 000000000..d34f1e328 --- /dev/null +++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -0,0 +1,888 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H +#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H + +namespace Eigen { + +/** \class TensorReshaping + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. 
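+ * Illustrative use (hypothetical shapes): a reshape reinterprets the same
+ * flat storage under new dimensions, so the total coefficient count must
+ * match:
+ *   Eigen::Tensor<float, 2> a(4, 3);
+ *   Eigen::array<Eigen::DenseIndex, 1> flat{{12}};
+ *   Eigen::Tensor<float, 1> b = a.reshape(flat);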
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorReshapingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorReshapingOp type; +}; + +} // end namespace internal + + + +template +class TensorReshapingOp : public TensorBase, WriteAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims) + : m_xpr(expr), m_dims(dims) {} + + EIGEN_DEVICE_FUNC + const NewDimensions& dimensions() const { return m_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const TensorReshapingOp& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + protected: + typename XprType::Nested m_xpr; + const NewDimensions m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dimensions(op.dimensions()) + { + // The total size of the reshaped tensor must be equal to the total size + // of the input tensor. 
+ eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast(m_impl.data()); } + + EIGEN_DEVICE_FUNC const TensorEvaluator& impl() const { return m_impl; } + + protected: + TensorEvaluator m_impl; + NewDimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> + +{ + typedef TensorEvaluator, Device> Base; + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } +}; + + +/** \class TensorSlicing + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor slicing class. 
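+ * Illustrative use (hypothetical offsets and extents): a slice is a view
+ * of a rectangular region starting at the given offsets:
+ *   Eigen::Tensor<float, 2> a(4, 3);
+ *   Eigen::array<Eigen::DenseIndex, 2> offsets{{1, 0}};
+ *   Eigen::array<Eigen::DenseIndex, 2> extents{{2, 2}};
+ *   Eigen::Tensor<float, 2> s = a.slice(offsets, extents);  // rows 1-2, cols 0-1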
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSlicingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSlicingOp type; +}; + +} // end namespace internal + + + +template +class TensorSlicingOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes) + : m_xpr(expr), m_indices(indices), m_sizes(sizes) {} + + EIGEN_DEVICE_FUNC + const StartIndices& startIndices() const { return m_indices; } + EIGEN_DEVICE_FUNC + const Sizes& sizes() const { return m_sizes; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const TensorSlicingOp& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + + + protected: + typename XprType::Nested m_xpr; + const StartIndices m_indices; + const Sizes m_sizes; +}; + + +// Fixme: figure out the exact threshold +namespace { +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { } + EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > threshold_; } + + private: + Index threshold_; +}; + +// It is very expensive to start the memcpy kernel on GPU: we therefore only +// use it for large copies. +#ifdef EIGEN_USE_GPU +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { } + EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; } +}; +#endif +} + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets and sizes. 
+ IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices()) + { + for (std::size_t i = 0; i < internal::array_size::value; ++i) { + eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const Sizes& output_dims = op.sizes(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + + // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed. + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } else { + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + + // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed. + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (!NumTraits::type>::RequireInitialization && data && m_impl.data()) { + Index contiguous_values = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } else { + for (int i = NumDims-1; i >= 0; --i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } + // Use memcpy if it's going to be faster than using the regular evaluation. 
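+      // contiguous_values, computed above, is the length of the longest
+      // run of inner dimensions the slice shares with its input, i.e. the
+      // size of one contiguous block; the loop below issues one memcpy per
+      // such block.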
+ const MemcpyTriggerForSlicing trigger(m_device); + if (trigger(contiguous_values)) { + Scalar* src = (Scalar*)m_impl.data(); + for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) { + Index offset = srcCoeff(i); + m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar)); + } + return false; + } + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < internal::array_prod(dimensions())); + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[0]); + inputIndices[1] += (indices[1] + m_offsets[0]); + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[packetSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[packetSize-1] = m_impl.coeff(inputIndices[1]); + for (int i = 1; i < packetSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims); + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { + Scalar* result = m_impl.data(); + if (result) { + Index offset = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i+1; j < NumDims; ++j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i-1; j >= 0; --j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } + return result + offset; + } + return NULL; + } + + protected: + EIGEN_DEVICE_FUNC 
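+  // Maps a linear index within the slice to the corresponding linear index
+  // in the input: peel off one output dimension at a time with the cached
+  // fast divisors, add that dimension's slice offset, and rescale by the
+  // input stride.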
EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[0]); + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[NumDims-1]); + } + return inputIndex; + } + + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Device& m_device; + Dimensions m_dimensions; + const StartIndices m_offsets; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + const int packetSize = internal::unpacket_traits::size; + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[0]); + inputIndices[1] += (indices[1] + this->m_offsets[0]); + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + this->m_impl.template writePacket(inputIndices[0], x); + } + else { + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + internal::pstore(values, x); + this->m_impl.coeffRef(inputIndices[0]) = values[0]; + this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1]; + for (int i = 1; 
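The srcCoeff() above is the heart of the slice evaluator: it walks the output strides from the outermost dimension inward, recovers each coordinate with a division, shifts it by the slice offset, and re-linearizes against the input strides. A minimal standalone sketch of that arithmetic, with plain integer division standing in for TensorIntDivisor and an illustrative 4x3 column-major source sliced to a 2x2 block at offset (1,1):

#include <array>
#include <cstdio>

// Map each linear index of a 2x2 slice of a 4x3 column-major matrix,
// taken at offsets {1,1}, back to a linear index in the source,
// mirroring the div/mod walk performed by srcCoeff().
int main() {
  const int NumDims = 2;
  std::array<long, 2> in_dims = {4, 3}, sizes = {2, 2}, offsets = {1, 1};
  std::array<long, 2> in_str, out_str;
  in_str[0] = 1;  out_str[0] = 1;
  for (int i = 1; i < NumDims; ++i) {
    in_str[i] = in_str[i-1] * in_dims[i-1];
    out_str[i] = out_str[i-1] * sizes[i-1];
  }
  for (long index = 0; index < sizes[0] * sizes[1]; ++index) {
    long rem = index, src = 0;
    for (int i = NumDims - 1; i > 0; --i) {
      const long idx = rem / out_str[i];      // coordinate in dimension i
      src += (idx + offsets[i]) * in_str[i];  // shift by the slice offset
      rem -= idx * out_str[i];
    }
    src += rem + offsets[0];                  // innermost dimension
    std::printf("out %ld -> src %ld\n", index, src);
  }
}

Output index 0 maps to source index 5 (coordinates (1,1) of the 4x3 source), which is exactly the offset term the evaluator folds into m_offsets.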
+
+// Eval as lvalue
+template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
+struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
+  : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
+{
+  typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
+  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
+  static const int NumDims = internal::array_size<Sizes>::value;
+
+  enum {
+    IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
+    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    CoordAccess = false,
+    RawAccess = false
+  };
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+    : Base(op, device)
+    { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  typedef Sizes Dimensions;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+  {
+    return this->m_impl.coeffRef(this->srcCoeff(index));
+  }
+
+  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  void writePacket(Index index, const PacketReturnType& x)
+  {
+    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+    Index inputIndices[] = {0, 0};
+    Index indices[] = {index, index + packetSize - 1};
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumDims - 1; i > 0; --i) {
+        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
+        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
+        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
+        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
+        indices[0] -= idx0 * this->m_outputStrides[i];
+        indices[1] -= idx1 * this->m_outputStrides[i];
+      }
+      inputIndices[0] += (indices[0] + this->m_offsets[0]);
+      inputIndices[1] += (indices[1] + this->m_offsets[0]);
+    } else {
+      for (int i = 0; i < NumDims - 1; ++i) {
+        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
+        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
+        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
+        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
+        indices[0] -= idx0 * this->m_outputStrides[i];
+        indices[1] -= idx1 * this->m_outputStrides[i];
+      }
+      inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
+      inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
+    }
+    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
+      this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
+    }
+    else {
+      EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
+      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+      this->m_impl.coeffRef(inputIndices[0]) = values[0];
+      this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
+      for (int i = 1; i < packetSize-1; ++i) {
+        this->coeffRef(index+i) = values[i];
+      }
+    }
+  }
+};
+
+
+
+namespace internal {
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> > : public traits<XprType>
+{
+  typedef typename XprType::Scalar Scalar;
+  typedef traits<XprType> XprTraits;
+  typedef typename XprTraits::StorageKind StorageKind;
+  typedef typename XprTraits::Index Index;
+  typedef typename XprType::Nested Nested;
+  typedef typename remove_reference<Nested>::type _Nested;
+  static const int NumDimensions = array_size<StartIndices>::value;
+  static const int Layout = XprTraits::Layout;
+};
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Eigen::Dense>
+{
+  typedef const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>& type;
+};
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct nested<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, 1, typename eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >::type>
+{
+  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> type;
+};
+
+} // end namespace internal
+
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+class TensorStridingSlicingOp : public TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
+{
+  public:
+  typedef typename internal::traits<TensorStridingSlicingOp>::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename internal::nested<TensorStridingSlicingOp>::type Nested;
+  typedef typename internal::traits<TensorStridingSlicingOp>::StorageKind StorageKind;
+  typedef typename internal::traits<TensorStridingSlicingOp>::Index Index;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp(
+    const XprType& expr, const StartIndices& startIndices,
+    const StopIndices& stopIndices, const Strides& strides)
+      : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices),
+        m_strides(strides) {}
+
+  EIGEN_DEVICE_FUNC
+  const StartIndices& startIndices() const { return m_startIndices; }
+  EIGEN_DEVICE_FUNC
+  const StartIndices& stopIndices() const { return m_stopIndices; }
+  EIGEN_DEVICE_FUNC
+  const StartIndices& strides() const { return m_strides; }
+
+  EIGEN_DEVICE_FUNC
+  const typename internal::remove_all<typename XprType::Nested>::type&
+  expression() const { return m_xpr; }
+
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const TensorStridingSlicingOp& other)
+  {
+    typedef TensorAssignOp<TensorStridingSlicingOp, const TensorStridingSlicingOp> Assign;
+    Assign assign(*this, other);
+    internal::TensorExecutor<const Assign, DefaultDevice>::run(
+        assign, DefaultDevice());
+    return *this;
+  }
+
+  template<typename OtherDerived>
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const OtherDerived& other)
+  {
+    typedef TensorAssignOp<TensorStridingSlicingOp, const OtherDerived> Assign;
+    Assign assign(*this, other);
+    internal::TensorExecutor<const Assign, DefaultDevice>::run(
+        assign, DefaultDevice());
+    return *this;
+  }
+
+  protected:
+  typename XprType::Nested m_xpr;
+  const StartIndices m_startIndices;
+  const StopIndices m_stopIndices;
+  const Strides m_strides;
+};
+
+// Eval as rvalue
+template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+{
+  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
+  static const int NumDims = internal::array_size<Strides>::value;
+
+  enum {
+    // Alignment can't be guaranteed at compile time since it depends on the
+    // slice offsets and sizes.
+    IsAligned = false,
+    PacketAccess = false,
+    BlockAccess = false,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    RawAccess = false
+  };
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+      : m_impl(op.expression(), device), m_device(device), m_strides(op.strides())
+  {
+    // Handle degenerate intervals by gracefully clamping and allowing m_dimensions to be zero
+    DSizes<Index, NumDims> startIndicesClamped, stopIndicesClamped;
+    for (size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+      eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
+      if (m_strides[i] > 0) {
+        startIndicesClamped[i] = clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
+        stopIndicesClamped[i] = clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]);
+      } else {
+        /* implies m_strides[i] < 0 by assert */
+        startIndicesClamped[i] = clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1);
+        stopIndicesClamped[i] = clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1);
+      }
+      m_startIndices[i] = startIndicesClamped[i];
+    }
+
+    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+
+    // Check for degenerate intervals and compute the output tensor shape.
+    bool degenerate = false;
+    for (int i = 0; i < NumDims; i++) {
+      Index interval = stopIndicesClamped[i] - startIndicesClamped[i];
+      if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) {
+        m_dimensions[i] = 0;
+        degenerate = true;
+      } else {
+        m_dimensions[i] = interval / m_strides[i]
+                          + (interval % m_strides[i] != 0 ? 1 : 0);
+        eigen_assert(m_dimensions[i] >= 0);
+      }
+    }
+    Strides output_dims = m_dimensions;
+
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      m_inputStrides[0] = m_strides[0];
+      m_offsets[0] = startIndicesClamped[0];
+      Index previousDimProduct = 1;
+      for (int i = 1; i < NumDims; ++i) {
+        previousDimProduct *= input_dims[i-1];
+        m_inputStrides[i] = previousDimProduct * m_strides[i];
+        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
+      }
+
+      // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed.
+      m_outputStrides[0] = 1;
+      for (int i = 1; i < NumDims; ++i) {
+        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
+        // NOTE: if tensor is degenerate, we send 1 to prevent TensorIntDivisor constructor crash
+        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
+      }
+    } else {
+      m_inputStrides[NumDims-1] = m_strides[NumDims-1];
+      m_offsets[NumDims-1] = startIndicesClamped[NumDims-1];
+      Index previousDimProduct = 1;
+      for (int i = NumDims - 2; i >= 0; --i) {
+        previousDimProduct *= input_dims[i+1];
+        m_inputStrides[i] = previousDimProduct * m_strides[i];
+        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
+      }
+
+      m_outputStrides[NumDims-1] = 1;
+      for (int i = NumDims - 2; i >= 0; --i) {
+        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
+        // NOTE: if tensor is degenerate, we send 1 to prevent TensorIntDivisor constructor crash
+        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
+      }
+    }
+    m_block_total_size_max = numext::maxi(static_cast<std::size_t>(1),
+                                          device.lastLevelCacheSize() /
+                                          sizeof(Scalar));
+  }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  typedef Strides Dimensions;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+    m_impl.evalSubExprsIfNeeded(NULL);
+    return true;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_impl.cleanup();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+  {
+    return m_impl.coeff(srcCoeff(index));
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
+    return NULL;
+  }
+
+ protected:
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
+  {
+    Index inputIndex = 0;
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumDims - 1; i >= 0; --i) {
+        const Index idx = index / m_fastOutputStrides[i];
+        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
+        index -= idx * m_outputStrides[i];
+      }
+    } else {
+      for (int i = 0; i < NumDims; ++i) {
+        const Index idx = index / m_fastOutputStrides[i];
+        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
+        index -= idx * m_outputStrides[i];
+      }
+    }
+    return inputIndex;
+  }
+
+  static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
+    return numext::maxi(min, numext::mini(max, value));
+  }
+
+  array<Index, NumDims> m_outputStrides;
+  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
+  array<Index, NumDims> m_inputStrides;
+  TensorEvaluator<ArgType, Device> m_impl;
+  const Device& m_device;
+  DSizes<Index, NumDims> m_startIndices; // clamped startIndices
+  DSizes<Index, NumDims> m_dimensions;
+  DSizes<Index, NumDims> m_offsets; // offset in a flattened shape
+  const Strides m_strides;
+  std::size_t m_block_total_size_max;
+};
+
+// Eval as lvalue
+template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+  : public TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+{
+  typedef TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device> Base;
+  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
+  static const int NumDims = internal::array_size<Strides>::value;
+
+  enum {
+    IsAligned = false,
+    PacketAccess = false,
+    BlockAccess = false,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
+    RawAccess = false
+  };
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+    : Base(op, device)
+    { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  typedef Strides Dimensions;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+  {
+    return this->m_impl.coeffRef(this->srcCoeff(index));
+  }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
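For reference, a small usage sketch of the two slicing expressions this header defines; shapes and values are illustrative, not from the SDK. Note that stridedSlice treats the stop index as exclusive and that the evaluator clamps out-of-range starts/stops rather than rejecting them:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(4, 3);
  t.setValues({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}});

  // Plain slice: start at (1,1), extents (2,2) -> TensorSlicingOp.
  Eigen::array<Eigen::Index, 2> offsets = {1, 1};
  Eigen::array<Eigen::Index, 2> extents = {2, 2};
  Eigen::Tensor<float, 2> s = t.slice(offsets, extents);

  // Strided slice: rows 3 and 1 (stride -2) -> TensorStridingSlicingOp.
  Eigen::array<Eigen::Index, 2> starts  = {3, 0};
  Eigen::array<Eigen::Index, 2> stops   = {-1, 3};  // -1 walks past row 0 with a negative stride
  Eigen::array<Eigen::Index, 2> strides = {-2, 1};
  Eigen::Tensor<float, 2> r = t.stridedSlice(starts, stops, strides);

  std::cout << s << "\n\n" << r << "\n";
}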
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
new file mode 100644
index 000000000..647bcf108
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
@@ -0,0 +1,397 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
+
+namespace Eigen {
+
+/** \class TensorPadding
+  * \ingroup CXX11_Tensor_Module
+  *
+  * \brief Tensor padding class.
+  * At the moment only padding with a constant value is supported.
+  *
+  */
+namespace internal {
+template<typename PaddingDimensions, typename XprType>
+struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType>
+{
+  typedef typename XprType::Scalar Scalar;
+  typedef traits<XprType> XprTraits;
+  typedef typename XprTraits::StorageKind StorageKind;
+  typedef typename XprTraits::Index Index;
+  typedef typename XprType::Nested Nested;
+  typedef typename remove_reference<Nested>::type _Nested;
+  static const int NumDimensions = XprTraits::NumDimensions;
+  static const int Layout = XprTraits::Layout;
+};
+
+template<typename PaddingDimensions, typename XprType>
+struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense>
+{
+  typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
+};
+
+template<typename PaddingDimensions, typename XprType>
+struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1, typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type>
+{
+  typedef TensorPaddingOp<PaddingDimensions, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename PaddingDimensions, typename XprType>
+class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors>
+{
+  public:
+  typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
+  typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
+  typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value)
+      : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}
+
+  EIGEN_DEVICE_FUNC
+  const PaddingDimensions& padding() const { return m_padding_dims; }
+  EIGEN_DEVICE_FUNC
+  Scalar padding_value() const { return m_padding_value; }
+
+  EIGEN_DEVICE_FUNC
+  const typename internal::remove_all<typename XprType::Nested>::type&
+  expression() const { return m_xpr; }
+
+  protected:
+  typename XprType::Nested m_xpr;
+  const PaddingDimensions m_padding_dims;
+  const Scalar m_padding_value;
+};
+
+
+// Eval as rvalue
+template<typename PaddingDimensions, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device>
+{
+  typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
+  typedef typename XprType::Index Index;
+  static const int NumDims = internal::array_size<PaddingDimensions>::value;
+  typedef DSizes<Index, NumDims> Dimensions;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+  enum {
+    IsAligned = true,
+    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    CoordAccess = true,
+    RawAccess = false
+  };
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+      : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value())
+  {
+    // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
+    // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
+    // of 1 element first and then pad.
+    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+    // Compute dimensions
+    m_dimensions = m_impl.dimensions();
+    for (int i = 0; i < NumDims; ++i) {
+      m_dimensions[i] += m_padding[i].first + m_padding[i].second;
+    }
+    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      m_inputStrides[0] = 1;
+      m_outputStrides[0] = 1;
+      for (int i = 1; i < NumDims; ++i) {
+        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+      }
+      m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1];
+    } else {
+      m_inputStrides[NumDims - 1] = 1;
+      m_outputStrides[NumDims] = 1;
+      for (int i = NumDims - 2; i >= 0; --i) {
+        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+        m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1];
+      }
+      m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
+    }
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+    m_impl.evalSubExprsIfNeeded(NULL);
+    return true;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_impl.cleanup();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+  {
+    eigen_assert(index < dimensions().TotalSize());
+    Index inputIndex = 0;
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumDims - 1; i > 0; --i) {
+        const Index idx = index / m_outputStrides[i];
+        if (isPaddingAtIndexForDim(idx, i)) {
+          return m_paddingValue;
+        }
+        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+        index -= idx * m_outputStrides[i];
+      }
+      if (isPaddingAtIndexForDim(index, 0)) {
+        return m_paddingValue;
+      }
+      inputIndex += (index - m_padding[0].first);
+    } else {
+      for (int i = 0; i < NumDims - 1; ++i) {
+        const Index idx = index / m_outputStrides[i+1];
+        if (isPaddingAtIndexForDim(idx, i)) {
+          return m_paddingValue;
+        }
+        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+        index -= idx * m_outputStrides[i+1];
+      }
+      if (isPaddingAtIndexForDim(index, NumDims-1)) {
+        return m_paddingValue;
+      }
+      inputIndex += (index - m_padding[NumDims-1].first);
+    }
+    return m_impl.coeff(inputIndex);
+  }
+
+  template<int LoadMode>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+  {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      return packetColMajor(index);
+    }
+    return packetRowMajor(index);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+    TensorOpCost cost = m_impl.costPerCoeff(vectorized);
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = 0; i < NumDims; ++i)
+        updateCostPerDimension(cost, i, i == 0);
+    } else {
+      for (int i = NumDims - 1; i >= 0; --i)
+        updateCostPerDimension(cost, i, i == NumDims - 1);
+    }
+    return cost;
+  }
+
+  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ private:
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(
+      Index index, int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+    return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
+            index < m_padding[dim_index].first) ||
+           (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
+            index >= m_dimensions[dim_index] - m_padding[dim_index].second);
+#else
+    return (index < m_padding[dim_index].first) ||
+           (index >= m_dimensions[dim_index] - m_padding[dim_index].second);
+#endif
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(
+      int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+    return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
+#else
+    EIGEN_UNUSED_VARIABLE(dim_index);
+    return false;
+#endif
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(
+      int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+    return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
+#else
+    EIGEN_UNUSED_VARIABLE(dim_index);
+    return false;
+#endif
+  }
+
+
+  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
+    const double in = static_cast<double>(m_impl.dimensions()[i]);
+    const double out = in + m_padding[i].first + m_padding[i].second;
+    if (out == 0)
+      return;
+    const double reduction = in / out;
+    cost *= reduction;
+    if (first) {
+      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
+                                 reduction * (1 * TensorOpCost::AddCost<Index>()));
+    } else {
+      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
+                                 2 * TensorOpCost::MulCost<Index>() +
+                                 reduction * (2 * TensorOpCost::MulCost<Index>() +
+                                              1 * TensorOpCost::DivCost<Index>()));
+    }
+  }
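The vectorized paths that follow (packetColMajor/packetRowMajor) classify each packet of PacketSize consecutive output indices, dimension by dimension: entirely inside a padding zone (broadcast the padding value), entirely between the two zones (fall through toward a single real packet load), or mixed (scalar fallback via packetWithPossibleZero). A minimal 1-D sketch of that classification, with illustrative sizes:

#include <cstdio>

// Classify a packet of lane indices [first, last] against a padded 1-D axis,
// the way packetColMajor() decides between pset1, a packet load, and the
// scalar fallback. dim is the padded size, pad_lo/pad_hi the padding widths.
const char* classify(long first, long last, long dim, long pad_lo, long pad_hi) {
  if (last < pad_lo) return "all left padding  -> pset1(padding_value)";
  if (first >= dim - pad_hi && last < dim) return "all right padding -> pset1(padding_value)";
  if (first >= pad_lo && last < dim - pad_hi) return "all interior      -> one packet load";
  return "mixed             -> packetWithPossibleZero (scalar gather)";
}

int main() {
  // A 10-wide padded axis: 2 cells of left padding, 6 data cells, 2 right.
  for (long first = 0; first + 3 < 10; ++first)
    std::printf("[%ld,%ld] %s\n", first, first + 3, classify(first, first + 3, 10, 2, 2));
}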
+ protected:
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
+  {
+    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+    const Index initialIndex = index;
+    Index inputIndex = 0;
+    for (int i = NumDims - 1; i > 0; --i) {
+      const Index first = index;
+      const Index last = index + PacketSize - 1;
+      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
+      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
+      const Index lastPaddedRight = m_outputStrides[i+1];
+
+      if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+        // all the coefficients are in the padding zone.
+        return internal::pset1<PacketReturnType>(m_paddingValue);
+      }
+      else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+        // all the coefficients are in the padding zone.
+        return internal::pset1<PacketReturnType>(m_paddingValue);
+      }
+      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+        // all the coefficients are between the 2 padding zones.
+        const Index idx = index / m_outputStrides[i];
+        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+        index -= idx * m_outputStrides[i];
+      }
+      else {
+        // Every other case
+        return packetWithPossibleZero(initialIndex);
+      }
+    }
+
+    const Index last = index + PacketSize - 1;
+    const Index first = index;
+    const Index lastPaddedLeft = m_padding[0].first;
+    const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
+    const Index lastPaddedRight = m_outputStrides[1];
+
+    if (!isLeftPaddingCompileTimeZero(0) && last < lastPaddedLeft) {
+      // all the coefficients are in the padding zone.
+      return internal::pset1<PacketReturnType>(m_paddingValue);
+    }
+    else if (!isRightPaddingCompileTimeZero(0) && first >= firstPaddedRight && last < lastPaddedRight) {
+      // all the coefficients are in the padding zone.
+      return internal::pset1<PacketReturnType>(m_paddingValue);
+    }
+    else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+      // all the coefficients are between the 2 padding zones.
+      inputIndex += (index - m_padding[0].first);
+      return m_impl.template packet<Unaligned>(inputIndex);
+    }
+    // Every other case
+    return packetWithPossibleZero(initialIndex);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
+  {
+    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+    const Index initialIndex = index;
+    Index inputIndex = 0;
+
+    for (int i = 0; i < NumDims - 1; ++i) {
+      const Index first = index;
+      const Index last = index + PacketSize - 1;
+      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
+      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
+      const Index lastPaddedRight = m_outputStrides[i];
+
+      if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+        // all the coefficients are in the padding zone.
+        return internal::pset1<PacketReturnType>(m_paddingValue);
+      }
+      else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+        // all the coefficients are in the padding zone.
+        return internal::pset1<PacketReturnType>(m_paddingValue);
+      }
+      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+        // all the coefficients are between the 2 padding zones.
+        const Index idx = index / m_outputStrides[i+1];
+        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+        index -= idx * m_outputStrides[i+1];
+      }
+      else {
+        // Every other case
+        return packetWithPossibleZero(initialIndex);
+      }
+    }
+
+    const Index last = index + PacketSize - 1;
+    const Index first = index;
+    const Index lastPaddedLeft = m_padding[NumDims-1].first;
+    const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
+    const Index lastPaddedRight = m_outputStrides[NumDims-1];
+
+    if (!isLeftPaddingCompileTimeZero(NumDims-1) && last < lastPaddedLeft) {
+      // all the coefficients are in the padding zone.
+      return internal::pset1<PacketReturnType>(m_paddingValue);
+    }
+    else if (!isRightPaddingCompileTimeZero(NumDims-1) && first >= firstPaddedRight && last < lastPaddedRight) {
+      // all the coefficients are in the padding zone.
+      return internal::pset1<PacketReturnType>(m_paddingValue);
+    }
+    else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+      // all the coefficients are between the 2 padding zones.
+      inputIndex += (index - m_padding[NumDims-1].first);
+      return m_impl.template packet<Unaligned>(inputIndex);
+    }
+    // Every other case
+    return packetWithPossibleZero(initialIndex);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
+  {
+    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+    for (int i = 0; i < PacketSize; ++i) {
+      values[i] = coeff(index+i);
+    }
+    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+    return rslt;
+  }
+
+  Dimensions m_dimensions;
+  array<Index, NumDims+1> m_outputStrides;
+  array<Index, NumDims> m_inputStrides;
+  TensorEvaluator<ArgType, Device> m_impl;
+  PaddingDimensions m_padding;
+
+  Scalar m_paddingValue;
+};
+
+
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
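A short usage sketch of the padding expression; shapes and the padding value are illustrative. Each dimension takes a {before, after} pair, and the output extent is in + first + second:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setConstant(1.0f);

  // Result is (2+1+1) x (3+0+2) = 4 x 5, filled with -1 outside t.
  Eigen::array<std::pair<Eigen::Index, Eigen::Index>, 2> paddings;
  paddings[0] = std::make_pair(1, 1);
  paddings[1] = std::make_pair(0, 2);
  Eigen::Tensor<float, 2> padded = t.pad(paddings, -1.0f);

  std::cout << padded.dimension(0) << " x " << padded.dimension(1)
            << "\n" << padded << "\n";
}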
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
new file mode 100644
index 000000000..886a254f6
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
+#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
+
+namespace Eigen {
+
+/** \class TensorPatch
+  * \ingroup CXX11_Tensor_Module
+  *
+  * \brief Tensor patch class.
+  *
+  *
+  */
+namespace internal {
+template<typename PatchDim, typename XprType>
+struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
+{
+  typedef typename XprType::Scalar Scalar;
+  typedef traits<XprType> XprTraits;
+  typedef typename XprTraits::StorageKind StorageKind;
+  typedef typename XprTraits::Index Index;
+  typedef typename XprType::Nested Nested;
+  typedef typename remove_reference<Nested>::type _Nested;
+  static const int NumDimensions = XprTraits::NumDimensions + 1;
+  static const int Layout = XprTraits::Layout;
+};
+
+template<typename PatchDim, typename XprType>
+struct eval<TensorPatchOp<PatchDim, XprType>, Eigen::Dense>
+{
+  typedef const TensorPatchOp<PatchDim, XprType>& type;
+};
+
+template<typename PatchDim, typename XprType>
+struct nested<TensorPatchOp<PatchDim, XprType>, 1, typename eval<TensorPatchOp<PatchDim, XprType> >::type>
+{
+  typedef TensorPatchOp<PatchDim, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename PatchDim, typename XprType>
+class TensorPatchOp : public TensorBase<TensorPatchOp<PatchDim, XprType>, ReadOnlyAccessors>
+{
+  public:
+  typedef typename Eigen::internal::traits<TensorPatchOp>::Scalar Scalar;
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename Eigen::internal::nested<TensorPatchOp>::type Nested;
+  typedef typename Eigen::internal::traits<TensorPatchOp>::StorageKind StorageKind;
+  typedef typename Eigen::internal::traits<TensorPatchOp>::Index Index;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims)
+      : m_xpr(expr), m_patch_dims(patch_dims) {}
+
+  EIGEN_DEVICE_FUNC
+  const PatchDim& patch_dims() const { return m_patch_dims; }
+
+  EIGEN_DEVICE_FUNC
+  const typename internal::remove_all<typename XprType::Nested>::type&
+  expression() const { return m_xpr; }
+
+  protected:
+  typename XprType::Nested m_xpr;
+  const PatchDim m_patch_dims;
+};
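The evaluator that follows lays out one patch per valid starting offset, so the number of patches is the product of (input_dim - patch_dim + 1) over the patched dimensions, appended as one extra dimension. A tiny sketch of that shape computation, with made-up sizes:

#include <cstdio>

int main() {
  // For an input of shape (in0, in1) and patch shape (p0, p1):
  //   num_patches = (in0 - p0 + 1) * (in1 - p1 + 1)
  // and, in column-major storage, an output of shape (p0, p1, num_patches).
  const long in[2] = {5, 4}, p[2] = {3, 2};
  long num_patches = 1;
  for (int i = 0; i < 2; ++i) num_patches *= in[i] - p[i] + 1;
  std::printf("output: %ld x %ld x %ld\n", p[0], p[1], num_patches);  // 3 x 2 x 9
}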
+
+
+// Eval as rvalue
+template<typename PatchDim, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
+{
+  typedef TensorPatchOp<PatchDim, ArgType> XprType;
+  typedef typename XprType::Index Index;
+  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value + 1;
+  typedef DSizes<Index, NumDims> Dimensions;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+
+  enum {
+    IsAligned = false,
+    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    CoordAccess = false,
+    RawAccess = false
+  };
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+      : m_impl(op.expression(), device)
+  {
+    Index num_patches = 1;
+    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+    const PatchDim& patch_dims = op.patch_dims();
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = 0; i < NumDims-1; ++i) {
+        m_dimensions[i] = patch_dims[i];
+        num_patches *= (input_dims[i] - patch_dims[i] + 1);
+      }
+      m_dimensions[NumDims-1] = num_patches;
+
+      m_inputStrides[0] = 1;
+      m_patchStrides[0] = 1;
+      for (int i = 1; i < NumDims-1; ++i) {
+        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+        m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1);
+      }
+      m_outputStrides[0] = 1;
+      for (int i = 1; i < NumDims; ++i) {
+        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+      }
+    } else {
+      for (int i = 0; i < NumDims-1; ++i) {
+        m_dimensions[i+1] = patch_dims[i];
+        num_patches *= (input_dims[i] - patch_dims[i] + 1);
+      }
+      m_dimensions[0] = num_patches;
+
+      m_inputStrides[NumDims-2] = 1;
+      m_patchStrides[NumDims-2] = 1;
+      for (int i = NumDims-3; i >= 0; --i) {
+        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+        m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1);
+      }
+      m_outputStrides[NumDims-1] = 1;
+      for (int i = NumDims-2; i >= 0; --i) {
+        m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+      }
+    }
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+    m_impl.evalSubExprsIfNeeded(NULL);
+    return true;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_impl.cleanup();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+  {
+    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
+    // Find the location of the first element of the patch.
+    Index patchIndex = index / m_outputStrides[output_stride_index];
+    // Find the offset of the element wrt the location of the first element.
+    Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index];
+    Index inputIndex = 0;
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumDims - 2; i > 0; --i) {
+        const Index patchIdx = patchIndex / m_patchStrides[i];
+        patchIndex -= patchIdx * m_patchStrides[i];
+        const Index offsetIdx = patchOffset / m_outputStrides[i];
+        patchOffset -= offsetIdx * m_outputStrides[i];
+        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
+      }
+    } else {
+      for (int i = 0; i < NumDims - 2; ++i) {
+        const Index patchIdx = patchIndex / m_patchStrides[i];
+        patchIndex -= patchIdx * m_patchStrides[i];
+        const Index offsetIdx = patchOffset / m_outputStrides[i+1];
+        patchOffset -= offsetIdx * m_outputStrides[i+1];
+        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
+      }
+    }
+    inputIndex += (patchIndex + patchOffset);
+    return m_impl.coeff(inputIndex);
+  }
+
+  template<int LoadMode>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+  {
+    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
+    Index indices[2] = {index, index + PacketSize - 1};
+    Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index],
+                             indices[1] / m_outputStrides[output_stride_index]};
+    Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index],
+                             indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]};
+
+    Index inputIndices[2] = {0, 0};
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumDims - 2; i > 0; --i) {
+        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
+                                   patchIndices[1] / m_patchStrides[i]};
+        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
+        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];
+
+        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i],
+                                    patchOffsets[1] / m_outputStrides[i]};
+        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i];
+        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i];
+
+        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
+        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
+      }
+    } else {
+      for (int i = 0; i < NumDims - 2; ++i) {
+        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
+                                   patchIndices[1] / m_patchStrides[i]};
+        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
+        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];
+
+        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1],
+                                    patchOffsets[1] / m_outputStrides[i+1]};
+        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1];
+        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1];
+
+        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
+        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
+      }
+    }
+    inputIndices[0] += (patchIndices[0] + patchOffsets[0]);
+    inputIndices[1] += (patchIndices[1] + patchOffsets[1]);
+
+    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
+      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
+      return rslt;
+    }
+    else {
+      EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
+      values[0] = m_impl.coeff(inputIndices[0]);
+      values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
+      for (int i = 1; i < PacketSize-1; ++i) {
+        values[i] = coeff(index+i);
+      }
+      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+      return rslt;
+    }
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+    const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() +
+                                           TensorOpCost::MulCost<Index>() +
+                                           2 * TensorOpCost::AddCost<Index>());
+    return m_impl.costPerCoeff(vectorized) +
+           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+  }
+
+  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+  Dimensions m_dimensions;
+  array<Index, NumDims> m_outputStrides;
+  array<Index, NumDims-1> m_inputStrides;
+  array<Index, NumDims-1> m_patchStrides;
+
+  TensorEvaluator<ArgType, Device> m_impl;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
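A usage sketch of the patch expression via TensorBase::extract_patches; shapes are illustrative:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(5, 4);
  t.setRandom();

  // Every 3x2 window of t, one patch per valid offset -> a rank-3 result
  // of shape (3, 2, 9) in column-major storage.
  Eigen::array<Eigen::Index, 2> patch_dims = {3, 2};
  Eigen::Tensor<float, 3> patches = t.extract_patches(patch_dims);

  std::cout << patches.dimension(0) << " x " << patches.dimension(1)
            << " x " << patches.dimension(2) << "\n";
}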
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
new file mode 100644
index 000000000..1655a813e
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
@@ -0,0 +1,276 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
+#define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
+
+namespace Eigen {
+namespace internal {
+
+namespace {
+
+EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
+#ifdef __CUDA_ARCH__
+  // We don't support 3d kernels since we currently only use 1 and
+  // 2d kernels.
+  assert(threadIdx.z == 0);
+  return clock64() +
+      blockIdx.x * blockDim.x + threadIdx.x +
+      gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);
+
+#elif defined _WIN32
+  // Use the current time as a baseline.
+  SYSTEMTIME st;
+  GetSystemTime(&st);
+  int time = st.wSecond + 1000 * st.wMilliseconds;
+  // Mix in a random number to make sure that we get different seeds if
+  // we try to generate seeds faster than the clock resolution.
+  // We need 2 random values since the generator only generates 16 bits at
+  // a time (https://msdn.microsoft.com/en-us/library/398ax69y.aspx)
+  int rnd1 = ::rand();
+  int rnd2 = ::rand();
+  uint64_t rnd = (rnd1 | rnd2 << 16) ^ time;
+  return rnd;
+
+#elif defined __APPLE__
+  // Same approach as for win32, except that the random number generator
+  // is better (see https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/random.3.html#//apple_ref/doc/man/3/random).
+  uint64_t rnd = ::random() ^ mach_absolute_time();
+  return rnd;
+
+#else
+  // Augment the current time with pseudo random number generation
+  // to ensure that we get different seeds if we try to generate seeds
+  // faster than the clock resolution.
+  timespec ts;
+  clock_gettime(CLOCK_REALTIME, &ts);
+  uint64_t rnd = ::random() ^ ts.tv_nsec;
+  return rnd;
+#endif
+}
+
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state) {
+  // TODO: Unify with the implementation in the non blocking thread pool.
+  uint64_t current = *state;
+  // Update the internal state
+  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+  // Generate the random output (using the PCG-XSH-RS scheme)
+  return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
+}
+
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) {
+  seed = seed ? seed : get_random_seed();
+  return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+}
+
+}  // namespace
+
+
+template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+T RandomToTypeUniform(uint64_t* state) {
+  unsigned rnd = PCG_XSH_RS_generator(state);
+  return static_cast<T>(rnd);
+}
+
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state) {
+  Eigen::half result;
+  // Generate 10 random bits for the mantissa
+  unsigned rnd = PCG_XSH_RS_generator(state);
+  result.x = static_cast<uint16_t>(rnd & 0x3ffu);
+  // Set the exponent
+  result.x |= (static_cast<uint16_t>(15) << 10);
+  // Return the final result
+  return result - Eigen::half(1.0f);
+}
+
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float RandomToTypeUniform<float>(uint64_t* state) {
+  typedef union {
+    uint32_t raw;
+    float fp;
+  } internal;
+  internal result;
+  // Generate 23 random bits for the mantissa
+  const unsigned rnd = PCG_XSH_RS_generator(state);
+  result.raw = rnd & 0x7fffffu;
+  // Set the exponent
+  result.raw |= (static_cast<uint32_t>(127) << 23);
+  // Return the final result
+  return result.fp - 1.0f;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double RandomToTypeUniform<double>(uint64_t* state) {
+  typedef union {
+    uint64_t raw;
+    double dp;
+  } internal;
+  internal result;
+  result.raw = 0;
+  // Generate 52 random bits for the mantissa
+  // First generate the upper 20 bits
+  unsigned rnd1 = PCG_XSH_RS_generator(state) & 0xfffffu;
+  // Then generate the lower 32 bits
+  unsigned rnd2 = PCG_XSH_RS_generator(state);
+  result.raw = (static_cast<uint64_t>(rnd1) << 32) | rnd2;
+  // Set the exponent
+  result.raw |= (static_cast<uint64_t>(1023) << 52);
+  // Return the final result
+  return result.dp - 1.0;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state) {
+  return std::complex<float>(RandomToTypeUniform<float>(state),
+                             RandomToTypeUniform<float>(state));
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state) {
+  return std::complex<double>(RandomToTypeUniform<double>(state),
+                              RandomToTypeUniform<double>(state));
+}
+
+template <typename T> class UniformRandomGenerator {
+ public:
+  static const bool PacketAccess = true;
+
+  // Uses the given "seed" if non-zero, otherwise uses a random seed.
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
+      uint64_t seed = 0) {
+    m_state = PCG_XSH_RS_state(seed);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
+      const UniformRandomGenerator& other) {
+    m_state = other.m_state;
+  }
+
+  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  T operator()(Index i) const {
+    uint64_t local_state = m_state + i;
+    T result = RandomToTypeUniform<T>(&local_state);
+    m_state = local_state;
+    return result;
+  }
+
+  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  Packet packetOp(Index i) const {
+    const int packetSize = internal::unpacket_traits<Packet>::size;
+    EIGEN_ALIGN_MAX T values[packetSize];
+    uint64_t local_state = m_state + i;
+    for (int j = 0; j < packetSize; ++j) {
+      values[j] = RandomToTypeUniform<T>(&local_state);
+    }
+    m_state = local_state;
+    return internal::pload<Packet>(values);
+  }
+
+ private:
+  mutable uint64_t m_state;
+};
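The generator above combines a PCG-XSH-RS state update with a floating-point bit trick: 23 random mantissa bits plus a fixed exponent yield a float in [1,2), and subtracting 1 maps it to [0,1). A standalone re-implementation of those two pieces (the seed state below is an arbitrary illustrative value):

#include <cstdint>
#include <cstdio>
#include <cstring>

// One PCG-XSH-RS step: advance the LCG state, then xorshift-and-
// random-shift the old state down to 32 output bits.
static uint32_t pcg_xsh_rs(uint64_t* state) {
  const uint64_t current = *state;
  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
  return static_cast<uint32_t>((current ^ (current >> 22)) >> (22 + (current >> 61)));
}

static float uniform_float(uint64_t* state) {
  uint32_t bits = (pcg_xsh_rs(state) & 0x7fffffu)   // 23 mantissa bits
                | (127u << 23);                     // biased exponent 0 -> [1,2)
  float f;
  std::memcpy(&f, &bits, sizeof f);                 // type-pun without UB
  return f - 1.0f;                                  // shift to [0,1)
}

int main() {
  uint64_t state = 0x853c49e6748fea9bULL;           // arbitrary nonzero seed state
  for (int i = 0; i < 4; ++i) std::printf("%f\n", uniform_float(&state));
}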
+
+template <typename Scalar>
+struct functor_traits<UniformRandomGenerator<Scalar> > {
+  enum {
+    // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)).
+    Cost = 12 * NumTraits<Scalar>::AddCost *
+           ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)),
+    PacketAccess = UniformRandomGenerator<Scalar>::PacketAccess
+  };
+};
+
+
+
+template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+T RandomToTypeNormal(uint64_t* state) {
+  // Use the ratio of uniform method to generate numbers following a normal
+  // distribution. See for example Numerical Recipes chapter 7.3.9 for the
+  // details.
+  T u, v, q;
+  do {
+    u = RandomToTypeUniform<T>(state);
+    v = T(1.7156) * (RandomToTypeUniform<T>(state) - T(0.5));
+    const T x = u - T(0.449871);
+    const T y = numext::abs(v) + T(0.386595);
+    q = x*x + y * (T(0.196)*y - T(0.25472)*x);
+  } while (q > T(0.27597) &&
+           (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u));
+
+  return v/u;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state) {
+  return std::complex<float>(RandomToTypeNormal<float>(state),
+                             RandomToTypeNormal<float>(state));
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state) {
+  return std::complex<double>(RandomToTypeNormal<double>(state),
+                              RandomToTypeNormal<double>(state));
+}
+
+
+template <typename T> class NormalRandomGenerator {
+ public:
+  static const bool PacketAccess = true;
+
+  // Uses the given "seed" if non-zero, otherwise uses a random seed.
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) {
+    m_state = PCG_XSH_RS_state(seed);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(
+      const NormalRandomGenerator& other) {
+    m_state = other.m_state;
+  }
+
+  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  T operator()(Index i) const {
+    uint64_t local_state = m_state + i;
+    T result = RandomToTypeNormal<T>(&local_state);
+    m_state = local_state;
+    return result;
+  }
+
+  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+  Packet packetOp(Index i) const {
+    const int packetSize = internal::unpacket_traits<Packet>::size;
+    EIGEN_ALIGN_MAX T values[packetSize];
+    uint64_t local_state = m_state + i;
+    for (int j = 0; j < packetSize; ++j) {
+      values[j] = RandomToTypeNormal<T>(&local_state);
+    }
+    m_state = local_state;
+    return internal::pload<Packet>(values);
+  }
+
+ private:
+  mutable uint64_t m_state;
+};
+
+
+template <typename Scalar>
+struct functor_traits<NormalRandomGenerator<Scalar> > {
+  enum {
+    // On average, we need to generate about 3 random numbers
+    // 15 mul, 8 add, 1.5 logs
+    Cost = 3 * functor_traits<UniformRandomGenerator<Scalar> >::Cost +
+           15 * NumTraits<Scalar>::AddCost + 8 * NumTraits<Scalar>::AddCost +
+           3 * functor_traits<scalar_log_op<Scalar> >::Cost / 2,
+    PacketAccess = NormalRandomGenerator<Scalar>::PacketAccess
+  };
+};
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
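A usage sketch: TensorBase::setRandom() draws from UniformRandomGenerator by default, and a generator type can be passed explicitly. This assumes it is acceptable to name the internal generator types above directly:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> u(2, 4), n(2, 4);

  // Uniform values in [0,1) via the default generator.
  u.setRandom();
  // Normally distributed values via the ratio-of-uniforms generator.
  n.setRandom<Eigen::internal::NormalRandomGenerator<float> >();

  std::cout << u << "\n\n" << n << "\n";
}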
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
new file mode 100644
index 000000000..41d0d0022
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -0,0 +1,781 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
+
+namespace Eigen {
+
+/** \class TensorReduction
+  * \ingroup CXX11_Tensor_Module
+  *
+  * \brief Tensor reduction class.
+  *
+  */
+
+namespace internal {
+  template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ >
+  struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
+ : traits<XprType>
+{
+  typedef traits<XprType> XprTraits;
+  typedef typename XprTraits::Scalar Scalar;
+  typedef typename XprTraits::StorageKind StorageKind;
+  typedef typename XprTraits::Index Index;
+  typedef typename XprType::Nested Nested;
+  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
+  static const int Layout = XprTraits::Layout;
+
+  template <class T> struct MakePointer {
+    // Intermediate typedef to workaround MSVC issue.
+    typedef MakePointer_<T> MakePointerT;
+    typedef typename MakePointerT::Type Type;
+  };
+};
+
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
+{
+  typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
+};
+
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
+{
+  typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
+};
+
+
+template <typename OutputDims> struct DimInitializer {
+  template <typename InputDims, typename ReducedDims> EIGEN_DEVICE_FUNC
+  static void run(const InputDims& input_dims,
+                  const array<bool, internal::array_size<InputDims>::value>& reduced,
+                  OutputDims* output_dims, ReducedDims* reduced_dims) {
+    const int NumInputDims = internal::array_size<InputDims>::value;
+    int outputIndex = 0;
+    int reduceIndex = 0;
+    for (int i = 0; i < NumInputDims; ++i) {
+      if (reduced[i]) {
+        (*reduced_dims)[reduceIndex] = input_dims[i];
+        ++reduceIndex;
+      } else {
+        (*output_dims)[outputIndex] = input_dims[i];
+        ++outputIndex;
+      }
+    }
+  }
+};
+
+template <> struct DimInitializer<Sizes<> > {
+  template <typename InputDims, typename Index, size_t Rank> EIGEN_DEVICE_FUNC
+  static void run(const InputDims& input_dims, const array<bool, Rank>&,
+                  Sizes<>*, array<Index, Rank>* reduced_dims) {
+    const int NumInputDims = internal::array_size<InputDims>::value;
+    for (int i = 0; i < NumInputDims; ++i) {
+      (*reduced_dims)[i] = input_dims[i];
+    }
+  }
+};
+
+
+template <typename ReducedDims, int NumTensorDims, int Layout>
+struct are_inner_most_dims {
+  static const bool value = false;
+};
+template <typename ReducedDims, int NumTensorDims, int Layout>
+struct preserve_inner_most_dims {
+  static const bool value = false;
+};
+
+#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
+template <typename ReducedDims, int NumTensorDims>
+struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
+  static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+  static const bool tmp2 = index_statically_eq<ReducedDims>(0, 0);
+  static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value-1, array_size<ReducedDims>::value-1);
+  static const bool value = tmp1 & tmp2 & tmp3;
+};
+template <typename ReducedDims, int NumTensorDims>
+struct are_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
+  static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+  static const bool tmp2 = index_statically_eq<ReducedDims>(0, NumTensorDims - array_size<ReducedDims>::value);
+  static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
+  static const bool value = tmp1 & tmp2 & tmp3;
+
+};
+template <typename ReducedDims, int NumTensorDims>
+struct preserve_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
+  static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+  static const bool tmp2 = index_statically_gt<ReducedDims>(0, 0);
+  static const bool value = tmp1 & tmp2;
+
+};
+template <typename ReducedDims, int NumTensorDims>
+struct preserve_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
+  static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+  static const bool tmp2 = index_statically_lt<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
+  static const bool value = tmp1 & tmp2;
+};
+#endif
+
+
+template <int DimIndex, typename Self, typename Op>
+struct GenericDimReducer {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
+    EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
+      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
+      GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
+    }
+  }
+};
+template <typename Self, typename Op>
+struct GenericDimReducer<0, Self, Op> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
+    for (int j = 0; j < self.m_reducedDims[0]; ++j) {
+      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
+      reducer.reduce(self.m_impl.coeff(input), accum);
+    }
+  }
+};
+template <typename Self, typename Op>
+struct GenericDimReducer<-1, Self, Op> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, typename Self::CoeffReturnType* accum) {
+    reducer.reduce(self.m_impl.coeff(index), accum);
+  }
+};
+
+template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct InnerMostDimReducer {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
+    typename Self::CoeffReturnType accum = reducer.initialize();
+    for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
+      reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+    }
+    return reducer.finalize(accum);
+  }
+};
+
+template <typename Self, typename Op>
+struct InnerMostDimReducer<Self, Op, true> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
+    const int packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
+    const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;
+    typename Self::PacketReturnType p = reducer.template initializePacket<typename Self::PacketReturnType>();
+    for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
+      reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &p);
+    }
+    typename Self::CoeffReturnType accum = reducer.initialize();
+    for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
+      reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+    }
+    return reducer.finalizeBoth(accum, p);
+  }
+};
+
+template <int DimIndex, typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct InnerMostDimPreserver {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
+    eigen_assert(false && "should never be called");
+  }
+};
+
+template <int DimIndex, typename Self, typename Op>
+struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
+    EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
+      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
+      InnerMostDimPreserver<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
+    }
+  }
+};
+
+template <typename Self, typename Op>
+struct InnerMostDimPreserver<0, Self, Op, true> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
+    for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) {
+      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
+      reducer.reducePacket(self.m_impl.template packet<Unaligned>(input), accum);
+    }
+  }
+};
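For a concrete picture of the recursion above: with two reduced dimensions, GenericDimReducer<1, ...> expands into two nested loops that feed single input coefficients to the reducer. A standalone sketch with made-up dims, strides, and a sum reducer:

#include <cstdio>

// How GenericDimReducer<1, Self, Op>::reduce unrolls: one loop per reduced
// dimension, innermost level accumulating single coefficients.
int main() {
  const long reduced_dims[2] = {2, 3};      // sizes of the reduced dimensions
  const long reduced_strides[2] = {1, 5};   // their strides in the flat input
  const long first_index = 7;               // offset of this output coefficient

  float accum = 0.0f;                       // SumReducer: initialize()
  for (long j1 = 0; j1 < reduced_dims[1]; ++j1)      // DimIndex == 1
    for (long j0 = 0; j0 < reduced_dims[0]; ++j0) {  // DimIndex == 0
      const long input = first_index + j1 * reduced_strides[1] + j0 * reduced_strides[0];
      accum += static_cast<float>(input);   // reducer.reduce(coeff(input), &accum)
    }
  std::printf("reduced over %ld coefficients -> %f\n",
              reduced_dims[0] * reduced_dims[1], accum);
}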
+template <typename Self, typename Op>
+struct InnerMostDimPreserver<-1, Self, Op, true> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
+    eigen_assert(false && "should never be called");
+  }
+};
+
+// Default full reducer
+template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct FullReducer {
+  static const bool HasOptimizedImplementation = false;
+
+  static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::CoeffReturnType* output) {
+    const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions());
+    *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
+  }
+};
+
+
+#ifdef EIGEN_USE_THREADS
+// Multithreaded full reducers
+template <typename Self, typename Op,
+          bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct FullReducerShard {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& self, typename Self::Index firstIndex,
+                  typename Self::Index numValuesToReduce, Op& reducer,
+                  typename Self::CoeffReturnType* output) {
+    *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(
+        self, firstIndex, numValuesToReduce, reducer);
+  }
+};
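The ThreadPoolDevice specialization that follows shards the flat input into equal blocks, reduces each block concurrently, reduces the leftover tail on the calling thread, and then merges the per-shard results. A minimal std::thread sketch of that scheme, with illustrative sizes (Eigen itself dispatches through its thread-pool device and a Barrier rather than raw threads):

#include <cstdio>
#include <numeric>
#include <thread>
#include <vector>

int main() {
  std::vector<float> data(1003, 1.0f);
  const long num_threads = 4;
  const long blocksize = static_cast<long>(data.size()) / num_threads;
  const long numblocks = blocksize > 0 ? static_cast<long>(data.size()) / blocksize : 0;

  std::vector<float> shards(numblocks, 0.0f);
  std::vector<std::thread> workers;
  for (long i = 0; i < numblocks; ++i)
    workers.emplace_back([&, i] {
      // Each shard reduces one contiguous block, like FullReducerShard::run.
      shards[i] = std::accumulate(data.begin() + i * blocksize,
                                  data.begin() + (i + 1) * blocksize, 0.0f);
    });

  // The tail that doesn't fill a block is reduced on the calling thread.
  float final_shard = std::accumulate(data.begin() + numblocks * blocksize, data.end(), 0.0f);
  for (std::thread& w : workers) w.join();
  for (float s : shards) final_shard += s;  // merge, then finalize()
  std::printf("sum = %f\n", final_shard);   // 1003
}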
num_coeffs / blocksize : 0; + eigen_assert(num_coeffs >= numblocks * blocksize); + + Barrier barrier(internal::convert_index(numblocks)); + MaxSizeVector shards(numblocks, reducer.initialize()); + for (Index i = 0; i < numblocks; ++i) { + device.enqueue_with_barrier(&barrier, &FullReducerShard::run, + self, i * blocksize, blocksize, reducer, + &shards[i]); + } + typename Self::CoeffReturnType finalShard; + if (numblocks * blocksize < num_coeffs) { + finalShard = InnerMostDimReducer::reduce( + self, numblocks * blocksize, num_coeffs - numblocks * blocksize, + reducer); + } else { + finalShard = reducer.initialize(); + } + barrier.Wait(); + + for (Index i = 0; i < numblocks; ++i) { + reducer.reduce(shards[i], &finalShard); + } + *output = reducer.finalize(finalShard); + } +}; + +#endif + + +// Default inner reducer +template +struct InnerReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + +// Default outer reducer +template +struct OuterReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + + +#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) +template +__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*); + + +#ifdef EIGEN_HAS_CUDA_FP16 +template +__global__ void ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*); +template +__global__ void FullReductionKernelHalfFloat(R, const S, I, half*, half2*); +template +__global__ void InnerReductionKernelHalfFloat(R, const S, I, I, half*); + +#endif + +template +__global__ void InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*); + +template +__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*); +#endif + +} // end namespace internal + + +template class MakePointer_> +class TensorReductionOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims) + { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const XprType& expression() const { return m_expr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dims& dims() const { return m_dims; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Op& reducer() const { return m_reducer; } + + protected: + typename XprType::Nested m_expr; + const Dims m_dims; + const Op m_reducer; +}; + + +// Eval as rvalue +template class MakePointer_, typename Device> +struct TensorEvaluator, Device> +{ + typedef TensorReductionOp XprType; + typedef typename XprType::Index Index; + typedef ArgType ChildType; + typedef typename 
+
+// Eval as rvalue
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
+{
+  typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
+  typedef typename XprType::Index Index;
+  typedef ArgType ChildType;
+  typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
+  static const int NumInputDims = internal::array_size<InputDimensions>::value;
+  static const int NumReducedDims = internal::array_size<Dims>::value;
+  static const int NumOutputDims = NumInputDims - NumReducedDims;
+  typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
+  typedef typename XprType::Scalar Scalar;
+  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
+  static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
+  typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+  enum {
+    IsAligned = false,
+    PacketAccess = Self::InputPacketAccess && Op::PacketAccess,
+    Layout = TensorEvaluator<ArgType, Device>::Layout,
+    CoordAccess = false, // to be implemented
+    RawAccess = false
+  };
+
+  static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
+  static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims<Dims, NumInputDims, Layout>::value;
+  static const bool RunningFullReduction = (NumOutputDims==0);
+
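+  // Illustrative example: reducing a ColMajor 4x5x6 tensor over dims {0, 1}
+  // reduces the two innermost dimensions, so each of the 6 outputs consumes
+  // one contiguous run of 4 * 5 = 20 input values; when the reduced dims are
+  // known at compile time (e.g. via Eigen::IndexList) this is detected through
+  // ReducingInnerMostDims and the contiguous InnerMostDimReducer path is taken.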
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
+  {
+    EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
+                        YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+    // Build the bitmap indicating if an input dimension is reduced or not.
+    for (int i = 0; i < NumInputDims; ++i) {
+      m_reduced[i] = false;
+    }
+    for (int i = 0; i < NumReducedDims; ++i) {
+      eigen_assert(op.dims()[i] >= 0);
+      eigen_assert(op.dims()[i] < NumInputDims);
+      m_reduced[op.dims()[i]] = true;
+    }
+
+    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+    internal::DimInitializer<Dimensions>::run(input_dims, m_reduced, &m_dimensions, &m_reducedDims);
+
+    // Precompute output strides.
+    if (NumOutputDims > 0) {
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+        m_outputStrides[0] = 1;
+        for (int i = 1; i < NumOutputDims; ++i) {
+          m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+        }
+      } else {
+        m_outputStrides.back() = 1;
+        for (int i = NumOutputDims - 2; i >= 0; --i) {
+          m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+        }
+      }
+    }
+
+    // Precompute input strides.
+    if (NumInputDims > 0) {
+      array<Index, NumInputDims> input_strides;
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+        input_strides[0] = 1;
+        for (int i = 1; i < NumInputDims; ++i) {
+          input_strides[i] = input_strides[i-1] * input_dims[i-1];
+        }
+      } else {
+        input_strides.back() = 1;
+        for (int i = NumInputDims - 2; i >= 0; --i) {
+          input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
+        }
+      }
+
+      int outputIndex = 0;
+      int reduceIndex = 0;
+      for (int i = 0; i < NumInputDims; ++i) {
+        if (m_reduced[i]) {
+          m_reducedStrides[reduceIndex] = input_strides[i];
+          ++reduceIndex;
+        } else {
+          m_preservedStrides[outputIndex] = input_strides[i];
+          ++outputIndex;
+        }
+      }
+    }
+
+    // Special case for full reductions
+    if (NumOutputDims == 0) {
+      m_preservedStrides[0] = internal::array_prod(input_dims);
+    }
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
+    m_impl.evalSubExprsIfNeeded(NULL);
+
+    // Use the FullReducer if possible.
+    if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction &&
+        internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
+        ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
+         !RunningOnGPU))) {
+      bool need_assign = false;
+      if (!data) {
+        m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
+        data = m_result;
+        need_assign = true;
+      }
+      Op reducer(m_reducer);
+      internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
+      return need_assign;
+    }
+    else if(RunningOnSycl){
+      const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+      const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+      if (!data) {
+        data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+        m_result = data;
+      }
+      Op reducer(m_reducer);
+      internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+      return (m_result != NULL);
+    }
+
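+    // Note on the return value (a reading of the evaluator contract): true
+    // means the caller still has to pull the result out coefficient-by-
+    // coefficient via coeff()/packet(), while false means the reduction has
+    // already been written into the destination buffer.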
+    // Attempt to use an optimized reduction.
+    else if (RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) {
+      bool reducing_inner_dims = true;
+      for (int i = 0; i < NumReducedDims; ++i) {
+        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+          reducing_inner_dims &= m_reduced[i];
+        } else {
+          reducing_inner_dims &= m_reduced[NumInputDims - 1 - i];
+        }
+      }
+      if (internal::InnerReducer<Self, Op, Device>::HasOptimizedImplementation &&
+          (reducing_inner_dims || ReducingInnerMostDims)) {
+        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+        if (!data) {
+          if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
+            data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+            m_result = data;
+          }
+          else {
+            return true;
+          }
+        }
+        Op reducer(m_reducer);
+        if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
+          if (m_result) {
+            m_device.deallocate(m_result);
+            m_result = NULL;
+          }
+          return true;
+        } else {
+          return (m_result != NULL);
+        }
+      }
+
+      bool preserving_inner_dims = true;
+      for (int i = 0; i < NumReducedDims; ++i) {
+        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+          preserving_inner_dims &= m_reduced[NumInputDims - 1 - i];
+        } else {
+          preserving_inner_dims &= m_reduced[i];
+        }
+      }
+      if (internal::OuterReducer<Self, Op, Device>::HasOptimizedImplementation &&
+          preserving_inner_dims) {
+        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+        if (!data) {
+          if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
+            data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+            m_result = data;
+          }
+          else {
+            return true;
+          }
+        }
+        Op reducer(m_reducer);
+        if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
+          if (m_result) {
+            m_device.deallocate(m_result);
+            m_result = NULL;
+          }
+          return true;
+        } else {
+          return (m_result != NULL);
+        }
+      }
+    }
+    return true;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_impl.cleanup();
+    if (m_result) {
+      m_device.deallocate(m_result);
+      m_result = NULL;
+    }
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+  {
+    if ((RunningOnSycl || RunningFullReduction || RunningOnGPU) && m_result) {
+      return *(m_result + index);
+    }
+    Op reducer(m_reducer);
+    if (ReducingInnerMostDims || RunningFullReduction) {
+      const Index num_values_to_reduce =
+        (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
+      return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
+                                                             num_values_to_reduce, reducer);
+    } else {
+      typename Self::CoeffReturnType accum = reducer.initialize();
+      internal::GenericDimReducer<NumReducedDims-1, Self, Op>::reduce(*this, firstInput(index), reducer, &accum);
+      return reducer.finalize(accum);
+    }
+  }
+
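+  // When no optimized reducer has populated m_result, each call to the coeff()
+  // above recomputes the reduction for that single output coefficient, so
+  // assigning an N x K -> N row reduction this way costs on the order of
+  // N * K scalar reduce() invocations in total.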
+  // TODO(bsteiner): provide a more efficient implementation.
+  template<int LoadMode>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+  {
+    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index + PacketSize - 1 < Index(internal::array_prod(dimensions())));
+
+    if (RunningOnGPU && m_result) {
+      return internal::pload<PacketReturnType>(m_result + index);
+    }
+
+    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+    if (ReducingInnerMostDims) {
+      const Index num_values_to_reduce =
+        (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
+      const Index firstIndex = firstInput(index);
+      for (Index i = 0; i < PacketSize; ++i) {
+        Op reducer(m_reducer);
+        values[i] = internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstIndex + i * num_values_to_reduce,
+                                                                    num_values_to_reduce, reducer);
+      }
+    } else if (PreservingInnerMostDims) {
+      const Index firstIndex = firstInput(index);
+      const int innermost_dim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : NumOutputDims - 1;
+      // TBD: extend this to the n innermost dimensions that we preserve.
+      if (((firstIndex % m_dimensions[innermost_dim]) + PacketSize - 1) < m_dimensions[innermost_dim]) {
+        Op reducer(m_reducer);
+        typename Self::PacketReturnType accum = reducer.template initializePacket<typename Self::PacketReturnType>();
+        internal::InnerMostDimPreserver<NumReducedDims-1, Self, Op>::reduce(*this, firstIndex, reducer, &accum);
+        return reducer.finalizePacket(accum);
+      } else {
+        for (int i = 0; i < PacketSize; ++i) {
+          values[i] = coeff(index + i);
+        }
+      }
+    } else {
+      for (int i = 0; i < PacketSize; ++i) {
+        values[i] = coeff(index + i);
+      }
+    }
+    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+    return rslt;
+  }
+
+  // Must be called after evalSubExprsIfNeeded().
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+    if (RunningFullReduction && m_result) {
+      return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+    } else {
+      const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+      const double compute_cost = num_values_to_reduce * internal::functor_traits<Op>::Cost;
+      return m_impl.costPerCoeff(vectorized) * num_values_to_reduce +
+          TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+    }
+  }
+
+  EIGEN_DEVICE_FUNC typename MakePointer_<CoeffReturnType>::Type data() const { return m_result; }
+  /// required by sycl in order to extract the accessor
+  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+  /// added for sycl in order to construct the buffer from the sycl device
+  const Device& device() const{return m_device;}
+  /// added for sycl in order to re-construct the reduction eval on the device for the sub-kernel
+  const Dims& xprDims() const {return m_xpr_dims;}
+
+
+  private:
+  template <int, typename, typename> friend struct internal::GenericDimReducer;
+  template <typename, typename, bool> friend struct internal::InnerMostDimReducer;
+  template <int, typename, typename, bool> friend struct internal::InnerMostDimPreserver;
+  template <typename S, typename O, typename D, bool V> friend struct internal::FullReducer;
+#ifdef EIGEN_USE_THREADS
+  template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
+#endif
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+#ifdef EIGEN_HAS_CUDA_FP16
+  template <typename S, typename R, typename I> friend void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
+  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
+  template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+#endif
+  template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+
+  template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+#endif
+
+  template <typename S, typename O, typename D> friend struct internal::InnerReducer;
+
+  // Returns the Index in the input tensor of the first value that needs to be
+  // used to compute the reduction at output index "index".
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
+    if (ReducingInnerMostDims) {
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+        return index * m_preservedStrides[0];
+      } else {
+        return index * m_preservedStrides[NumPreservedStrides - 1];
+      }
+    }
+    // TBD: optimize the case where we preserve the innermost dimensions.
+    Index startInput = 0;
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+      for (int i = NumOutputDims - 1; i > 0; --i) {
+        // This is index_i in the output tensor.
+        const Index idx = index / m_outputStrides[i];
+        startInput += idx * m_preservedStrides[i];
+        index -= idx * m_outputStrides[i];
+      }
+      if (PreservingInnerMostDims) {
+        eigen_assert(m_preservedStrides[0] == 1);
+        startInput += index;
+      } else {
+        startInput += index * m_preservedStrides[0];
+      }
+    } else {
+      for (int i = 0; i < NumOutputDims - 1; ++i) {
+        // This is index_i in the output tensor.
+        const Index idx = index / m_outputStrides[i];
+        startInput += idx * m_preservedStrides[i];
+        index -= idx * m_outputStrides[i];
+      }
+      if (PreservingInnerMostDims) {
+        eigen_assert(m_preservedStrides[NumPreservedStrides - 1] == 1);
+        startInput += index;
+      } else {
+        startInput += index * m_preservedStrides[NumPreservedStrides - 1];
+      }
+    }
+    return startInput;
+  }
+
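+  // Worked example for firstInput (illustrative): a ColMajor 4x5x6 input
+  // reduced over dim 1 yields a 4x6 output with m_outputStrides = {1, 4} and
+  // m_preservedStrides = {1, 20}. For output index 9 (row 1, col 2):
+  // idx = 9 / 4 = 2, so startInput = 2 * 20 = 40 and index becomes
+  // 9 - 2 * 4 = 1; the final step adds 1 * m_preservedStrides[0] = 1, giving
+  // first input 41, with the five reduced values m_reducedStrides[0] = 4 apart.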
+  // Bitmap indicating if an input dimension is reduced or not.
+  array<bool, NumInputDims> m_reduced;
+  // Dimensions of the output of the operation.
+  Dimensions m_dimensions;
+  // Precomputed strides for the output tensor.
+  array<Index, NumOutputDims> m_outputStrides;
+  // Subset of strides of the input tensor for the non-reduced dimensions.
+  // Indexed by output dimensions.
+  static const int NumPreservedStrides = max_n_1<NumOutputDims>::size;
+  array<Index, NumPreservedStrides> m_preservedStrides;
+
+  // Subset of strides of the input tensor for the reduced dimensions.
+  // Indexed by reduced dimensions.
+  array<Index, NumReducedDims> m_reducedStrides;
+  // Size of the input dimensions that are reduced.
+  // Indexed by reduced dimensions.
+  array<Index, NumReducedDims> m_reducedDims;
+
+  // Evaluator for the input expression.
+  TensorEvaluator<ArgType, Device> m_impl;
+
+  // Operation to apply for computing the reduction.
+  Op m_reducer;
+
+  // For full reductions
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+  static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
+  static const bool RunningOnSycl = false;
+#elif defined(EIGEN_USE_SYCL)
+static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
+static const bool RunningOnGPU = false;
+#else
+  static const bool RunningOnGPU = false;
+  static const bool RunningOnSycl = false;
+#endif
+  typename MakePointer_<CoeffReturnType>::Type m_result;
+
+  const Device& m_device;
+  const Dims& m_xpr_dims;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
diff --git a/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
new file mode 100644
index 000000000..65638b6a8
--- /dev/null
+++ b/src/reference/fancy_poc/housekeeper/src/housekeeper_client/housekeeper_ai/housekeeper/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -0,0 +1,750 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
+
+namespace Eigen {
+namespace internal {
+
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+// Full reducers for GPU, don't vectorize for now
+
+// Reducer function that enables multiple cuda thread to safely accumulate at the same
+// output address. It basically reads the current value of the output variable, and
+// attempts to update it with the new value. If in the meantime another cuda thread
+// updated the content of the output address it will try again.
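+// Sketch of the compare-and-swap pattern used below (illustrative): each
+// thread recomputes newval = reduce(accum, oldval) and retries
+//   atomicCAS(output, oldval, newval)
+// until the CAS observes the oldval it expected, i.e. until no other thread
+// has modified *output between the read and the swap.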
+template <typename T, typename R>
+__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
+#if __CUDA_ARCH__ >= 300
+  if (sizeof(T) == 4)
+  {
+    unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
+    unsigned int newval = oldval;
+    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+    if (newval == oldval) {
+      return;
+    }
+    unsigned int readback;
+    while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
+      oldval = readback;
+      newval = oldval;
+      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+      if (newval == oldval) {
+        return;
+      }
+    }
+  }
+  else if (sizeof(T) == 8) {
+    unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
+    unsigned long long newval = oldval;
+    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+    if (newval == oldval) {
+      return;
+    }
+    unsigned long long readback;
+    while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
+      oldval = readback;
+      newval = oldval;
+      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+      if (newval == oldval) {
+        return;
+      }
+    }
+  }
+  else {
+    assert(0 && "Wordsize not supported");
+  }
+#else
+  assert(0 && "Shouldn't be called on unsupported device");
+#endif
+}
+
+// We extend atomicExch to support extra data types
+template <typename Type>
+__device__ inline Type atomicExchCustom(Type* address, Type val) {
+  return atomicExch(address, val);
+}
+
+template <>
+__device__ inline double atomicExchCustom(double* address, double val) {
+  unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address);
+  return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val)));
+}
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template