meta.yaml
{% set name = "tsnecuda" %}
{% set version = "2.1.0" %}
package:
  name: '{{ name|lower }}'
  version: '{{ version }}'
source:
  git_url: https://github.com/CannyLab/tsne-cuda.git
  git_rev: HEAD
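# Note: cuda_version is not defined in this recipe; it is assumed to be
# supplied as a conda-build variant (e.g. via a conda_build_config.yaml or the
# --variants flag) and is used below both as a package feature and as the
# build string.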
build:
  features:
    - '{{ cuda_version }}'
  string: '{{ cuda_version }}'
  noarch_python: True
requirements:
  build:
    # - {{ compiler('cxx') }}
    - python
    - gcc_49
    - cmake
    - setuptools
  host:
    - python
    - openblas
    - setuptools
    - numpy >=1.14.1
    - libopenblas
    - libgcc-ng
    - libstdcxx-ng
  run:
    - python
    - openblas
    - numpy >=1.14.1
    - libopenblas
    - libgcc-ng
    - libstdcxx-ng
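# Note: no cudatoolkit dependency is declared above; the recipe appears to
# assume that a CUDA runtime matching the selected cuda_version is already
# present on the build and target machines.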
about:
  home: https://github.com/CannyLab/tsne-cuda
  license: BSD
  license_family: BSD
  license_file: LICENSE.txt
  summary: CUDA implementation of t-SNE with Python bindings
  description: |
    ===========
    tsnecuda
    ===========

    tsnecuda provides an optimized CUDA implementation of the t-SNE algorithm by L. van der Maaten. tsnecuda is able to compute the t-SNE of large numbers of points up to 1200 times faster than other leading libraries, and provides simple Python bindings with a scikit-learn style interface::

        #!/usr/bin/env python

        from tsnecuda import TSNE
        embeddedX = TSNE(n_components=2).fit_transform(X)

    For more information, check out the repository at https://github.com/rmrao/tsne-cuda.
  doc_url: ''
  dev_url: ''
extra:
  recipe-maintainers: ''
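
# A minimal local-build sketch (an assumption, not part of the recipe): with
# conda-build 3+ and a CUDA toolkit installed, the cuda_version variant can be
# passed on the command line, for example:
#
#     conda build . --variants "{cuda_version: ['10.1']}"
#
# Once the resulting package is installed, the scikit-learn style interface
# described in the summary above can be used as:
#
#     from tsnecuda import TSNE
#     embeddedX = TSNE(n_components=2).fit_transform(X)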