
Commit

DOC: pydata/pandas -> pandas-dev/pandas (pandas-dev#14409)
jreback authored and jorisvandenbossche committed Oct 13, 2016
1 parent a40e185 commit 7d40f18
Showing 64 changed files with 118 additions and 118 deletions.
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/attrs_caching.py
@@ -20,4 +20,4 @@ def setup(self):
         self.cur_index = self.df.index
 
     def time_setattr_dataframe_index(self):
-        self.df.index = self.cur_index
+        self.df.index = self.cur_index
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/ctors.py
@@ -49,4 +49,4 @@ def setup(self):
         self.s = Series(([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * 1000))
 
     def time_index_from_series_ctor(self):
-        Index(self.s)
+        Index(self.s)
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/frame_ctor.py
@@ -1703,4 +1703,4 @@ def setup(self):
         self.dict_list = [dict(zip(self.columns, row)) for row in self.frame.values]
 
     def time_series_ctor_from_dict(self):
-        Series(self.some_dict)
+        Series(self.some_dict)
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/hdfstore_bench.py
@@ -348,4 +348,4 @@ def remove(self, f):
         try:
             os.remove(self.f)
         except:
-            pass
+            pass
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/index_object.py
@@ -344,4 +344,4 @@ def setup(self):
         self.mi = MultiIndex.from_product([self.level1, self.level2])
 
     def time_multiindex_with_datetime_level_sliced(self):
-        self.mi[:10].values
+        self.mi[:10].values
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/io_sql.py
@@ -212,4 +212,4 @@ def setup(self):
         self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
 
     def time_sql_write_sqlalchemy(self):
-        self.df.to_sql('test1', self.engine, if_exists='replace')
+        self.df.to_sql('test1', self.engine, if_exists='replace')
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/panel_ctor.py
@@ -61,4 +61,4 @@ def setup(self):
             self.data_frames[x] = self.df
 
     def time_panel_from_dict_two_different_indexes(self):
-        Panel.from_dict(self.data_frames)
+        Panel.from_dict(self.data_frames)
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/panel_methods.py
@@ -53,4 +53,4 @@ def setup(self):
         self.panel = Panel(np.random.randn(100, len(self.index), 1000))
 
     def time_panel_shift_minor(self):
-        self.panel.shift(1, axis='minor')
+        self.panel.shift(1, axis='minor')
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/replace.py
@@ -45,4 +45,4 @@ def setup(self):
         self.ts = Series(np.random.randn(self.N), index=self.rng)
 
     def time_replace_replacena(self):
-        self.ts.replace(np.nan, 0.0, inplace=True)
+        self.ts.replace(np.nan, 0.0, inplace=True)
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/reshape.py
@@ -73,4 +73,4 @@ def setup(self):
                 break
 
     def time_unstack_sparse_keyspace(self):
-        self.idf.unstack()
+        self.idf.unstack()
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/stat_ops.py
@@ -258,4 +258,4 @@ def time_rolling_skew(self):
         rolling_skew(self.arr, self.win)
 
     def time_rolling_kurt(self):
-        rolling_kurt(self.arr, self.win)
+        rolling_kurt(self.arr, self.win)
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/strings.py
@@ -390,4 +390,4 @@ def time_strings_upper(self):
         self.many.str.upper()
 
     def make_series(self, letters, strlen, size):
-        return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+        return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
4 changes: 2 additions & 2 deletions doc/README.rst
@@ -155,9 +155,9 @@ Where to start?
 ---------------
 
 There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
 and `Good as first PR
-<https://github.com/pydata/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_
 where you could start out.
 
 Or maybe you have an idea of your own, by using pandas, looking for something
2 changes: 1 addition & 1 deletion doc/_templates/autosummary/accessor_attribute.rst
@@ -3,4 +3,4 @@
 
 .. currentmodule:: {{ module.split('.')[0] }}
 
-.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
+.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }}
2 changes: 1 addition & 1 deletion doc/_templates/autosummary/accessor_method.rst
@@ -3,4 +3,4 @@
 
 .. currentmodule:: {{ module.split('.')[0] }}
 
-.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
+.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }}
2 changes: 1 addition & 1 deletion doc/source/categorical.rst
@@ -973,7 +973,7 @@ are not numeric data (even in the case that ``.categories`` is numeric).
         print("TypeError: " + str(e))
 
 .. note::
-    If such a function works, please file a bug at https://github.com/pydata/pandas!
+    If such a function works, please file a bug at https://github.com/pandas-dev/pandas!
 
 dtype in apply
 ~~~~~~~~~~~~~~
4 changes: 2 additions & 2 deletions doc/source/comparison_with_sas.rst
@@ -116,7 +116,7 @@ Reading External Data
 
 Like SAS, pandas provides utilities for reading in data from
 many formats. The ``tips`` dataset, found within the pandas
-tests (`csv <https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv>`_)
+tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_)
 will be used in many of the following examples.
 
 SAS provides ``PROC IMPORT`` to read csv data into a data set.
@@ -131,7 +131,7 @@ The pandas method is :func:`read_csv`, which works similarly.
 
 .. ipython:: python
 
-    url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+    url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
     tips = pd.read_csv(url)
     tips.head()
2 changes: 1 addition & 1 deletion doc/source/comparison_with_sql.rst
@@ -23,7 +23,7 @@ structure.
 
 .. ipython:: python
 
-    url = 'https://raw.github.com/pydata/pandas/master/pandas/tests/data/tips.csv'
+    url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
     tips = pd.read_csv(url)
     tips.head()
8 changes: 4 additions & 4 deletions doc/source/conf.py
@@ -301,9 +301,9 @@
 autosummary_generate = glob.glob("*.rst")
 
 # extlinks alias
-extlinks = {'issue': ('https://github.com/pydata/pandas/issues/%s',
+extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
                       'GH'),
-            'wiki': ('https://github.com/pydata/pandas/wiki/%s',
+            'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
                      'wiki ')}
 
 ipython_exec_lines = [
@@ -468,10 +468,10 @@ def linkcode_resolve(domain, info):
     fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
 
     if '+' in pandas.__version__:
-        return "http://github.com/pydata/pandas/blob/master/pandas/%s%s" % (
+        return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % (
             fn, linespec)
     else:
-        return "http://github.com/pydata/pandas/blob/v%s/pandas/%s%s" % (
+        return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % (
             pandas.__version__, fn, linespec)
 
 
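For context, the ``extlinks`` aliases above are what power the short issue references used throughout the pandas docs. As an illustrative sketch (the issue number is arbitrary and not part of this commit), a docs author writes:

    See :issue:`2996` for the original discussion.

and Sphinx expands the role into a link captioned ``GH2996`` that points at https://github.com/pandas-dev/pandas/issues/2996 once this alias change is in place.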
30 changes: 15 additions & 15 deletions doc/source/contributing.rst
@@ -14,11 +14,11 @@ All contributions, bug reports, bug fixes, documentation improvements,
 enhancements and ideas are welcome.
 
 If you are simply looking to start working with the *pandas* codebase, navigate to the
-`GitHub "issues" tab <https://github.com/pydata/pandas/issues>`_ and start looking through
+`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through
 interesting issues. There are a number of issues listed under `Docs
-<https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_
+<https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_
 and `Difficulty Novice
-<https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
+<https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_
 where you could start out.
 
 Or maybe through using *pandas* you have an idea of your own or are looking for something
@@ -27,7 +27,7 @@ about it!
 
 Feel free to ask questions on the `mailing list
 <https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter
-<https://gitter.im/pydata/pandas>`_.
+<https://gitter.im/pandas-dev/pandas>`_.
 
 Bug reports and enhancement requests
 ====================================
@@ -79,7 +79,7 @@ It can very quickly become overwhelming, but sticking to the guidelines below wi
 straightforward and mostly trouble free. As always, if you are having difficulties please
 feel free to ask for help.
 
-The code is hosted on `GitHub <https://www.github.com/pydata/pandas>`_. To
+The code is hosted on `GitHub <https://www.github.com/pandas-dev/pandas>`_. To
 contribute you will need to sign up for a `free GitHub account
 <https://github.com/signup/free>`_. We use `Git <http://git-scm.com/>`_ for
 version control to allow many people to work together on the project.
@@ -103,12 +103,12 @@ Forking
 -------
 
 You will need your own fork to work on the code. Go to the `pandas project
-page <https://github.com/pydata/pandas>`_ and hit the ``Fork`` button. You will
+page <https://github.com/pandas-dev/pandas>`_ and hit the ``Fork`` button. You will
 want to clone your fork to your machine::
 
     git clone git@github.com:your-user-name/pandas.git pandas-yourname
    cd pandas-yourname
-    git remote add upstream git://github.com/pydata/pandas.git
+    git remote add upstream git://github.com/pandas-dev/pandas.git
 
 This creates the directory `pandas-yourname` and connects your repository to
 the upstream (main project) *pandas* repository.
@@ -467,7 +467,7 @@ and make these changes with::
     pep8radius master --diff --in-place
 
 Additional standards are outlined on the `code style wiki
-page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
+page <https://github.com/pandas-dev/pandas/wiki/Code-Style-and-Conventions>`_.
 
 Please try to maintain backward compatibility. *pandas* has lots of users with lots of
 existing code, so don't break it if at all possible. If you think breakage is required,
@@ -501,7 +501,7 @@ All tests should go into the ``tests`` subdirectory of the specific package.
 This folder contains many current examples of tests, and we suggest looking to these for
 inspiration. If your test requires working with files or
 network connectivity, there is more information on the `testing page
-<https://github.com/pydata/pandas/wiki/Testing>`_ of the wiki.
+<https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki.
 
 The ``pandas.util.testing`` module has many special ``assert`` functions that
 make it easier to make statements about whether Series or DataFrame objects are
@@ -639,7 +639,7 @@ on Travis-CI. The first step is to create a `service account
 Integration tests for ``pandas.io.gbq`` are skipped in pull requests because
 the credentials that are required for running Google BigQuery integration
 tests are `encrypted <https://docs.travis-ci.com/user/encrypting-files/>`__
-on Travis-CI and are only accessible from the pydata/pandas repository. The
+on Travis-CI and are only accessible from the pandas-dev/pandas repository. The
 credentials won't be available on forks of pandas. Here are the steps to run
 gbq integration tests on a forked repository:
 
@@ -688,7 +688,7 @@ performance regressions.
 
 You can run specific benchmarks using the ``-r`` flag, which takes a regular expression.
 
-See the `performance testing wiki <https://github.com/pydata/pandas/wiki/Performance-Testing>`_ for information
+See the `performance testing wiki <https://github.com/pandas-dev/pandas/wiki/Performance-Testing>`_ for information
 on how to write a benchmark.
 
 Documenting your code
@@ -712,8 +712,8 @@ directive is used. The sphinx syntax for that is:
 
 This will put the text *New in version 0.17.0* wherever you put the sphinx
 directive. This should also be put in the docstring when adding a new function
-or method (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
-or a new keyword argument (`example <https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
+or method (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/generic.py#L1959>`__)
+or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.16.2/pandas/core/frame.py#L1171>`__).
 
 Contributing your changes to *pandas*
 =====================================
@@ -806,8 +806,8 @@ like::
 
     origin   git@github.com:yourname/pandas.git (fetch)
     origin   git@github.com:yourname/pandas.git (push)
-    upstream git://github.com/pydata/pandas.git (fetch)
-    upstream git://github.com/pydata/pandas.git (push)
+    upstream git://github.com/pandas-dev/pandas.git (fetch)
+    upstream git://github.com/pandas-dev/pandas.git (push)
 
 Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to
 happen, a pull request needs to be submitted on GitHub.
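For orientation (the branch name is illustrative and not part of this commit): once ``git remote -v`` shows fetch and push entries like those above, the step that puts a feature branch on your fork, so a pull request can then be opened against pandas-dev/pandas, is typically:

    git push origin shiny-new-feature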
14 changes: 7 additions & 7 deletions doc/source/cookbook.rst
@@ -200,7 +200,7 @@ The :ref:`indexing <indexing>` docs.
     df[(df.AAA <= 6) & (df.index.isin([0,2,4]))]
 
 `Use loc for label-oriented slicing and iloc positional slicing
-<https://github.com/pydata/pandas/issues/2904>`__
+<https://github.com/pandas-dev/pandas/issues/2904>`__
 
 .. ipython:: python
@@ -410,7 +410,7 @@ Sorting
     df.sort_values(by=('Labs', 'II'), ascending=False)
 
 `Partial Selection, the need for sortedness;
-<https://github.com/pydata/pandas/issues/2995>`__
+<https://github.com/pandas-dev/pandas/issues/2995>`__
 
 Levels
 ******
@@ -787,7 +787,7 @@ The :ref:`Resample <timeseries.resampling>` docs.
 <http://stackoverflow.com/questions/14569223/timegrouper-pandas>`__
 
 `Using TimeGrouper and another grouping to create subgroups, then apply a custom function
-<https://github.com/pydata/pandas/issues/3791>`__
+<https://github.com/pandas-dev/pandas/issues/3791>`__
 
 `Resampling with custom periods
 <http://stackoverflow.com/questions/15408156/resampling-with-custom-periods>`__
@@ -823,7 +823,7 @@ ignore_index is needed in pandas < v0.13, and depending on df construction
     df = df1.append(df2,ignore_index=True); df
 
 `Self Join of a DataFrame
-<https://github.com/pydata/pandas/issues/2996>`__
+<https://github.com/pandas-dev/pandas/issues/2996>`__
 
 .. ipython:: python
@@ -936,7 +936,7 @@ using that handle to read.
 <http://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
 
 `Dealing with bad lines
-<http://github.com/pydata/pandas/issues/2886>`__
+<http://github.com/pandas-dev/pandas/issues/2886>`__
 
 `Dealing with bad lines II
 <http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/>`__
@@ -1075,7 +1075,7 @@ The :ref:`HDFStores <io.hdf5>` docs
 <http://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
 
 `Managing heterogeneous data using a linked multiple table hierarchy
-<http://github.com/pydata/pandas/issues/3032>`__
+<http://github.com/pandas-dev/pandas/issues/3032>`__
 
 `Merging on-disk tables with millions of rows
 <http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__
@@ -1216,7 +1216,7 @@ Timedeltas
 The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
 
 `Using timedeltas
-<http://github.com/pydata/pandas/pull/2899>`__
+<http://github.com/pandas-dev/pandas/pull/2899>`__
 
 .. ipython:: python
2 changes: 1 addition & 1 deletion doc/source/ecosystem.rst
@@ -143,7 +143,7 @@ both "column wise min/max and global min/max coloring."
 API
 -----
 
-`pandas-datareader <https://github.com/pydata/pandas-datareader>`__
+`pandas-datareader <https://github.com/pandas-dev/pandas-datareader>`__
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ``pandas-datareader`` is a remote data access library for pandas. ``pandas.io`` from pandas < 0.17.0 is now refactored/split-off to and importable from ``pandas_datareader`` (PyPI:``pandas-datareader``). Many/most of the supported APIs have at least a documentation paragraph in the `pandas-datareader docs <https://pandas-datareader.readthedocs.io/en/latest/>`_:
 
2 changes: 1 addition & 1 deletion doc/source/gotchas.rst
@@ -391,7 +391,7 @@ This is because ``reindex_like`` silently inserts ``NaNs`` and the ``dtype``
 changes accordingly. This can cause some issues when using ``numpy`` ``ufuncs``
 such as ``numpy.logical_and``.
 
-See the `this old issue <https://github.com/pydata/pandas/issues/2388>`__ for a more
+See the `this old issue <https://github.com/pandas-dev/pandas/issues/2388>`__ for a more
 detailed discussion.
 
 Parsing Dates from Text Files
2 changes: 1 addition & 1 deletion doc/source/install.rst
@@ -13,7 +13,7 @@ This is the recommended installation method for most users.
 
 Instructions for installing from source,
 `PyPI <http://pypi.python.org/pypi/pandas>`__, various Linux distributions, or a
-`development version <http://github.com/pydata/pandas>`__ are also provided.
+`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
 
 Python version support
 ----------------------
2 changes: 1 addition & 1 deletion doc/source/io.rst
@@ -2035,7 +2035,7 @@ You can even pass in an instance of ``StringIO`` if you so desire
 that having so many network-accessing functions slows down the documentation
 build. If you spot an error or an example that doesn't run, please do not
 hesitate to report it over on `pandas GitHub issues page
-<http://www.github.com/pydata/pandas/issues>`__.
+<http://www.github.com/pandas-dev/pandas/issues>`__.
 
 
 Read a URL and match a table that contains specific text
4 changes: 2 additions & 2 deletions doc/source/overview.rst
@@ -81,7 +81,7 @@ Getting Support
 ---------------
 
 The first stop for pandas issues and ideas is the `Github Issue Tracker
-<https://github.com/pydata/pandas/issues>`__. If you have a general question,
+<https://github.com/pandas-dev/pandas/issues>`__. If you have a general question,
 pandas community experts can answer through `Stack Overflow
 <http://stackoverflow.com/questions/tagged/pandas>`__.
 
@@ -103,7 +103,7 @@ training, and consulting for pandas.
 
 pandas is only made possible by a group of people around the world like you
 who have contributed new code, bug reports, fixes, comments and ideas. A
-complete list can be found `on Github <http://www.github.com/pydata/pandas/contributors>`__.
+complete list can be found `on Github <http://www.github.com/pandas-dev/pandas/contributors>`__.
 
 Development Team
 ----------------