Repository: d6t/d6tstack
Branch: master
Commit: a0924bd7d63b
Files: 44
Total size: 444.9 KB
Directory structure:
gitextract_bcekc6b6/
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── d6tstack/
│ ├── __init__.py
│ ├── combine_csv.py
│ ├── convert_xls.py
│ ├── helpers.py
│ ├── pyftp_final.py
│ ├── sniffer.py
│ ├── sync.py
│ └── utils.py
├── docs/
│ ├── Makefile
│ ├── make.bat
│ ├── make_zip_sample_csv.py
│ ├── make_zip_sample_xls.py
│ ├── shell-napoleon-html.sh
│ ├── shell-napoleon-recreate.sh
│ └── source/
│ ├── conf.py
│ ├── d6tstack.rst
│ ├── index.rst
│ ├── modules.rst
│ ├── setup.rst
│ └── tests.rst
├── examples-csv.ipynb
├── examples-dask.ipynb
├── examples-excel.ipynb
├── examples-pyspark.ipynb
├── examples-read-write.ipynb
├── examples-sql.ipynb
├── requirements-dev.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests/
├── __init__.py
├── pypi.sh
├── test-parquet.py
├── test_combine_csv.py
├── test_combine_old.py
├── test_sync.py
├── test_xls.py
├── tmp-reindex-withorder.py
├── tmp-runtest.py
└── tmp.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
tests/.test-cred.yaml
.idea/
.env
temp/
fiddle*
.pytest_cache/
test-data/output/
# add this manually
test-data/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
.static_storage/
.media/
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# pypi config file
.pypirc
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2018 Databolt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
include README.md
include LICENSE
================================================
FILE: README.md
================================================
# Databolt File Ingest
Quickly ingest raw files. Works with XLS, CSV and TXT files, which can be exported to CSV, Parquet, SQL and pandas. `d6tstack` solves many performance and schema problems typically encountered when ingesting raw files.

### Features include
* Fast pd.to_sql() for postgres and mysql
* Quickly check columns for consistency across files
* Fix added/missing columns
* Fix renamed columns
* Check Excel tabs for consistency across files
* Excel to CSV converter (incl multi-sheet support)
* Out of core functionality to process large files
* Export to CSV, parquet, SQL, pandas dataframe
## Installation
Install the latest published version with `pip install d6tstack`. Optional extras:
* `d6tstack[psql]`: for pandas to postgres
* `d6tstack[mysql]`: for pandas to mysql
* `d6tstack[xls]`: for excel support
* `d6tstack[parquet]`: for ingest csv to parquet
Install the latest dev version from GitHub with `pip install git+https://github.com/d6t/d6tstack.git`
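Extras can be combined in one command using standard pip extras syntax, for example `pip install d6tstack[psql,xls]`.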
### Sample Use
```
import d6tstack
# fast CSV to SQL import - see SQL examples notebook
d6tstack.utils.pd_to_psql(df, 'postgresql+psycopg2://usr:pwd@localhost/db', 'tablename')
d6tstack.utils.pd_to_mysql(df, 'mysql+mysqlconnector://usr:pwd@localhost/db', 'tablename')
d6tstack.utils.pd_to_mssql(df, 'mssql+pymssql://usr:pwd@localhost/db', 'tablename') # experimental
# ingest multiple CSVs which may have data schema changes - see CSV examples notebook
import glob
>>> c = d6tstack.combine_csv.CombinerCSV(glob.glob('data/*.csv'))
# show columns of each file
>>> c.columns()
# quick check if all files have consistent columns
>>> c.is_all_equal()
False
# show which files have missing columns
>>> c.is_column_present()
filename cost date profit profit2 sales
0 feb.csv True True True False True
2 mar.csv True True True True True
>>> c.combine_preview() # keep all columns
filename cost date profit profit2 sales
0 jan.csv -80 2011-01-01 20 NaN 100
0 mar.csv -100 2011-03-01 200 400 300
>>> d6tstack.combine_csv.CombinerCSV(glob.glob('*.csv'), columns_select_common=True).combine_preview() # keep common columns
filename cost date profit sales
0 jan.csv -80 2011-01-01 20 100
0 mar.csv -100 2011-03-01 200 300
>>> d6tstack.combine_csv.CombinerCSV(glob.glob('*.csv'), columns_rename={'sales':'revenue'}).combine_preview()
filename cost date profit profit2 revenue
0 jan.csv -80 2011-01-01 20 NaN 100
0 mar.csv -100 2011-03-01 200 400 300
# to come: check if columns match database
>>> c.is_columns_match_db('postgresql+psycopg2://usr:pwd@localhost/db', 'tablename')
# create csv with first nrows_preview rows of each file
>>> c.to_csv_head()
# export to csv, parquet, sql. Out of core with optimized fast imports for postgres and mysql
>>> c.to_pandas()
>>> c.to_csv_align(output_dir='process/')
>>> c.to_parquet_align(output_dir='process/')
>>> c.to_sql_combine('postgresql+psycopg2://usr:pwd@localhost/db', 'tablename')
>>> c.to_psql_combine('postgresql+psycopg2://usr:pwd@localhost/db', 'tablename') # fast, using COPY FROM
>>> c.to_mysql_combine('mysql+mysqlconnector://usr:pwd@localhost/db', 'tablename') # fast, using LOAD DATA LOCAL INFILE
# read Excel files - see Excel examples notebook for more details
import d6tstack.convert_xls
d6tstack.convert_xls.read_excel_advanced('test.xls',
    sheet_name='Sheet1', header_xls_range="B2:E2")
d6tstack.convert_xls.XLStoCSVMultiSheet('test.xls').convert_all(header_xls_range="B2:E2")
d6tstack.convert_xls.XLStoCSVMultiFile(glob.glob('*.xls'),
    cfg_xls_sheets_sel_mode='name_global', cfg_xls_sheets_sel='Sheet1'
    ).convert_all(header_xls_range="B2:E2")
```
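`CombinerCSV` also accepts an `apply_after_read` function that is applied to each chunk after columns have been renamed and reindexed. A minimal sketch (the file paths, `date` column and connection string below are placeholders):
```
import glob
import pandas as pd
import d6tstack.combine_csv
# parse the date column in every chunk before the fast postgres load
c = d6tstack.combine_csv.CombinerCSV(glob.glob('data/*.csv'),
    apply_after_read=lambda dfg: dfg.assign(date=pd.to_datetime(dfg['date'])))
c.to_psql_combine('postgresql+psycopg2://usr:pwd@localhost/db', 'tablename', if_exists='replace')
```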
## Documentation
* [SQL examples notebook](https://github.com/d6t/d6tstack/blob/master/examples-sql.ipynb) - Fast loading of CSV to SQL with pandas preprocessing
* [CSV examples notebook](https://github.com/d6t/d6tstack/blob/master/examples-csv.ipynb) - Quickly load any type of CSV files
* [Excel examples notebook](https://github.com/d6t/d6tstack/blob/master/examples-excel.ipynb) - Quickly extract from Excel to CSV
* [Dask Examples notebook](https://github.com/d6t/d6tstack/blob/master/examples-dask.ipynb) - How to use d6tstack to solve Dask input file problems
* [Pyspark Examples notebook](https://github.com/d6t/d6tstack/blob/master/examples-pyspark.ipynb) - How to use d6tstack to solve pyspark input file problems
* [Function reference docs](http://d6tstack.readthedocs.io/en/latest/py-modindex.html) - Detailed documentation for modules, classes, functions
## Faster Data Engineering
Check out other d6t libraries to solve common data engineering problems, including
* data workflows: build highly effective data science workflows
* fuzzy joins: quickly join data
* data pipes: quickly share and distribute data
https://github.com/d6t/d6t-python
We also encourage you to follow the Databolt blog for updates and tips + tricks: http://blog.databolt.tech
## Collecting Error Messages and Usage Statistics
We have put a lot of effort into making this library useful to you. To help us make this library even better, it collects ANONYMOUS error messages and usage statistics. See [d6tcollect](https://github.com/d6t/d6tcollect) for details including how to disable collection. Collection is asynchronous and doesn't impact your code in any way.
It may not catch all errors, so if you run into any problems or have any questions, please raise an issue on GitHub.
================================================
FILE: d6tstack/__init__.py
================================================
import d6tstack.combine_csv
#import d6tstack.convert_xls
import d6tstack.sniffer
#import d6tstack.sync
import d6tstack.utils
================================================
FILE: d6tstack/combine_csv.py
================================================
import numpy as np
import pandas as pd
pd.set_option('display.expand_frame_repr', False)
from scipy.stats import mode
import warnings
import ntpath, pathlib
import copy
import itertools
import os
import d6tcollect
# d6tcollect.init(__name__)
from .helpers import *
from .utils import PrintLogger
# ******************************************************************
# helpers
# ******************************************************************
def _dfconact(df):
return pd.concat(itertools.chain.from_iterable(df), sort=False, copy=False, join='inner', ignore_index=True)
def _direxists(fname, logger):
fdir = os.path.dirname(fname)
if fdir and not os.path.exists(fdir):
if logger:
logger.send_log('creating ' + fdir, 'ok')
os.makedirs(fdir)
return True
# ******************************************************************
# combiner
# ******************************************************************
class CombinerCSV(object, metaclass=d6tcollect.Collect):
"""
Core combiner class. Sniffs columns, generates preview, combines aka stacks to various output formats.
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
sep (string): CSV delimiter, see pandas.read_csv()
nrows_preview (int): number of rows in preview
chunksize (int): number of rows to read into memory while processing, see pandas.read_csv()
read_csv_params (dict): additional parameters to pass to pandas.read_csv()
columns_select (list): list of column names to keep
columns_select_common (bool): keep only common columns. Use this instead of `columns_select`
columns_rename (dict): dict of columns to rename `{'name_old':'name_new'}`
add_filename (bool): add filename column to output data frame. If `False`, will not add column.
apply_after_read (function): function to apply after reading each file. needs to return a dataframe
log (bool): send logs to logger
logger (object): logger object with `send_log()`
"""
def __init__(self, fname_list, sep=',', nrows_preview=3, chunksize=1e6, read_csv_params=None,
columns_select=None, columns_select_common=False, columns_rename=None, add_filename=True,
apply_after_read=None, log=True, logger=None):
if not fname_list:
raise ValueError("Filename list should not be empty")
self.fname_list = np.sort(fname_list)
self.nrows_preview = nrows_preview
self.read_csv_params = read_csv_params
if not self.read_csv_params:
self.read_csv_params = {}
if not 'sep' in self.read_csv_params:
self.read_csv_params['sep'] = sep
if not 'chunksize' in self.read_csv_params:
self.read_csv_params['chunksize'] = chunksize
self.logger = logger
if not logger and log:
self.logger = PrintLogger()
if not log:
self.logger = None
self.sniff_results = None
self.add_filename = add_filename
self.columns_select = columns_select
self.columns_select_common = columns_select_common
if columns_select and columns_select_common:
warnings.warn('columns_select will override columns_select_common, pick either one')
self.columns_rename = columns_rename
self._columns_reindex = None
self._columns_rename_dict = None
self.apply_after_read = apply_after_read
self.df_combine_preview = None
if self.columns_select:
if max(collections.Counter(columns_select).values())>1:
raise ValueError('Duplicate entries in columns_select')
def _read_csv_yield(self, fname, read_csv_params):
self._columns_reindex_available()
dfs = pd.read_csv(fname, **read_csv_params)
for dfc in dfs:
if self.columns_rename and self._columns_rename_dict[fname]:
dfc = dfc.rename(columns=self._columns_rename_dict[fname])
dfc = dfc.reindex(columns=self._columns_reindex)
if self.apply_after_read:
dfc = self.apply_after_read(dfc)
if self.add_filename:
dfc['filepath'] = fname
dfc['filename'] = ntpath.basename(fname)
yield dfc
def sniff_columns(self):
"""
Checks column consistency by reading top nrows in all files. It checks both presence and order of columns in all files
Returns:
dict: results dictionary with
files_columns (dict): dictionary with information, keys = filename, value = list of columns in file
columns_all (list): all columns in files
columns_common (list): only columns present in every file
is_all_equal (boolean): do all files have the same columns?
df_columns_present (dataframe): which columns are present in which file?
df_columns_order (dataframe): where in the file is the column?
"""
if self.logger:
self.logger.send_log('sniffing columns', 'ok')
read_csv_params = copy.deepcopy(self.read_csv_params)
read_csv_params['dtype'] = str
read_csv_params['nrows'] = self.nrows_preview
read_csv_params['chunksize'] = None
# read nrows of every file
self.dfl_all = []
for fname in self.fname_list:
# todo: make sure no nrows param in self.read_csv_params
df = pd.read_csv(fname, **read_csv_params)
self.dfl_all.append(df)
# process columns
dfl_all_col = [df.columns.tolist() for df in self.dfl_all]
col_files = dict(zip(self.fname_list, dfl_all_col))
col_common = list_common(list(col_files.values()))
col_all = list_unique(list(col_files.values()))
# find index in column list so can check order is correct
df_col_present = {}
for iFileName, iFileCol in col_files.items():
df_col_present[iFileName] = [iCol in iFileCol for iCol in col_all]
df_col_present = pd.DataFrame(df_col_present, index=col_all).T
df_col_present.index.names = ['file_path']
# find index in column list so can check order is correct
df_col_idx = {}
for iFileName, iFileCol in col_files.items():
df_col_idx[iFileName] = [iFileCol.index(iCol) if iCol in iFileCol else np.nan for iCol in col_all]
df_col_idx = pd.DataFrame(df_col_idx, index=col_all).T
# order columns by where they appear in file
m=mode(df_col_idx,axis=0)
df_col_pos = pd.DataFrame({'o':m[0][0],'c':m[1][0]},index=df_col_idx.columns)
df_col_pos = df_col_pos.sort_values(['o','c'])
df_col_pos['iscommon']=df_col_pos.index.isin(col_common)
# reorder by position
col_all = df_col_pos.index.values.tolist()
col_common = df_col_pos[df_col_pos['iscommon']].index.values.tolist()
col_unique = df_col_pos[~df_col_pos['iscommon']].index.values.tolist()
df_col_present = df_col_present[col_all]
df_col_idx = df_col_idx[col_all]
sniff_results = {'files_columns': col_files, 'columns_all': col_all, 'columns_common': col_common,
'columns_unique': col_unique, 'is_all_equal': columns_all_equal(dfl_all_col),
'df_columns_present': df_col_present, 'df_columns_order': df_col_idx}
self.sniff_results = sniff_results
return sniff_results
def get_sniff_results(self):
if not self.sniff_results:
self.sniff_columns()
return self.sniff_results
def _sniff_available(self):
if not self.sniff_results:
self.sniff_columns()
def is_all_equal(self):
"""
Checks if all columns are equal in all files
Returns:
bool: all columns are equal in all files?
"""
self._sniff_available()
return self.sniff_results['is_all_equal']
def is_column_present(self):
"""
Shows which columns are present in which files
Returns:
dataframe: boolean values for column presence in each file
"""
self._sniff_available()
return self.sniff_results['df_columns_present']
def is_column_present_unique(self):
"""
Shows unique columns by file
Returns:
dataframe: boolean values for column presence in each file
"""
self._sniff_available()
return self.is_column_present()[self.sniff_results['columns_unique']]
def columns_unique(self):
"""
Shows unique columns by file
Returns:
dataframe: boolean values for column presence in each file
"""
return self.is_column_present_unique()
def is_column_present_common(self):
"""
Shows common columns by file
Returns:
dataframe: boolean values for column presence in each file
"""
self._sniff_available()
return self.is_column_present()[self.sniff_results['columns_common']]
def columns_common(self):
"""
Shows common columns by file
Returns:
dataframe: boolean values for column presence in each file
"""
return self.is_column_present_common()
def columns(self):
"""
Shows columns by file
Returns:
dict: filename, columns
"""
self._sniff_available()
return self.sniff_results['files_columns']
def head(self):
"""
Shows preview rows for each file
Returns:
dict: filename, dataframe
"""
self._sniff_available()
return dict(zip(self.fname_list,self.dfl_all))
def _columns_reindex_prep(self):
self._sniff_available()
self._columns_select_dict = {} # select columns by filename
self._columns_rename_dict = {} # rename columns by filename
for fname in self.fname_list:
if self.columns_rename:
columns_rename = self.columns_rename.copy()
# check no naming conflicts
columns_select2 = [columns_rename[k] if k in columns_rename.keys() else k for k in self.sniff_results['files_columns'][fname]]
df_rename_count = collections.Counter(columns_select2)
if df_rename_count and max(df_rename_count.values()) > 1: # would the rename create naming conflict?
warnings.warn('Renaming conflict: {}'.format([(k, v) for k, v in df_rename_count.items() if v > 1]),
UserWarning)
while df_rename_count and max(df_rename_count.values()) > 1:
# remove key value pair causing conflict
conflicting_keys = [i for i, j in df_rename_count.items() if j > 1]
columns_rename = {k: v for k, v in columns_rename.items() if k in conflicting_keys}
columns_select2 = [columns_rename[k] if k in columns_rename.keys() else k for k in
self.sniff_results['files_columns'][fname]]
df_rename_count = collections.Counter(columns_select2)
# store rename by file. keep only renames for columns actually present in file
self._columns_rename_dict[fname] = dict((k,v) for k,v in columns_rename.items() if k in self.sniff_results['files_columns'][fname])
if self.columns_select:
columns_select2 = self.columns_select.copy()
else:
if self.columns_select_common:
columns_select2 = self.sniff_results['columns_common'].copy()
else:
columns_select2 = self.sniff_results['columns_all'].copy()
if self.columns_rename:
columns_select2 = list(dict.fromkeys([columns_rename[k] if k in columns_rename.keys() else k for k in columns_select2])) # set of columns after rename
# store select by file
self._columns_reindex = columns_select2
def _columns_reindex_available(self):
if not self._columns_rename_dict or not self._columns_reindex:
self._columns_reindex_prep()
def preview_rename(self):
"""
Shows which columns will be renamed in processing
Returns:
dataframe: columns to be renamed from which file
"""
self._columns_reindex_available()
df = pd.DataFrame(self._columns_rename_dict).T
return df
def preview_select(self):
"""
Shows which columns will be selected in processing
Returns:
list: columns to be selected from all files
"""
self._columns_reindex_available()
return self._columns_reindex
def combine_preview(self):
"""
Preview of what the combined data will look like
Returns:
dataframe: combined dataframe
"""
read_csv_params = copy.deepcopy(self.read_csv_params)
read_csv_params['nrows'] = self.nrows_preview
df = [[dfc for dfc in self._read_csv_yield(fname, read_csv_params)] for fname in self.fname_list]
df = _dfconact(df)
self.df_combine_preview = df.copy()
return df
def _combine_preview_available(self):
if self.df_combine_preview is None:
self.combine_preview()
def to_pandas(self):
"""
Combine all files to a pandas dataframe
Returns:
dataframe: combined data
"""
df = [[dfc for dfc in self._read_csv_yield(fname, self.read_csv_params)] for fname in self.fname_list]
df = _dfconact(df)
return df
def _get_filepath_out(self, fname, output_dir, output_prefix, ext):
# filename
fname_out = ntpath.basename(fname)
fname_out = os.path.splitext(fname_out)[0]
fname_out = output_prefix + fname_out + ext
# path
output_dir = output_dir if output_dir else os.path.dirname(fname)
fpath_out = os.path.join(output_dir, fname_out)
assert _direxists(fpath_out, self.logger)
return fpath_out
def _to_csv_prep(self, write_params):
if 'index' not in write_params:
write_params['index'] = False
write_params.pop('header', None) # library handles
self._combine_preview_available()
return write_params
def to_csv_head(self, output_dir=None, write_params={}):
"""
Save `nrows_preview` header rows as individual files
Args:
output_dir (str): directory to save files in. If not given save in the same directory as the original file
write_params (dict): additional params to pass to `pandas.to_csv()`
Returns:
list: list of filenames of processed files
"""
write_params = self._to_csv_prep(write_params)
fnamesout = []
for fname, dfg in dict(zip(self.fname_list,self.dfl_all)).items():
filename = f'{fname}-head.csv'
filename = filename if output_dir is None else str(pathlib.Path(output_dir)/filename)
dfg.to_csv(filename, **write_params)
fnamesout.append(filename)
return fnamesout
def to_csv_align(self, output_dir=None, output_prefix='d6tstack-', write_params={}):
"""
Create cleaned versions of original files. Automatically runs out of core, using `self.chunksize`.
Args:
output_dir (str): directory to save files in. If not given save in the same directory as the original file
output_prefix (str): prepend with prefix to distinguish from original files
write_params (dict): additional params to pass to `pandas.to_csv()`
Returns:
list: list of filenames of processed files
"""
# stream all chunks to multiple files
write_params = self._to_csv_prep(write_params)
fnamesout = []
for fname in self.fname_list:
filename = self._get_filepath_out(fname, output_dir, output_prefix, '.csv')
if self.logger:
self.logger.send_log('writing '+filename , 'ok')
fhandle = open(filename, 'w')
self.df_combine_preview[:0].to_csv(fhandle, **write_params)
for dfc in self._read_csv_yield(fname, self.read_csv_params):
dfc.to_csv(fhandle, header=False, **write_params)
fhandle.close()
fnamesout.append(filename)
return fnamesout
def to_csv_combine(self, filename, write_params={}):
"""
Combines all files to a single csv file. Automatically runs out of core, using `self.chunksize`.
Args:
filename (str): file name for the combined output file
write_params (dict): additional params to pass to `pandas.to_csv()`
Returns:
str: filename for combined data
"""
# stream all chunks from all files to a single file
write_params = self._to_csv_prep(write_params)
assert _direxists(filename, self.logger)
fhandle = open(filename, 'w')
self.df_combine_preview[:0].to_csv(fhandle, **write_params)
for fname in self.fname_list:
for dfc in self._read_csv_yield(fname, self.read_csv_params):
dfc.to_csv(fhandle, header=False, **write_params)
fhandle.close()
return filename
def to_parquet_align(self, output_dir=None, output_prefix='d6tstack-', write_params={}):
"""
Same as `to_csv_align` but outputs parquet files
"""
# write_params for pyarrow.parquet.write_table
# stream all chunks to multiple files
self._combine_preview_available()
import pyarrow as pa
import pyarrow.parquet as pq
fnamesout = []
pqschema = pa.Table.from_pandas(self.df_combine_preview).schema
for fname in self.fname_list:
filename = self._get_filepath_out(fname, output_dir, output_prefix, '.pq')
if self.logger:
self.logger.send_log('writing '+filename , 'ok')
pqwriter = pq.ParquetWriter(filename, pqschema)
for dfc in self._read_csv_yield(fname, self.read_csv_params):
pqwriter.write_table(pa.Table.from_pandas(dfc.astype(self.df_combine_preview.dtypes), schema=pqschema),**write_params)
pqwriter.close()
fnamesout.append(filename)
return fnamesout
def to_parquet_combine(self, filename, write_params={}):
"""
Same as `to_csv_combine` but outputs parquet files
"""
# stream all chunks from all files to a single file
self._combine_preview_available()
assert _direxists(filename, self.logger)
import pyarrow as pa
import pyarrow.parquet as pq
# todo: fix mixed data type writing. at least give a warning
pqwriter = pq.ParquetWriter(filename, pa.Table.from_pandas(self.df_combine_preview).schema)
for fname in self.fname_list:
for dfc in self._read_csv_yield(fname, self.read_csv_params):
pqwriter.write_table(pa.Table.from_pandas(dfc.astype(self.df_combine_preview.dtypes)),**write_params)
pqwriter.close()
return filename
def to_sql_combine(self, uri, tablename, if_exists='fail', write_params=None, return_create_sql=False):
"""
Load all files into a sql table using sqlalchemy. Generic but slower than the optimized functions
Args:
uri (str): sqlalchemy database uri
tablename (str): table to store data in
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
write_params (dict): additional params to pass to `pandas.to_sql()`
return_create_sql (bool): return the create table sql statement for the combined file schema instead of running the data load
Returns:
bool: True if loader finished
"""
if not write_params:
write_params = {}
if 'if_exists' not in write_params:
write_params['if_exists'] = if_exists
if 'index' not in write_params:
write_params['index'] = False
self._combine_preview_available()
if 'mysql' in uri and not 'mysql+pymysql' in uri:
raise ValueError('need to use pymysql for mysql (pip install pymysql)')
import sqlalchemy
sql_engine = sqlalchemy.create_engine(uri)
# create table
dfhead = self.df_combine_preview.astype(self.df_combine_preview.dtypes)[:0]
if return_create_sql:
return pd.io.sql.get_schema(dfhead, tablename).replace('"',"`")
dfhead.to_sql(tablename, sql_engine, **write_params)
# append data
write_params['if_exists'] = 'append'
for fname in self.fname_list:
for dfc in self._read_csv_yield(fname, self.read_csv_params):
dfc.astype(self.df_combine_preview.dtypes).to_sql(tablename, sql_engine, **write_params)
return True
def to_psql_combine(self, uri, table_name, if_exists='fail', sep=','):
"""
Load all files into a sql table using native postgres COPY FROM. Chunks data load to reduce memory consumption
Args:
uri (str): postgres psycopg2 sqlalchemy database uri
table_name (str): table to store data in
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
sep (str): separator for temp file, eg ',' or '\t'
Returns:
bool: True if loader finished
"""
if not 'psycopg2' in uri:
raise ValueError('need to use psycopg2 uri')
self._combine_preview_available()
import sqlalchemy
import io
sql_engine = sqlalchemy.create_engine(uri)
sql_cnxn = sql_engine.raw_connection()
cursor = sql_cnxn.cursor()
self.df_combine_preview[:0].to_sql(table_name, sql_engine, if_exists=if_exists, index=False)
for fname in self.fname_list:
for dfc in self._read_csv_yield(fname, self.read_csv_params):
fbuf = io.StringIO()
dfc.astype(self.df_combine_preview.dtypes).to_csv(fbuf, index=False, header=False, sep=sep)
fbuf.seek(0)
cursor.copy_from(fbuf, table_name, sep=sep, null='')
sql_cnxn.commit()
cursor.close()
return True
def to_mysql_combine(self, uri, table_name, if_exists='fail', tmpfile='mysql.csv', sep=','):
"""
Load all files into a sql table using native mysql LOAD DATA LOCAL INFILE. Chunks data load to reduce memory consumption
Args:
uri (str): mysql mysqlconnector sqlalchemy database uri
table_name (str): table to store data in
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
tmpfile (str): filename for temporary file to load from
sep (str): separator for temp file, eg ',' or '\t'
Returns:
bool: True if loader finished
"""
if not 'mysql+mysqlconnector' in uri:
raise ValueError('need to use mysql+mysqlconnector uri (pip install mysql-connector)')
self._combine_preview_available()
import sqlalchemy
sql_engine = sqlalchemy.create_engine(uri)
self.df_combine_preview[:0].to_sql(table_name, sql_engine, if_exists=if_exists, index=False)
if self.logger:
self.logger.send_log('creating ' + tmpfile, 'ok')
self.to_csv_combine(tmpfile, write_params={'na_rep':'\\N','sep':sep})
if self.logger:
self.logger.send_log('loading ' + tmpfile, 'ok')
sql_load = "LOAD DATA LOCAL INFILE '{}' INTO TABLE {} FIELDS TERMINATED BY '{}' IGNORE 1 LINES;".format(tmpfile, table_name, sep)
sql_engine.execute(sql_load)
os.remove(tmpfile)
return True
def to_mssql_combine(self, uri, table_name, schema_name=None, if_exists='fail', tmpfile='mysql.csv'):
"""
Load all files into a sql table using native mssql BULK INSERT. Chunks data load to reduce memory consumption
Args:
uri (str): mssql pymssql sqlalchemy database uri
table_name (str): table to store data in
schema_name (str): name of schema to write to
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
tmpfile (str): filename for temporary file to load from
Returns:
bool: True if loader finished
"""
if not 'mssql+pymssql' in uri:
raise ValueError('need to use mssql+pymssql uri (conda install -c prometeia pymssql)')
self._combine_preview_available()
import sqlalchemy
sql_engine = sqlalchemy.create_engine(uri)
self.df_combine_preview[:0].to_sql(table_name, sql_engine, schema=schema_name, if_exists=if_exists, index=False)
if self.logger:
self.logger.send_log('creating ' + tmpfile, 'ok')
self.to_csv_combine(tmpfile, write_params={'na_rep':'\\N'})
if self.logger:
self.logger.send_log('loading ' + tmpfile, 'ok')
if schema_name is not None:
table_name = '{}.{}'.format(schema_name,table_name)
sql_load = "BULK INSERT {} FROM '{}';".format(table_name, tmpfile)
sql_engine.execute(sql_load)
os.remove(tmpfile)
return True
# todo: ever need to rerun _available fct instead of using cache?
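# Usage sketch (illustrative file names, not part of the library):
# c = CombinerCSV(['data/jan.csv', 'data/feb.csv'])
# c.to_csv_combine('data/combined.csv')      # stream all files into a single csv
# c.to_parquet_combine('data/combined.pq')   # stream all files into a single parquet file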
================================================
FILE: d6tstack/convert_xls.py
================================================
import warnings
import os.path
import numpy as np
import pandas as pd
import ntpath
import openpyxl
import xlrd
try:
from openpyxl.utils.cell import coordinate_from_string
except ImportError:
from openpyxl.utils import coordinate_from_string
from d6tstack.helpers import compare_pandas_versions, check_valid_xls
import d6tcollect
# d6tcollect.init(__name__)
#******************************************************************
# read_excel_advanced
#******************************************************************
def read_excel_advanced(fname, remove_blank_cols=True, remove_blank_rows=True, collapse_header=True,
header_xls_range=None, header_xls_start=None, header_xls_end=None,
is_preview=False, nrows_preview=3, **kwds):
"""
Read Excel files to pandas dataframe with advanced options like set header ranges and remove blank columns and rows
Args:
fname (str): Excel file path
remove_blank_cols (bool): remove blank columns
remove_blank_rows (bool): remove blank rows
collapse_header (bool): to convert multiline header to a single line string
header_xls_range (string): range of headers in excel, eg: A4:B16
header_xls_start (string): Starting cell of excel for header range, eg: A4
header_xls_end (string): End cell of excel for header range, eg: B16
is_preview (bool): Read only first `nrows_preview` lines
nrows_preview (integer): Initial number of rows to be used for preview columns (default: 3)
kwds (mixed): parameters for `pandas.read_excel()` to pass through
Returns:
df (dataframe): pandas dataframe
Note:
You can pass in any `pandas.read_excel()` parameters in particular `sheet_name`
"""
header = []
if header_xls_range:
if not (header_xls_start and header_xls_end):
header_xls_range = header_xls_range.split(':')
header_xls_start, header_xls_end = header_xls_range
else:
raise ValueError('Parameter conflict. Can only pass header_xls_range or header_xls_start with header_xls_end')
if header_xls_start and header_xls_end:
if 'skiprows' in kwds or 'usecols' in kwds:
raise ValueError('Parameter conflict. Cannot pass skiprows or usecols with header_xls')
scol, srow = coordinate_from_string(header_xls_start)
ecol, erow = coordinate_from_string(header_xls_end)
# header, skiprows, usecols
header = list(range(erow - srow + 1))
usecols = scol + ":" + ecol
skiprows = srow - 1
if compare_pandas_versions(pd.__version__, "0.20.3") > 0:
df = pd.read_excel(fname, header=header, skiprows=skiprows, usecols=usecols, **kwds)
else:
df = pd.read_excel(fname, header=header, skiprows=skiprows, parse_cols=usecols, **kwds)
else:
df = pd.read_excel(fname, **kwds)
# remove blank cols and rows
if remove_blank_cols:
df = df.dropna(axis='columns', how='all')
if remove_blank_rows:
df = df.dropna(axis='rows', how='all')
# todo: add df.reset_index() once no actual data in index
# clean up header
if collapse_header:
if len(header) > 1:
df.columns = [' '.join([s for s in col if not 'Unnamed' in s]).strip().replace("\n", ' ')
for col in df.columns.values]
df = df.reset_index()
else:
df.rename(columns=lambda x: x.strip().replace("\n", ' '), inplace=True)
# preview
if is_preview:
df = df.head(nrows_preview)
return df
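# Usage sketch (illustrative file name and range; header_xls_start/end is equivalent to header_xls_range='B2:E2'):
# df = read_excel_advanced('report.xlsx', sheet_name='Sheet1',
#                          header_xls_start='B2', header_xls_end='E2', is_preview=True)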
#******************************************************************
# XLSSniffer
#******************************************************************
class XLSSniffer(object, metaclass=d6tcollect.Collect):
"""
Extracts available sheets from MULTIPLE Excel files and runs diagnostics
Args:
fname_list (list): file paths, eg ['dir/a.csv','dir/b.csv']
logger (object): logger object with send_log(), optional
"""
def __init__(self, fname_list, logger=None):
if not fname_list:
raise ValueError("Filename list should not be empty")
self.fname_list = fname_list
self.logger = logger
check_valid_xls(self.fname_list)
self.sniff()
def sniff(self):
"""
Executes sniffer
Returns:
boolean: True if everything ok. Results are accessible in ``.df_xls_sheets``
"""
xls_sheets = {}
for fname in self.fname_list:
if self.logger:
self.logger.send_log('sniffing sheets in '+ntpath.basename(fname),'ok')
xls_fname = {}
xls_fname['file_name'] = ntpath.basename(fname)
if fname[-5:]=='.xlsx':
fh = openpyxl.load_workbook(fname,read_only=True)
xls_fname['sheets_names'] = fh.sheetnames
fh.close()
# todo: need to close file?
elif fname[-4:]=='.xls':
fh = xlrd.open_workbook(fname, on_demand=True)
xls_fname['sheets_names'] = fh.sheet_names()
fh.release_resources()
else:
raise IOError('Only .xls or .xlsx files can be combined')
xls_fname['sheets_count'] = len(xls_fname['sheets_names'])
xls_fname['sheets_idx'] = np.arange(xls_fname['sheets_count']).tolist()
xls_sheets[fname] = xls_fname
self.xls_sheets = xls_sheets
df_xls_sheets = pd.DataFrame(xls_sheets).T
df_xls_sheets.index.names = ['file_path']
self.dict_xls_sheets = xls_sheets
self.df_xls_sheets = df_xls_sheets
return True
def all_contain_sheetname(self,sheet_name):
"""
Check if all files contain a certain sheet
Args:
sheet_name (string): sheetname to check
Returns:
boolean: True if all files contain the sheet
"""
return np.all([sheet_name in self.dict_xls_sheets[fname]['sheets_names'] for fname in self.fname_list])
def all_have_idx(self,sheet_idx):
"""
Check if all files contain a certain index
Args:
sheet_idx (int): sheet index to check
Returns:
boolean: True if the index exists in all files
"""
return np.all([sheet_idx<=(d['sheets_count']-1) for k,d in self.dict_xls_sheets.items()])
def all_same_count(self):
"""
Check if all files contain the same number of sheets
Returns:
boolean: True if all files contain the same number of sheets
"""
first_elem = next(iter(self.dict_xls_sheets.values()))
return np.all([first_elem['sheets_count']==d['sheets_count'] for k,d in self.dict_xls_sheets.items()])
def all_same_names(self):
first_elem = next(iter(self.dict_xls_sheets.values()))
return np.all([first_elem['sheets_names']==d['sheets_names'] for k,d in self.dict_xls_sheets.items()])
#******************************************************************
# convertor
#******************************************************************
class XLStoBase(object, metaclass=d6tcollect.Collect):
def __init__(self, if_exists='skip', output_dir=None, logger=None):
"""
Base class for converting Excel files
Args:
if_exists (str): Possible values: skip and replace, default: skip, optional
output_dir (str): If present, file is saved in given directory, optional
logger (object): logger object with send_log('msg','status'), optional
"""
if if_exists not in ['skip', 'replace']:
raise ValueError("Possible value of 'if_exists' are 'skip' and 'replace'")
self.logger = logger
self.if_exists = if_exists
self.output_dir = output_dir
if self.output_dir:
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def _get_output_filename(self, fname):
if self.output_dir:
basename = os.path.basename(fname)
fname_out = os.path.join(self.output_dir, basename)
else:
fname_out = fname
is_skip = (self.if_exists == 'skip' and os.path.isfile(fname_out))
return fname_out, is_skip
def convert_single(self, fname, sheet_name, **kwds):
"""
Converts a single sheet of a single file to csv
Args:
fname: path to file
sheet_name (str): optional sheet_name to override global `cfg_xls_sheets_sel`
Additional parameters are the same as for `d6tstack.convert_xls.read_excel_advanced()`
Returns:
str: output file name
"""
if self.logger:
msg = 'converting file: '+ntpath.basename(fname)+' | sheet: '
if hasattr(self, 'cfg_xls_sheets_sel'):
msg += str(self.cfg_xls_sheets_sel[fname])
self.logger.send_log(msg,'ok')
fname_out = fname + '-' + str(sheet_name) + '.csv'
fname_out, is_skip = self._get_output_filename(fname_out)
if not is_skip:
df = read_excel_advanced(fname, sheet_name=sheet_name, **kwds)
df.to_csv(fname_out, index=False)
else:
warnings.warn('File %s exists, skipping' %fname)
return fname_out
class XLStoCSVMultiFile(XLStoBase, metaclass=d6tcollect.Collect):
"""
Converts xls|xlsx files to csv files. Selects a SINGLE SHEET from each file. To extract MULTIPLE SHEETS from a file use XLStoCSVMultiSheet
Args:
fname_list (list): file paths, eg ['dir/a.csv','dir/b.csv']
cfg_xls_sheets_sel_mode (string): mode to select tabs
* ``name``: select by name, provide name for each file, can customize by file
* ``name_global``: select by name, one name for all files
* ``idx``: select by index, provide index for each file, can customize by file
* ``idx_global``: select by index, one index for all files
cfg_xls_sheets_sel (dict): values to select tabs `{'filename':'value'}`
output_dir (str): If present, file is saved in given directory, optional
if_exists (str): Possible values: skip and replace, default: skip, optional
logger (object): logger object with send_log('msg','status'), optional
"""
def __init__(self, fname_list, cfg_xls_sheets_sel_mode='idx_global', cfg_xls_sheets_sel=0,
output_dir=None, if_exists='skip', logger=None):
super().__init__(if_exists, output_dir, logger)
if not fname_list:
raise ValueError("Filename list should not be empty")
self.set_files(fname_list)
self.set_select_mode(cfg_xls_sheets_sel_mode, cfg_xls_sheets_sel)
def set_files(self, fname_list):
"""
Update input files. You will also need to update sheet selection with ``.set_select_mode()``.
Args:
fname_list (list): see class description for details
"""
self.fname_list = fname_list
self.xlsSniffer = XLSSniffer(fname_list)
def set_select_mode(self, cfg_xls_sheets_sel_mode, cfg_xls_sheets_sel):
"""
Update sheet selection values
Args:
cfg_xls_sheets_sel_mode (string): see class description for details
cfg_xls_sheets_sel (list): see class description for details
"""
assert cfg_xls_sheets_sel_mode in ['name','idx','name_global','idx_global']
sheets = self.xlsSniffer.dict_xls_sheets
if cfg_xls_sheets_sel_mode=='name_global':
cfg_xls_sheets_sel_mode = 'name'
cfg_xls_sheets_sel = dict(zip(self.fname_list,[cfg_xls_sheets_sel]*len(self.fname_list)))
elif cfg_xls_sheets_sel_mode=='idx_global':
cfg_xls_sheets_sel_mode = 'idx'
cfg_xls_sheets_sel = dict(zip(self.fname_list,[cfg_xls_sheets_sel]*len(self.fname_list)))
if not set(cfg_xls_sheets_sel.keys())==set(sheets.keys()):
raise ValueError('Need to select a sheet from every file')
# check given selection actually present in files
if cfg_xls_sheets_sel_mode=='name':
if not np.all([cfg_xls_sheets_sel[fname] in sheets[fname]['sheets_names'] for fname in self.fname_list]):
raise ValueError('Invalid sheet name selected in one of the files')
# todo show which file is mismatched
elif cfg_xls_sheets_sel_mode=='idx':
if not np.all([cfg_xls_sheets_sel[fname] < sheets[fname]['sheets_count'] for fname in self.fname_list]):
raise ValueError('Invalid index selected in one of the files')
# todo show which file is mismatched
else:
raise ValueError('Invalid xls_sheets_mode')
self.cfg_xls_sheets_sel_mode = cfg_xls_sheets_sel_mode
self.cfg_xls_sheets_sel = cfg_xls_sheets_sel
def convert_all(self, **kwds):
"""
Converts all files
Args:
Any parameters for `d6tstack.convert_xls.read_excel_advanced()`
Returns:
list: output file names
"""
fnames_converted = []
for fname in self.fname_list:
fname_out = self.convert_single(fname, self.cfg_xls_sheets_sel[fname], **kwds)
fnames_converted.append(fname_out)
return fnames_converted
class XLStoCSVMultiSheet(XLStoBase, metaclass=d6tcollect.Collect):
"""
Converts ALL SHEETS from a SINGLE xls|xlsx files to separate csv files
Args:
fname (string): file path
sheet_names (list): list of int or str. If not given, will convert all sheets in the file
output_dir (str): If present, file is saved in given directory, optional
if_exists (str): Possible values: skip and replace, default: skip, optional
logger (object): logger object with send_log('msg','status'), optional
"""
def __init__(self, fname, sheet_names=None, output_dir=None, if_exists='skip', logger=None):
super().__init__(if_exists, output_dir, logger)
self.fname = fname
if sheet_names:
if not isinstance(sheet_names, (list,str)):
raise ValueError('sheet_names needs to be a list')
self.sheet_names = sheet_names
else:
self.xlsSniffer = XLSSniffer([fname, ])
self.sheet_names = self.xlsSniffer.xls_sheets[self.fname]['sheets_names']
def convert_single(self, sheet_name, **kwds):
"""
Converts a single sheet of the file
Args:
sheet_name (str): Excel sheet
Any parameters for `d6tstack.convert_xls.read_excel_advanced()`
Returns:
str: output file name
"""
return super().convert_single(self.fname, sheet_name, **kwds)
def convert_all(self, **kwds):
"""
Converts all sheets in the file
Args:
Any parameters for `d6tstack.convert_xls.read_excel_advanced()`
Returns:
list: output file names
"""
fnames_converted = []
for iSheet in self.sheet_names:
fname_out = self.convert_single(iSheet, **kwds)
fnames_converted.append(fname_out)
return fnames_converted
================================================
FILE: d6tstack/helpers.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module with several helper functions
"""
import os
import collections
import re
def file_extensions_get(fname_list):
"""Returns file extensions in list
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
Returns:
list: file extensions for each file name in input list, eg ['.csv','.csv']
"""
return [os.path.splitext(fname)[-1] for fname in fname_list]
def file_extensions_all_equal(ext_list):
"""Checks that all file extensions are equal.
Args:
ext_list (list): file extensions, eg ['.csv','.csv']
Returns:
bool: all extensions are equal to first extension in list?
"""
return len(set(ext_list))==1
def file_extensions_contains_xls(ext_list):
# Assumes all file extensions are equal! Only checks first file
return ext_list[0] == '.xls'
def file_extensions_contains_xlsx(ext_list):
# Assumes all file extensions are equal! Only checks first file
return ext_list[0] == '.xlsx'
def file_extensions_contains_csv(ext_list):
# Assumes all file extensions are equal! Only checks first file
return (ext_list[0] == '.csv' or ext_list[0] == '.txt')
def file_extensions_valid(ext_list):
"""Checks if file list contains only valid files
Notes:
Assumes all file extensions are equal! Only checks first file
Args:
ext_list (list): file extensions, eg ['.csv','.csv']
Returns:
bool: first element in list is one of ['.csv','.txt','.xls','.xlsx']?
"""
ext_list_valid = ['.csv','.txt','.xls','.xlsx']
return ext_list[0] in ext_list_valid
def columns_all_equal(col_list):
"""Checks that all lists in col_list are equal.
Args:
col_list (list): columns, eg [['a','b'],['a','b','c']]
Returns:
bool: all lists in list are equal?
"""
return all([l==col_list[0] for l in col_list])
def list_common(_list, sort=True):
l = list(set.intersection(*[set(l) for l in _list]))
if sort:
return sorted(l)
else:
return l
def list_unique(_list, sort=True):
l = list(set.union(*[set(l) for l in _list]))
if sort:
return sorted(l)
else:
return l
def list_tofront(_list,val):
return _list.insert(0, _list.pop(_list.index(val)))
def cols_filename_tofront(_list):
return list_tofront(_list,'filename')
def df_filename_tofront(dfg):
cfg_col = dfg.columns.tolist()
return dfg[cols_filename_tofront(cfg_col)]
def check_valid_xls(fname_list):
ext_list = file_extensions_get(fname_list)
if not file_extensions_all_equal(ext_list):
raise IOError('All file types and extensions have to be equal')
if not(file_extensions_contains_xls(ext_list) or file_extensions_contains_xlsx(ext_list)):
raise IOError('Only .xls, .xlsx files can be processed')
return True
def compare_pandas_versions(version1, version2):
def cmp(a, b):
return (a > b) - (a < b)
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
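# Usage sketch: compare_pandas_versions('0.23.4', '0.20.3') returns 1 (first is newer),
# 0 if equal and -1 if older; list_common([['a','b'], ['a','c']]) returns ['a'].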
================================================
FILE: d6tstack/pyftp_final.py
================================================
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import os
import ftputil
def get_ftp_files():
fileSetftp = set()
with ftputil.FTPHost(cfg_ftp_host, cfg_ftp_usr, cfg_ftp_pwd) as ftp_host:
ftp_host.use_list_a_option = False
for dir_, _, files in ftp_host.walk(cfg_dir_ftp):
for fileName in files:
relDir = os.path.relpath(dir_, cfg_dir_ftp)
relFile = os.path.join(relDir, fileName)
fileSetftp.add(relFile)
return fileSetftp
def upload_ftp_files_s3(ftp_files, s3_files, bucket):
files_ftp_sync = set(ftp_files).difference(s3_files)
with ftputil.FTPHost(cfg_ftp_host, cfg_ftp_usr, cfg_ftp_pwd) as ftp_host:
for ftp_file in files_ftp_sync:
full_name = cfg_dir_ftp + ftp_file
basename = os.path.basename(full_name)
temp_path = '/tmp/'+basename
ftp_host.download(full_name, temp_path)
with open(temp_path, 'rb') as f:
key = Key(bucket, ftp_file)
key.set_contents_from_file(f)
def list_s3_files(bucket):
s3_files = set()
for key in bucket.list():
s3_files.add(key.name.encode('utf-8'))
return s3_files
def upload_to_s3(bucket):
fname = '/home/anuj/Pictures/test/hp.jpg'
basename = os.path.basename(fname)
key = Key(bucket, basename)
with open(fname, 'rb') as f:
key.set_contents_from_file(f)
if __name__ == "__main__":
print("S3 File sync")
s3_id = ''
s3_key = ''
bucket_name = 'test-anuj-ftp-sync'
cfg_ftp_host = 'ftp.fic.com.tw'
cfg_ftp_usr = 'anonymous'
cfg_ftp_pwd = 'random'
cfg_dir_ftp = '/photo/ia/'
s3_conn = S3Connection(s3_id, s3_key, host='s3.ap-south-1.amazonaws.com')
bucket = s3_conn.get_bucket(bucket_name)
s3_files = list_s3_files(bucket)
upload_to_s3(bucket)
ftp_files = get_ftp_files()
print(ftp_files)
upload_ftp_files_s3(ftp_files, s3_files, bucket)
================================================
FILE: d6tstack/sniffer.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Finds CSV settings and Excel sheets in multiple files. Often needed as input for stacking
"""
import collections
import csv
import d6tcollect
# d6tcollect.init(__name__)
#******************************************************************
# csv
#******************************************************************
def csv_count_rows(fname):
def blocks(files, size=65536):
while True:
b = files.read(size)
if not b: break
yield b
with open(fname) as f:
nrows = sum(bl.count("\n") for bl in blocks(f))
return nrows
class CSVSniffer(object, metaclass=d6tcollect.Collect):
"""
Automatically detects settings needed to read csv files. SINGLE file only, for MULTI file use CSVSnifferList
Args:
fname (string): file path
nlines (int): number of lines to sample from each file
delims (string): possible delimiters, default ",;\t|"
"""
def __init__(self, fname, nlines = 10, delims=',;\t|'):
self.cfg_fname = fname
self.nrows = csv_count_rows(fname) # todo: check for file size, if large don't run this
self.cfg_nlines = min(nlines,self.nrows) # read_lines() doesn't check EOF # todo: check 1% of file up to a max
self.cfg_delims_pool = delims
self.delim = None # delim used for the file
self.csv_lines = None # top n lines read from file
self.csv_lines_delim = None # detected delim for each line in file
self.csv_rows = None # top n lines split using delim
def read_nlines(self):
# read top lines
fhandle = open(self.cfg_fname)
self.csv_lines = [fhandle.readline().rstrip() for _ in range(self.cfg_nlines)]
fhandle.close()
def scan_delim(self):
if not self.csv_lines:
self.read_nlines()
# get delimiter for each line in file
delims = []
for line in self.csv_lines:
try:
csv_sniff = csv.Sniffer().sniff(line, self.cfg_delims_pool)
delims.append(csv_sniff.delimiter)
except:
delims.append(None) # todo: able to catch exception more specifically?
self.csv_lines_delim = delims
def get_delim(self):
if not self.csv_lines_delim:
self.scan_delim()
# all delimiters the same?
if len(set(self.csv_lines_delim))>1:
self.delim_is_consistent = False
csv_delim_count = collections.Counter(self.csv_lines_delim)
csv_delim = csv_delim_count.most_common(1)[0][0] # use the most commonly used delimiter
# todo: rerun on cfg_csv_scan_topline**2 files in case there is a large # of header rows
else:
self.delim_is_consistent = True
csv_delim = self.csv_lines_delim[0]
if csv_delim==None:
raise IOError('Could not determine a valid delimiter, please check that your files are .csv or .txt using one of the delimiters %s' %(self.cfg_delims_pool))
else:
self.delim = csv_delim
self.csv_rows = [s.split(self.delim) for s in self.csv_lines][self.count_skiprows():]
if self.check_column_length_consistent():
self.certainty = 'high'
else:
self.certainty = 'probable'
return self.delim
def check_column_length_consistent(self):
# check if all rows have the same length. NB: this is just on the sample!
if not self.csv_rows:
self.get_delim()
return len(set([len(row) for row in self.csv_rows]))==1
def count_skiprows(self):
# finds the number of rows to skip by finding the last line which doesn't use the selected delimiter
if not self.delim:
self.get_delim()
if self.delim_is_consistent: # all delims the same so nothing to skip
return 0
l = [d != self.delim for d in self.csv_lines_delim]
l = list(reversed(l))
return len(l) - l.index(True)
def has_header_inverse(self):
# tries to disprove a header: if every sampled row (including the first) contains at least one numeric value, the first row is probably data rather than a header
if not self.csv_rows:
self.get_delim()
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
self.is_all_rows_number_col = all([any([is_number(s) for s in row]) for row in self.csv_rows])
'''
self.row_distance = [distance.jaccard(self.csv_rows[0], self.csv_rows[i]) for i in range(1,len(self.csv_rows))]
iqr_low, iqr_high = np.percentile(self.row_distance[1:], [5, 95])
is_first_row_different = not(iqr_low <= self.row_distance[0] <= iqr_high)
'''
def has_header(self):
# more likely than not to contain headers so have to prove no header present
self.has_header_inverse()
return not self.is_all_rows_number_col
class CSVSnifferList(object, metaclass=d6tcollect.Collect):
"""
Automatically detects settings needed to read csv files. MULTI file use
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
nlines (int): number of lines to sample from each file
delims (string): possible delimiters, default ',;\t|'
"""
def __init__(self, fname_list, nlines = 10, delims=',;\t|'):
self.cfg_fname_list = fname_list
self.sniffers = [CSVSniffer(fname, nlines, delims) for fname in fname_list]
def get_all(self, fun_name, msg_error):
val = []
for sniffer in self.sniffers:
func = getattr(sniffer, fun_name)
val.append(func())
if len(set(val))>1:
raise NotImplementedError(msg_error+' Make sure all files have the same format')
# todo: want to raise an exception here...? or just use whatever got detected for each file?
else:
return val[0]
def get_delim(self):
return self.get_all('get_delim','Inconsistent delimiters detected!')
def count_skiprows(self):
return self.get_all('count_skiprows','Inconsistent skiprows detected!')
def has_header(self):
return self.get_all('has_header','Inconsistent header setting detected!')
# todo: propagate status of individual sniffers. instead of raising exception pass back status to get user input
def sniff_settings_csv(fname_list):
sniff = CSVSnifferList(fname_list)
csv_sniff = {}
csv_sniff['delim'] = sniff.get_delim()
csv_sniff['skiprows'] = sniff.count_skiprows()
csv_sniff['has_header'] = sniff.has_header()
csv_sniff['header'] = 0 if sniff.has_header() else None
return csv_sniff
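# Usage sketch (illustrative file names):
# sniff_settings_csv(['data/jan.csv', 'data/feb.csv'])
# returns a dict like {'delim': ',', 'skiprows': 0, 'has_header': True, 'header': 0}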
================================================
FILE: d6tstack/sync.py
================================================
import boto3
import botocore
import os
import ftputil
import numpy as np
class FTPSync:
"""
FTP Sync class. Syncs files from an FTP server to S3 or a local directory.
Args:
cfg_ftp_host (string): FTP host name
cfg_ftp_usr (string): FTP login username
cfg_ftp_pwd (string): FTP login password
cfg_ftp_dir (string): FTP starting directory to be used for sync.
cfg_s3_key (string): AWS S3 key for connection
cfg_s3_secret (string): AWS S3 secret for connection
bucket_name (string): Bucket name in s3 for syncing the files
local_dir (string): local directory path used for sync. The directory will be created if it does not exist.
logger (object): logger object with send_log()
"""
def __init__(self, cfg_ftp_host, cfg_ftp_usr, cfg_ftp_pwd, cfg_ftp_dir,
cfg_s3_key=None, cfg_s3_secret=None, bucket_name=None,
local_dir='./data/', logger=None):
self.cfg_ftp_host = cfg_ftp_host
self.cfg_ftp_usr = cfg_ftp_usr
self.cfg_ftp_pwd = cfg_ftp_pwd
self.cfg_ftp_dir = cfg_ftp_dir
self.ftp_host = ftputil.FTPHost(cfg_ftp_host, cfg_ftp_usr, cfg_ftp_pwd)
self.ftp_host.use_list_a_option = False
self.s3_client = None
self.bucket_name = None
if cfg_s3_key and cfg_s3_secret and bucket_name:
self.s3_client = boto3.client(
's3',
aws_access_key_id=cfg_s3_key,
aws_secret_access_key=cfg_s3_secret
)
exists = True
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
if not exists:
if logger:
logger.send_log('Bucket does not exist. Creating bucket', 'ok')
self.s3_client.create_bucket(Bucket=bucket_name)
self.bucket_name = bucket_name
self.local_dir = local_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
self.logger = logger
def get_all_files(self, subdirs=True, ftp=False):
"""
Get all file list from local or ftp
Args:
subdirs (bool): return all the files in directory recursively? If `false` it will not go to sub directories
ftp (bool): list files from the FTP server if `True`, otherwise list local files
Returns:
Alphabetically sorted file list
"""
fileSet = set()
host = os
from_dir = self.local_dir
if ftp:
host = self.ftp_host
from_dir = self.cfg_ftp_dir
if subdirs:
for dir_, _, files in host.walk(from_dir):
for fileName in files:
relDir = os.path.relpath(dir_, from_dir)
relFile = os.path.join(relDir, fileName)
fileSet.add(relFile)
else:
for fileName in host.listdir(from_dir):
relFile = os.path.join(from_dir, fileName)
if host.path.isfile(relFile):
fileSet.add(relFile)
return np.sort(list(fileSet))
def get_s3_files(self):
"""
Get all file list from s3 in the given bucket
Returns:
File list from s3 in bucket
"""
if not self.s3_client or not self.bucket_name:
raise ValueError("S3 credentials are mandatory to use this functionality")
s3_files = set()
all_files = self.s3_client.list_objects(Bucket=self.bucket_name)
for content in all_files.get('Contents', []):
s3_files.add(content.get('Key'))
return s3_files
def upload_to_s3(self, fname, local_path):
"""
Upload a single file from local to s3
Args:
fname (string): Filename in s3
local_path (string): Local path of file to be uploaded
"""
with open(local_path, 'rb') as f:
self.s3_client.upload_fileobj(f, self.bucket_name, fname)
def get_files_for_sync(self, subdirs=True, to_s3=False):
"""
        Get the list of FTP files that still need to be synced, along with their total size
        Args:
            subdirs (bool): recurse into subdirectories? If `False` it will not go into sub directories, Optional
            to_s3 (bool): compare FTP files against S3 if `True`, otherwise against the local directory
        Returns:
            Set of files to sync and their total size in bytes
"""
ftp_files = self.get_all_files(subdirs=subdirs, ftp=True)
if to_s3:
server_files = self.get_s3_files()
else:
server_files = self.get_all_files(subdirs=subdirs)
files_ftp_sync = set(ftp_files).difference(set(server_files))
total_file_size = sum([self.ftp_host.path.getsize(os.path.join(self.cfg_ftp_dir, f))
for f in files_ftp_sync])
return files_ftp_sync, total_file_size
def upload_ftp_files(self, subdirs=True, to_s3=False):
"""
        Download new files from the FTP server to the local directory, optionally uploading them to S3
        Args:
            subdirs (bool): upload files from ftp recursively? If `False` it will not go into sub directories, Optional
            to_s3 (bool): if `True`, downloaded files are also uploaded to s3; otherwise files are only synced to the local directory
"""
files_ftp_sync, total_file_size = self.get_files_for_sync(subdirs=subdirs, to_s3=to_s3)
for ftp_file in files_ftp_sync:
full_name = os.path.join(self.cfg_ftp_dir, ftp_file)
local_path = os.path.join(self.local_dir, ftp_file)
file_dir_local = os.path.dirname(local_path)
if not os.path.exists(file_dir_local):
os.makedirs(file_dir_local)
self.ftp_host.download(full_name, local_path)
if to_s3:
self.upload_to_s3(ftp_file, local_path)
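
# Usage sketch (illustrative only): how FTPSync might be wired up to mirror an FTP
# directory to a local folder. The host, credentials and directories below are
# placeholders, not real configuration.
if __name__ == '__main__':
    sync = FTPSync(
        cfg_ftp_host='ftp.example.com',  # placeholder host
        cfg_ftp_usr='ftpusr',            # placeholder credentials
        cfg_ftp_pwd='ftppwd',
        cfg_ftp_dir='/data/',
        local_dir='./data/',
    )
    # preview which files are missing locally and how much data would be transferred
    files_to_sync, total_size = sync.get_files_for_sync(subdirs=True, to_s3=False)
    print('files to sync:', files_to_sync, 'total bytes:', total_size)
    # download the missing files; pass to_s3=True (with S3 credentials) to also push to S3
    sync.upload_ftp_files(subdirs=True, to_s3=False)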
================================================
FILE: d6tstack/utils.py
================================================
import pandas as pd
import warnings
import d6tcollect
d6tcollect.init(__name__)
class PrintLogger(object):
def send_log(self, msg, status):
print(msg,status)
def send(self, data):
print(data)
import os
@d6tcollect.collect
def pd_readsql_query_from_sqlengine(uri, sql, schema_name=None, connect_args=None):
"""
    Load the result of a SQL statement into a pandas dataframe using `sql_engine.execute`, which makes execution faster.
Args:
uri (str): postgres psycopg2 sqlalchemy database uri
sql (str): sql query
schema_name (str): name of schema
connect_args (dict): dictionary of connection arguments to pass to `sqlalchemy.create_engine`
Returns:
df: pandas dataframe
"""
import sqlalchemy
if connect_args is not None:
sql_engine = sqlalchemy.create_engine(uri, connect_args=connect_args)
elif schema_name is not None:
if 'psycopg2' in uri:
sql_engine = sqlalchemy.create_engine(uri, connect_args={'options': '-csearch_path={}'.format(schema_name)})
else:
raise NotImplementedError('only `psycopg2` supported with schema_name, pass connect_args for your db engine')
else:
sql_engine = sqlalchemy.create_engine(uri)
sql = sql_engine.execute(sql)
df = pd.DataFrame(sql.fetchall())
return df
@d6tcollect.collect
def pd_readsql_table_from_sqlengine(uri, table_name, schema_name=None, connect_args=None):
"""
    Load a SQL table into a pandas dataframe using `sql_engine.execute`, which makes execution faster. Convenience function that returns the full table.
Args:
uri (str): postgres psycopg2 sqlalchemy database uri
table_name (str): table
schema_name (str): name of schema
connect_args (dict): dictionary of connection arguments to pass to `sqlalchemy.create_engine`
Returns:
df: pandas dataframe
"""
    return pd_readsql_query_from_sqlengine(uri, "SELECT * FROM {};".format(table_name), schema_name=schema_name, connect_args=connect_args)
@d6tcollect.collect
def pd_to_psql(df, uri, table_name, schema_name=None, if_exists='fail', sep=','):
"""
Load pandas dataframe into a sql table using native postgres COPY FROM.
Args:
df (dataframe): pandas dataframe
uri (str): postgres psycopg2 sqlalchemy database uri
table_name (str): table to store data in
schema_name (str): name of schema in db to write to
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
sep (str): separator for temp file, eg ',' or '\t'
Returns:
bool: True if loader finished
"""
if not 'psycopg2' in uri:
raise ValueError('need to use psycopg2 uri eg postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest. install with `pip install psycopg2-binary`')
table_name = table_name.lower()
if schema_name:
schema_name = schema_name.lower()
import sqlalchemy
import io
if schema_name is not None:
sql_engine = sqlalchemy.create_engine(uri, connect_args={'options': '-csearch_path={}'.format(schema_name)})
else:
sql_engine = sqlalchemy.create_engine(uri)
sql_cnxn = sql_engine.raw_connection()
cursor = sql_cnxn.cursor()
df[:0].to_sql(table_name, sql_engine, schema=schema_name, if_exists=if_exists, index=False)
fbuf = io.StringIO()
df.to_csv(fbuf, index=False, header=False, sep=sep)
fbuf.seek(0)
cursor.copy_from(fbuf, table_name, sep=sep, null='')
sql_cnxn.commit()
cursor.close()
return True
@d6tcollect.collect
def pd_to_mysql(df, uri, table_name, if_exists='fail', tmpfile='mysql.csv', sep=',', newline='\n'):
"""
    Load dataframe into a sql table using native mysql LOAD DATA LOCAL INFILE.
Args:
df (dataframe): pandas dataframe
uri (str): mysql mysqlconnector sqlalchemy database uri
table_name (str): table to store data in
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
tmpfile (str): filename for temporary file to load from
sep (str): separator for temp file, eg ',' or '\t'
Returns:
bool: True if loader finished
"""
if not 'mysql+mysqlconnector' in uri:
raise ValueError('need to use mysql+mysqlconnector uri eg mysql+mysqlconnector://testusr:testpwd@localhost/testdb. install with `pip install mysql-connector`')
table_name = table_name.lower()
import sqlalchemy
sql_engine = sqlalchemy.create_engine(uri)
df[:0].to_sql(table_name, sql_engine, if_exists=if_exists, index=False)
logger = PrintLogger()
logger.send_log('creating ' + tmpfile, 'ok')
with open(tmpfile, mode='w', newline=newline) as fhandle:
df.to_csv(fhandle, na_rep='\\N', index=False, sep=sep)
logger.send_log('loading ' + tmpfile, 'ok')
sql_load = "LOAD DATA LOCAL INFILE '{}' INTO TABLE {} FIELDS TERMINATED BY '{}' LINES TERMINATED BY '{}' IGNORE 1 LINES;".format(tmpfile, table_name, sep, newline)
sql_engine.execute(sql_load)
os.remove(tmpfile)
return True
@d6tcollect.collect
def pd_to_mssql(df, uri, table_name, schema_name=None, if_exists='fail', tmpfile='mysql.csv'):
"""
    Load dataframe into a sql table using native mssql BULK INSERT.
Args:
df (dataframe): pandas dataframe
        uri (str): mssql pymssql sqlalchemy database uri
table_name (str): table to store data in
schema_name (str): name of schema in db to write to
if_exists (str): {‘fail’, ‘replace’, ‘append’}, default ‘fail’. See `pandas.to_sql()` for details
tmpfile (str): filename for temporary file to load from
Returns:
bool: True if loader finished
"""
if not 'mssql+pymssql' in uri:
raise ValueError('need to use mssql+pymssql uri (conda install -c prometeia pymssql)')
table_name = table_name.lower()
if schema_name:
schema_name = schema_name.lower()
warnings.warn('`.pd_to_mssql()` is experimental, if any problems please raise an issue on https://github.com/d6t/d6tstack/issues or make a pull request')
import sqlalchemy
sql_engine = sqlalchemy.create_engine(uri)
df[:0].to_sql(table_name, sql_engine, if_exists=if_exists, index=False)
logger = PrintLogger()
logger.send_log('creating ' + tmpfile, 'ok')
df.to_csv(tmpfile, na_rep='\\N', index=False)
logger.send_log('loading ' + tmpfile, 'ok')
if schema_name is not None:
table_name = '{}.{}'.format(schema_name,table_name)
sql_load = "BULK INSERT {} FROM '{}';".format(table_name, tmpfile)
sql_engine.execute(sql_load)
os.remove(tmpfile)
return True
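
# Usage sketch (illustrative only): write a dataframe to postgres with pd_to_psql and
# read it back with pd_readsql_table_from_sqlengine. The connection uri and table name
# below are placeholders/assumptions, not values required by the library.
if __name__ == '__main__':
    cfg_uri_psql = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'  # placeholder uri
    df_demo = pd.DataFrame({'date': ['2011-01-01', '2011-01-02'], 'sales': [100, 200]})
    pd_to_psql(df_demo, cfg_uri_psql, 'demo_sales', if_exists='replace')
    print(pd_readsql_table_from_sqlengine(cfg_uri_psql, 'demo_sales'))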
================================================
FILE: docs/Makefile
================================================
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = python -msphinx
SPHINXPROJ = d6tstack
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
================================================
FILE: docs/make.bat
================================================
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=python -msphinx
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=d6t-lib
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The Sphinx module was not found. Make sure you have Sphinx installed,
echo.then set the SPHINXBUILD environment variable to point to the full
echo.path of the 'sphinx-build' executable. Alternatively you may add the
echo.Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
================================================
FILE: docs/make_zip_sample_csv.py
================================================
import zipfile
import glob
import os
if not os.path.exists('test-data/output/__init__.py'):
fhandle = open('test-data/output/__init__.py', 'w')
fhandle.close()
ziphandle = zipfile.ZipFile('test-data.zip', 'w')
cfg_path_base = 'test-data/input/test-data-input'
for fname in glob.glob(cfg_path_base+'*.csv')+glob.glob(cfg_path_base+'*.xls')+glob.glob(cfg_path_base+'*.xlsx'):
ziphandle.write(fname)
ziphandle.write('test-data/output/__init__.py')
ziphandle.close()
================================================
FILE: docs/make_zip_sample_xls.py
================================================
import zipfile
import glob
import os
import pandas as pd
import numpy as np
# generate fake data
cfg_tickers = ['AAP','M','SPLS']
cfg_ntickers = len(cfg_tickers)
cfg_ndates = 10
cfg_dates = pd.bdate_range('2018-01-01',periods=cfg_ndates).tolist()+pd.bdate_range('2018-02-01',periods=cfg_ndates).tolist()
cfg_nobs = cfg_ndates*2
dft = pd.DataFrame({'date':np.tile(cfg_dates,cfg_ntickers), 'ticker':np.repeat(cfg_tickers,cfg_nobs)})
#****************************************
# xls
#****************************************
def write_file_xls(dfg, fname, sheets, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
for isheet in sheets:
    dfg['data'] = np.random.normal(size=dfg.shape[0])
dfg['xls_sheet'] = isheet
dfg.to_excel(writer, isheet, index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel - bad case => d6tstack. Fake data
cfg_path_base = 'test-data/excel_adv_data/sample-xls-'
df = dft
np.random.seed(0)
write_file_xls(df, cfg_path_base+'case-simple.xlsx',['Sheet1'])
write_file_xls(df, cfg_path_base+'case-multisheet.xlsx',['Sheet1','Sheet2'])
write_file_xls(df, cfg_path_base+'case-multifile1.xlsx',['Sheet1'])
write_file_xls(df, cfg_path_base+'case-multifile2.xlsx',['Sheet1'])
write_file_xls(df, cfg_path_base+'case-badlayout1.xlsx',['Sheet1','Sheet2'],startrow=1,startcol=1)
ziphandle = zipfile.ZipFile('test-data-xls.zip', 'w')
for fname in glob.glob(cfg_path_base+'*.xlsx'):
ziphandle.write(fname)
ziphandle.write('test-data/output/__init__.py')
ziphandle.close()
================================================
FILE: docs/shell-napoleon-html.sh
================================================
make html
================================================
FILE: docs/shell-napoleon-recreate.sh
================================================
#rm ./source/*
#cp ./source-bak/* ./source/
sphinx-apidoc -f -o ./source ..
make clean
make html
================================================
FILE: docs/source/conf.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# d6t-lib documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 28 11:32:56 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.dirname(os.path.abspath('.'))) # todo: why is this not working?
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath('.'))))
sys.path.insert(0, os.path.join(os.path.dirname((os.path.abspath('.'))), "d6tstack"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'd6tstack'
copyright = '2017, databolt'
author = 'databolt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'd6tstack-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'd6tstack.tex', 'd6tstack Documentation',
'nn', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'd6tstack', 'd6tstack Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'd6tstack', 'd6tstack Documentation',
author, 'd6tstack', 'Databolt python library - Accelerate data engineering',
'Miscellaneous'),
]
================================================
FILE: docs/source/d6tstack.rst
================================================
d6tstack package
================
Submodules
----------
d6tstack.combine\_csv module
----------------------------
.. automodule:: d6tstack.combine_csv
:members:
:undoc-members:
:show-inheritance:
d6tstack.convert\_xls module
----------------------------
.. automodule:: d6tstack.convert_xls
:members:
:undoc-members:
:show-inheritance:
d6tstack.helpers module
-----------------------
.. automodule:: d6tstack.helpers
:members:
:undoc-members:
:show-inheritance:
d6tstack.helpers\_ui module
---------------------------
.. automodule:: d6tstack.helpers_ui
:members:
:undoc-members:
:show-inheritance:
d6tstack.sniffer module
-----------------------
.. automodule:: d6tstack.sniffer
:members:
:undoc-members:
:show-inheritance:
d6tstack.sync module
--------------------
.. automodule:: d6tstack.sync
:members:
:undoc-members:
:show-inheritance:
d6tstack.utils module
---------------------
.. automodule:: d6tstack.utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: d6tstack
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/index.rst
================================================
.. d6t-celery-combine documentation master file, created by
sphinx-quickstart on Tue Nov 28 11:32:56 2017.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to d6tstack documentation!
==============================================
Documentation for using the databolt python File Stack Combine library.
Library Docs
==================
* :ref:`modindex`
Search
==================
* :ref:`search`
================================================
FILE: docs/source/modules.rst
================================================
d6tstack
========
.. toctree::
:maxdepth: 4
d6tstack
setup
================================================
FILE: docs/source/setup.rst
================================================
setup module
============
.. automodule:: setup
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/tests.rst
================================================
tests package
=============
Submodules
----------
tests.test\_combine module
--------------------------
.. automodule:: tests.test_combine
:members:
:undoc-members:
:show-inheritance:
tests.test\_sync module
-----------------------
.. automodule:: tests.test_sync
:members:
:undoc-members:
:show-inheritance:
tests.test\_xls module
----------------------
.. automodule:: tests.test_xls
:members:
:undoc-members:
:show-inheritance:
tests.tmp module
----------------
.. automodule:: tests.tmp
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: tests
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: examples-csv.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data Engineering in Python with databolt - Quickly Load Any Type of CSV (d6tlib/d6tstack)\n",
"\n",
"Vendors often send large datasets in multiple files. Often there are missing and misaligned columns between files that have to be manually cleaned. With DataBolt File Stack you can easily stack them together into one consistent dataset.\n",
"\n",
"Features include:\n",
"* Quickly check column consistency across multiple files\n",
"* Fix added/missing columns\n",
"* Fix renamed columns\n",
"* Out of core functionality to process large files\n",
"* Export to pandas, CSV, SQL, parquet\n",
" * Fast export to postgres and mysql with out of core support\n",
" \n",
"In this workbook we will demonstrate the usage of the d6tstack library."
]
},
{
"cell_type": "code",
"execution_count": 100,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import importlib\n",
"import pandas as pd\n",
"import glob\n",
"\n",
"import d6tstack.combine_csv as d6tc\n",
"import d6tstack"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get sample data\n",
"\n",
"We've created some dummy sample data which you can download. "
]
},
{
"cell_type": "code",
"execution_count": 78,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import urllib.request\n",
"cfg_fname_sample = 'test-data.zip'\n",
"urllib.request.urlretrieve(\"https://github.com/d6t/d6tstack/raw/master/\"+cfg_fname_sample, cfg_fname_sample)\n",
"import zipfile\n",
"zip_ref = zipfile.ZipFile(cfg_fname_sample, 'r')\n",
"zip_ref.extractall('.')\n",
"zip_ref.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use Case: Checking Column Consistency\n",
"\n",
"Let's say you receive a bunch of csv files you want to ingest them, say for example into pandas, dask, pyspark, database."
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['test-data/input/test-data-input-csv-clean-mar.csv', 'test-data/input/test-data-input-csv-clean-feb.csv', 'test-data/input/test-data-input-csv-clean-jan.csv']\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-clean-*.csv'))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Check column consistency across all files\n",
"\n",
"Even if you think the files have a consistent column layout, it worthwhile using `d6tstack` to assert that that is actually the case. It's very quick to do even with very many large files!"
]
},
{
"cell_type": "code",
"execution_count": 80,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
}
],
"source": [
"# get previews\n",
"c = d6tc.CombinerCSV(cfg_fnames) # all_strings=True makes reading faster\n",
"col_sniff = c.sniff_columns()"
]
},
{
"cell_type": "code",
"execution_count": 81,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"all columns equal? True\n",
"\n",
"which columns are present in which files?\n",
"\n",
" date sales cost profit\n",
"file_path \n",
"test-data/input/test-data-input-csv-clean-feb.csv True True True True\n",
"test-data/input/test-data-input-csv-clean-jan.csv True True True True\n",
"test-data/input/test-data-input-csv-clean-mar.csv True True True True\n",
"\n",
"in what order do columns appear in the files?\n",
"\n",
" date sales cost profit\n",
"0 0 1 2 3\n",
"1 0 1 2 3\n",
"2 0 1 2 3\n"
]
}
],
"source": [
"print('all columns equal?', c.is_all_equal())\n",
"print('')\n",
"print('which columns are present in which files?')\n",
"print('')\n",
"print(c.is_column_present())\n",
"print('')\n",
"print('in what order do columns appear in the files?')\n",
"print('')\n",
"print(col_sniff['df_columns_order'].reset_index(drop=True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Preview Combined Data\n",
"\n",
"You can see a preview of what the combined data from all files will look like."
]
},
{
"cell_type": "code",
"execution_count": 82,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-01-01 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-clean-jan.csv | \n",
" test-data-input-csv-clean-jan.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-01-02 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-clean-jan.csv | \n",
" test-data-input-csv-clean-jan.csv | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-01-03 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-clean-jan.csv | \n",
" test-data-input-csv-clean-jan.csv | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-03-01 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-clean-mar.csv | \n",
" test-data-input-csv-clean-mar.csv | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-03-02 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-clean-mar.csv | \n",
" test-data-input-csv-clean-mar.csv | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-03-03 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-clean-mar.csv | \n",
" test-data-input-csv-clean-mar.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"3 2011-01-01 100 -80 20 test-data/input/test-data-input-csv-clean-jan.csv test-data-input-csv-clean-jan.csv\n",
"4 2011-01-02 100 -80 20 test-data/input/test-data-input-csv-clean-jan.csv test-data-input-csv-clean-jan.csv\n",
"5 2011-01-03 100 -80 20 test-data/input/test-data-input-csv-clean-jan.csv test-data-input-csv-clean-jan.csv\n",
"6 2011-03-01 300 -100 200 test-data/input/test-data-input-csv-clean-mar.csv test-data-input-csv-clean-mar.csv\n",
"7 2011-03-02 300 -100 200 test-data/input/test-data-input-csv-clean-mar.csv test-data-input-csv-clean-mar.csv\n",
"8 2011-03-03 300 -100 200 test-data/input/test-data-input-csv-clean-mar.csv test-data-input-csv-clean-mar.csv"
]
},
"execution_count": 82,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.combine_preview()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Read All Files to Pandas\n",
"\n",
"You can quickly load the combined data into a pandas dataframe with a single command. "
]
},
{
"cell_type": "code",
"execution_count": 83,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-02-04 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-02-05 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-clean-feb.csv | \n",
" test-data-input-csv-clean-feb.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"3 2011-02-04 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv\n",
"4 2011-02-05 200 -90 110 test-data/input/test-data-input-csv-clean-feb.csv test-data-input-csv-clean-feb.csv"
]
},
"execution_count": 83,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.to_pandas().head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use Case: Identifying and fixing inconsistent columns\n",
"\n",
"The first case was clean: all files had the same columns. It happens very frequently that the data schema changes over time with columns being added or deleted over time. Let's look at a case where an extra columns got added."
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['test-data/input/test-data-input-csv-colmismatch-mar.csv', 'test-data/input/test-data-input-csv-colmismatch-feb.csv', 'test-data/input/test-data-input-csv-colmismatch-jan.csv']\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-colmismatch-*.csv'))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
}
],
"source": [
"# get previews\n",
"c = d6tc.CombinerCSV(cfg_fnames) # all_strings=True makes reading faster\n",
"col_sniff = c.sniff_columns()"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"all columns equal? False\n",
"\n",
"which columns are unique? ['profit2']\n",
"\n",
"which files have unique columns?\n",
"\n",
" profit2\n",
"file_path \n",
"test-data/input/test-data-input-csv-colmismatch... False\n",
"test-data/input/test-data-input-csv-colmismatch... False\n",
"test-data/input/test-data-input-csv-colmismatch... True\n"
]
}
],
"source": [
"print('all columns equal?', c.is_all_equal())\n",
"print('')\n",
"print('which columns are unique?', col_sniff['columns_unique'])\n",
"print('')\n",
"print('which files have unique columns?')\n",
"print('')\n",
"print(c.is_column_present_unique())"
]
},
{
"cell_type": "code",
"execution_count": 87,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" profit2 | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" NaN | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" NaN | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" NaN | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-02-04 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" NaN | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-02-05 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" NaN | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit profit2 filepath filename\n",
"0 2011-02-01 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"1 2011-02-02 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"2 2011-02-03 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"3 2011-02-04 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"4 2011-02-05 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv"
]
},
"execution_count": 87,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.to_pandas().head() # keep all columns"
]
},
{
"cell_type": "code",
"execution_count": 88,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-02-04 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-02-05 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-colmismatc... | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"3 2011-02-04 200 -90 110 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"4 2011-02-05 200 -90 110 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv"
]
},
"execution_count": 88,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"d6tc.CombinerCSV(cfg_fnames, columns_select_common=True).to_pandas().head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Use Case: align renamed columns. Select subset of columns\n",
"\n",
"Say a column has been renamed and now the data doesn't line up with the data from the old column name. You can easily fix such a situation by using `CombinerCSVAdvanced` which allows you to rename columns and automatically lines up the data. It also allows you to just load data from a subset of columns."
]
},
{
"cell_type": "code",
"execution_count": 89,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
" revenue sales\n",
"file_path \n",
"test-data/input/test-data-input-csv-renamed-feb... False True\n",
"test-data/input/test-data-input-csv-renamed-jan... False True\n",
"test-data/input/test-data-input-csv-renamed-mar... True False\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-renamed-*.csv'))\n",
"c = d6tc.CombinerCSV(cfg_fnames)\n",
"print(c.is_column_present_unique())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The column `sales` got renamed to `revenue` in the March file, this would causes problems when reading the files. "
]
},
{
"cell_type": "code",
"execution_count": 90,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" filename | \n",
" revenue | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" test-data-input-csv-renamed-feb.csv | \n",
" NaN | \n",
" 200.0 | \n",
"
\n",
" \n",
" | 1 | \n",
" test-data-input-csv-renamed-feb.csv | \n",
" NaN | \n",
" 200.0 | \n",
"
\n",
" \n",
" | 2 | \n",
" test-data-input-csv-renamed-feb.csv | \n",
" NaN | \n",
" 200.0 | \n",
"
\n",
" \n",
" | 3 | \n",
" test-data-input-csv-renamed-jan.csv | \n",
" NaN | \n",
" 100.0 | \n",
"
\n",
" \n",
" | 4 | \n",
" test-data-input-csv-renamed-jan.csv | \n",
" NaN | \n",
" 100.0 | \n",
"
\n",
" \n",
" | 5 | \n",
" test-data-input-csv-renamed-jan.csv | \n",
" NaN | \n",
" 100.0 | \n",
"
\n",
" \n",
" | 6 | \n",
" test-data-input-csv-renamed-mar.csv | \n",
" 300.0 | \n",
" NaN | \n",
"
\n",
" \n",
" | 7 | \n",
" test-data-input-csv-renamed-mar.csv | \n",
" 300.0 | \n",
" NaN | \n",
"
\n",
" \n",
" | 8 | \n",
" test-data-input-csv-renamed-mar.csv | \n",
" 300.0 | \n",
" NaN | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" filename revenue sales\n",
"0 test-data-input-csv-renamed-feb.csv NaN 200.0\n",
"1 test-data-input-csv-renamed-feb.csv NaN 200.0\n",
"2 test-data-input-csv-renamed-feb.csv NaN 200.0\n",
"3 test-data-input-csv-renamed-jan.csv NaN 100.0\n",
"4 test-data-input-csv-renamed-jan.csv NaN 100.0\n",
"5 test-data-input-csv-renamed-jan.csv NaN 100.0\n",
"6 test-data-input-csv-renamed-mar.csv 300.0 NaN\n",
"7 test-data-input-csv-renamed-mar.csv 300.0 NaN\n",
"8 test-data-input-csv-renamed-mar.csv 300.0 NaN"
]
},
"execution_count": 90,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"col_sniff = c.sniff_columns()\n",
"c.combine_preview()[['filename']+col_sniff['columns_unique']]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can pass the columns you want to rename to `columns_rename` and it will rename and align those columns."
]
},
{
"cell_type": "code",
"execution_count": 91,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# only select particular columns\n",
"cfg_col_sel = ['date','sales','cost','profit'] # don't select profit2\n",
"# rename colums\n",
"cfg_col_rename = {'sales':'revenue'} # rename all instances of sales to revenue"
]
},
{
"cell_type": "code",
"execution_count": 92,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" revenue | \n",
" cost | \n",
" profit | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-renamed-fe... | \n",
" test-data-input-csv-renamed-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-renamed-fe... | \n",
" test-data-input-csv-renamed-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-renamed-fe... | \n",
" test-data-input-csv-renamed-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-01-01 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-renamed-ja... | \n",
" test-data-input-csv-renamed-jan.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-01-02 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-renamed-ja... | \n",
" test-data-input-csv-renamed-jan.csv | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-01-03 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-renamed-ja... | \n",
" test-data-input-csv-renamed-jan.csv | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-03-01 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-renamed-ma... | \n",
" test-data-input-csv-renamed-mar.csv | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-03-02 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-renamed-ma... | \n",
" test-data-input-csv-renamed-mar.csv | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-03-03 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-renamed-ma... | \n",
" test-data-input-csv-renamed-mar.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date revenue cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-renamed-fe... test-data-input-csv-renamed-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-renamed-fe... test-data-input-csv-renamed-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-renamed-fe... test-data-input-csv-renamed-feb.csv\n",
"3 2011-01-01 100 -80 20 test-data/input/test-data-input-csv-renamed-ja... test-data-input-csv-renamed-jan.csv\n",
"4 2011-01-02 100 -80 20 test-data/input/test-data-input-csv-renamed-ja... test-data-input-csv-renamed-jan.csv\n",
"5 2011-01-03 100 -80 20 test-data/input/test-data-input-csv-renamed-ja... test-data-input-csv-renamed-jan.csv\n",
"6 2011-03-01 300 -100 200 test-data/input/test-data-input-csv-renamed-ma... test-data-input-csv-renamed-mar.csv\n",
"7 2011-03-02 300 -100 200 test-data/input/test-data-input-csv-renamed-ma... test-data-input-csv-renamed-mar.csv\n",
"8 2011-03-03 300 -100 200 test-data/input/test-data-input-csv-renamed-ma... test-data-input-csv-renamed-mar.csv"
]
},
"execution_count": 92,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c = d6tc.CombinerCSV(cfg_fnames, columns_rename = cfg_col_rename, columns_select = cfg_col_sel) \n",
"c.combine_preview() \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Case: Identify change in column order\n",
"\n",
"If you read your files into a database this will be a real problem because it look like the files are all the same whereas in fact they have changes. This is because programs like dask or sql loaders assume the column order is the same. With `d6tstack` you can easily identify and fix such a case."
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['test-data/input/test-data-input-csv-reorder-jan.csv', 'test-data/input/test-data-input-csv-reorder-mar.csv', 'test-data/input/test-data-input-csv-reorder-feb.csv']\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-reorder-*.csv'))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n"
]
}
],
"source": [
"# get previews\n",
"c = d6tc.CombinerCSV(cfg_fnames) # all_strings=True makes reading faster\n",
"col_sniff = c.sniff_columns()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we can see that all columns are not equal"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"all columns equal? False\n",
"\n",
"in what order do columns appear in the files?\n",
"\n",
" date sales cost profit\n",
"0 0 1 2 3\n",
"1 0 1 2 3\n",
"2 0 1 3 2\n"
]
}
],
"source": [
"print('all columns equal?', col_sniff['is_all_equal'])\n",
"print('')\n",
"print('in what order do columns appear in the files?')\n",
"print('')\n",
"print(col_sniff['df_columns_order'].reset_index(drop=True))"
]
},
{
"cell_type": "code",
"execution_count": 96,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" filepath | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-reorder-fe... | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-reorder-fe... | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data/input/test-data-input-csv-reorder-fe... | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-01-01 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-reorder-ja... | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-01-02 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-reorder-ja... | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-01-03 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data/input/test-data-input-csv-reorder-ja... | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-03-01 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-reorder-ma... | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-03-02 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-reorder-ma... | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-03-03 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data/input/test-data-input-csv-reorder-ma... | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"3 2011-01-01 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"4 2011-01-02 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"5 2011-01-03 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"6 2011-03-01 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"7 2011-03-02 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"8 2011-03-03 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv"
]
},
"execution_count": 96,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.combine_preview() # automatically puts it in the right order"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Customize separator and pass pd.read_csv() params\n",
"\n",
"You can pass additional parameters such as separators and any params for `pd.read_csv()` to the combiner."
]
},
{
"cell_type": "code",
"execution_count": 97,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"{'files_columns': {'test-data/input/test-data-input-csv-reorder-feb.csv': ['date', 'sales', 'cost', 'profit'], 'test-data/input/test-data-input-csv-reorder-jan.csv': ['date', 'sales', 'cost', 'profit'], 'test-data/input/test-data-input-csv-reorder-mar.csv': ['date', 'sales', 'profit', 'cost']}, 'columns_all': ['date', 'sales', 'cost', 'profit'], 'columns_common': ['date', 'sales', 'cost', 'profit'], 'columns_unique': [], 'is_all_equal': False, 'df_columns_present': date sales cost profit\n",
"file_path \n",
"test-data/input/test-data-input-csv-reorder-feb... True True True True\n",
"test-data/input/test-data-input-csv-reorder-jan... True True True True\n",
"test-data/input/test-data-input-csv-reorder-mar... True True True True, 'df_columns_order': date sales cost profit\n",
"test-data/input/test-data-input-csv-reorder-feb... 0 1 2 3\n",
"test-data/input/test-data-input-csv-reorder-jan... 0 1 2 3\n",
"test-data/input/test-data-input-csv-reorder-mar... 0 1 3 2}\n"
]
}
],
"source": [
"c = d6tc.CombinerCSV(cfg_fnames, sep=',', read_csv_params={'header': None})\n",
"col_sniff = c.sniff_columns()\n",
"print(col_sniff)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CSV out of core functionality\n",
"\n",
"If your files are large you don't want to read them all in memory and then save. Instead you can write directly to the output file."
]
},
{
"cell_type": "code",
"execution_count": 98,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"'test-data/output/test.csv'"
]
},
"execution_count": 98,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.to_csv_combine('test-data/output/test.csv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Auto Detect pd.read_csv() settings"
]
},
{
"cell_type": "code",
"execution_count": 103,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"### Detect CSV settings across a single file"
]
},
{
"cell_type": "code",
"execution_count": 104,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'delim': ',', 'skiprows': 0, 'has_header': True, 'header': 0}\n"
]
}
],
"source": [
"cfg_sniff = d6tstack.sniffer.sniff_settings_csv([cfg_fnames[0]])\n",
"print(cfg_sniff)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Detect CSV settings across multiple files"
]
},
{
"cell_type": "code",
"execution_count": 105,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'delim': ',', 'skiprows': 0, 'has_header': True, 'header': 0}\n"
]
}
],
"source": [
"# finds common csv across all files\n",
"cfg_sniff = d6tstack.sniffer.sniff_settings_csv(cfg_fnames)\n",
"print(cfg_sniff)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: examples-dask.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# d6tstack with Dask\n",
"\n",
"Dask is a great library for out-of-core computing. But if input files are not properly organized it quickly breaks. For example:\n",
"\n",
"1) if columns are different between files, dask won't even read the data! It doesn't tell you what you need to do to fix it.\n",
"\n",
"2) if column order is rearranged between files it will read data, but into the wrong columns and you won't notice it\n",
"\n",
"Dask can't handle those scenarios. With d6tstack you can easily fix the situation with just a few lines of code!\n",
"\n",
"For more instructions, examples and documentation see https://github.com/d6t/d6tstack"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Base Case: Columns are same between all files\n",
"As a base case, we have input files which have consistent input columns and thus can be easily read in dask."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
" return f(*args, **kwds)\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-02-04 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-02-05 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-02-06 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-02-07 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-02-08 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-02-09 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 9 | \n",
" 2011-02-10 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 0 | \n",
" 2011-01-01 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-01-02 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-01-03 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-01-04 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-01-05 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-01-06 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-01-07 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-01-08 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-01-09 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 9 | \n",
" 2011-01-10 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 0 | \n",
" 2011-03-01 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-03-02 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-03-03 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-03-04 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-03-05 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-03-06 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-03-07 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-03-08 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-03-09 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
" | 9 | \n",
" 2011-03-10 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit\n",
"0 2011-02-01 200 -90 110\n",
"1 2011-02-02 200 -90 110\n",
"2 2011-02-03 200 -90 110\n",
"3 2011-02-04 200 -90 110\n",
"4 2011-02-05 200 -90 110\n",
"5 2011-02-06 200 -90 110\n",
"6 2011-02-07 200 -90 110\n",
"7 2011-02-08 200 -90 110\n",
"8 2011-02-09 200 -90 110\n",
"9 2011-02-10 200 -90 110\n",
"0 2011-01-01 100 -80 20\n",
"1 2011-01-02 100 -80 20\n",
"2 2011-01-03 100 -80 20\n",
"3 2011-01-04 100 -80 20\n",
"4 2011-01-05 100 -80 20\n",
"5 2011-01-06 100 -80 20\n",
"6 2011-01-07 100 -80 20\n",
"7 2011-01-08 100 -80 20\n",
"8 2011-01-09 100 -80 20\n",
"9 2011-01-10 100 -80 20\n",
"0 2011-03-01 300 -100 200\n",
"1 2011-03-02 300 -100 200\n",
"2 2011-03-03 300 -100 200\n",
"3 2011-03-04 300 -100 200\n",
"4 2011-03-05 300 -100 200\n",
"5 2011-03-06 300 -100 200\n",
"6 2011-03-07 300 -100 200\n",
"7 2011-03-08 300 -100 200\n",
"8 2011-03-09 300 -100 200\n",
"9 2011-03-10 300 -100 200"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import dask.dataframe as dd\n",
"\n",
"# consistent format\n",
"ddf = dd.read_csv('test-data/input/test-data-input-csv-clean-*.csv')\n",
"ddf.compute()"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "## Problem Case 1: Columns are different between files\n",
  "That worked well. But what happens if your input files have inconsistent columns? Say, for example, one file has a new column that the other files don't have."
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "Length mismatch: Expected axis has 5 elements, new values have 4 elements",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# consistent format\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0mddf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'test-data/input/test-data-input-csv-colmismatch-*.csv'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[0mddf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\base.py\u001b[0m in \u001b[0;36mcompute\u001b[1;34m(self, **kwargs)\u001b[0m\n\u001b[0;32m 153\u001b[0m \u001b[0mdask\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbase\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 154\u001b[0m \"\"\"\n\u001b[1;32m--> 155\u001b[1;33m \u001b[1;33m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompute\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtraverse\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 156\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 157\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\base.py\u001b[0m in \u001b[0;36mcompute\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 402\u001b[0m postcomputes = [a.__dask_postcompute__() if is_dask_collection(a)\n\u001b[0;32m 403\u001b[0m else (None, a) for a in args]\n\u001b[1;32m--> 404\u001b[1;33m \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mget\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdsk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkeys\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 405\u001b[0m \u001b[0mresults_iter\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0miter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mresults\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 406\u001b[0m return tuple(a if f is None else f(next(results_iter), *a)\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\threaded.py\u001b[0m in \u001b[0;36mget\u001b[1;34m(dsk, result, cache, num_workers, **kwargs)\u001b[0m\n\u001b[0;32m 73\u001b[0m results = get_async(pool.apply_async, len(pool._pool), dsk, result,\n\u001b[0;32m 74\u001b[0m \u001b[0mcache\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcache\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mget_id\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0m_thread_get_id\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 75\u001b[1;33m pack_exception=pack_exception, **kwargs)\n\u001b[0m\u001b[0;32m 76\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 77\u001b[0m \u001b[1;31m# Cleanup pools associated to dead threads\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\local.py\u001b[0m in \u001b[0;36mget_async\u001b[1;34m(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)\u001b[0m\n\u001b[0;32m 519\u001b[0m \u001b[0m_execute_task\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtask\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# Re-execute locally\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 520\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 521\u001b[1;33m \u001b[0mraise_exception\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtb\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 522\u001b[0m \u001b[0mres\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mworker_id\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mloads\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mres_info\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 523\u001b[0m \u001b[0mstate\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'cache'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mres\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\compatibility.py\u001b[0m in \u001b[0;36mreraise\u001b[1;34m(exc, tb)\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mexc\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__traceback__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mtb\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 66\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 67\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 68\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 69\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\local.py\u001b[0m in \u001b[0;36mexecute_task\u001b[1;34m(key, task_info, dumps, loads, get_id, pack_exception)\u001b[0m\n\u001b[0;32m 288\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 289\u001b[0m \u001b[0mtask\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mloads\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtask_info\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 290\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_execute_task\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtask\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 291\u001b[0m \u001b[0mid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mget_id\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 292\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdumps\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mid\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\local.py\u001b[0m in \u001b[0;36m_execute_task\u001b[1;34m(arg, cache, dsk)\u001b[0m\n\u001b[0;32m 269\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0margs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0marg\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marg\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 270\u001b[0m \u001b[0margs2\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0m_execute_task\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcache\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[1;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 271\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 272\u001b[0m \u001b[1;32melif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mishashable\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 273\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0marg\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\compatibility.py\u001b[0m in \u001b[0;36mapply\u001b[1;34m(func, args, kwargs)\u001b[0m\n\u001b[0;32m 46\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mapply\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 47\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 48\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 49\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 50\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\dask\\dataframe\\io\\csv.py\u001b[0m in \u001b[0;36mpandas_read_text\u001b[1;34m(reader, b, header, kwargs, dtypes, columns, write_header, enforce)\u001b[0m\n\u001b[0;32m 69\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Columns do not match\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcolumns\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 70\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 71\u001b[1;33m \u001b[0mdf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcolumns\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 72\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mdf\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 73\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\pandas\\core\\generic.py\u001b[0m in \u001b[0;36m__setattr__\u001b[1;34m(self, name, value)\u001b[0m\n\u001b[0;32m 3625\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3626\u001b[0m \u001b[0mobject\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__getattribute__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 3627\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mobject\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__setattr__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3628\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mAttributeError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3629\u001b[0m \u001b[1;32mpass\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mpandas/_libs/properties.pyx\u001b[0m in \u001b[0;36mpandas._libs.properties.AxisProperty.__set__\u001b[1;34m()\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\pandas\\core\\generic.py\u001b[0m in \u001b[0;36m_set_axis\u001b[1;34m(self, axis, labels)\u001b[0m\n\u001b[0;32m 557\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 558\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_set_axis\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 559\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_data\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset_axis\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maxis\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 560\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_clear_item_cache\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 561\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mC:\\Anaconda3\\lib\\site-packages\\pandas\\core\\internals.py\u001b[0m in \u001b[0;36mset_axis\u001b[1;34m(self, axis, new_labels)\u001b[0m\n\u001b[0;32m 3072\u001b[0m raise ValueError('Length mismatch: Expected axis has %d elements, '\n\u001b[0;32m 3073\u001b[0m \u001b[1;34m'new values have %d elements'\u001b[0m \u001b[1;33m%\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 3074\u001b[1;33m (old_len, new_len))\n\u001b[0m\u001b[0;32m 3075\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3076\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0maxes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0maxis\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnew_labels\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mValueError\u001b[0m: Length mismatch: Expected axis has 5 elements, new values have 4 elements"
]
}
],
"source": [
"# consistent format\n",
"ddf = dd.read_csv('test-data/input/test-data-input-csv-colmismatch-*.csv')\n",
"ddf.compute()\n"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "## Fixing the problem with d6tstack\n",
  "Urgh! There's no way to use these files in dask as they are, and the error doesn't tell you much. Which file caused the problem? Why did it cause a problem? All you know is that one file has more columns than the first file.\n",
  "\n",
  "You can either fix those files by hand or use d6tstack to quickly check for this situation and fix it with a few lines of code - no manual processing required. Let's take a look!"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"all equal False\n",
"\n"
]
},
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date sales cost profit profit2\n",
"file_path \n",
"test-data/input/test-data-input-csv-colmismatch... True True True True False\n",
"test-data/input/test-data-input-csv-colmismatch... True True True True False\n",
"test-data/input/test-data-input-csv-colmismatch... True True True True True"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import glob\n",
"import d6tstack.combine_csv\n",
"\n",
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-colmismatch-*.csv'))\n",
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames)\n",
"\n",
"# check columns\n",
"print('all equal',c.is_all_equal())\n",
"print('')\n",
"c.is_column_present()"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Before using dask you can quickly use d6tstack to check if all columns are consistent with `d6tstack.combine_csv.CombinerCSV.is_all_equal()`. If they are not, you can easily see which files are causing problems with `d6tstack.combine_csv.CombinerCSV.is_column_present()`. In this case there is a new column \"profit2\" in \"test-data-input-csv-colmismatch-mar.csv\" (see the short pandas sketch below).\n",
  "\n",
  "**Let's use d6tstack to fix the situation.** We will use out-of-core processing with `d6tstack.combine_csv.CombinerCSV.to_csv_align()` to re-save each file with a consistent set of columns. Any missing data is filled with NaN (to keep only the columns common to all files, use `cfg_col_sel=c.col_preview['columns_common']`). Just 2 lines of code!"
]
},
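{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "A small optional sketch before running the fix: you can pull out just the problem columns from the boolean frame returned by `is_column_present()` above. This is plain pandas, not a d6tstack API:\n",
  "\n",
  "```python\n",
  "# show only the columns that are not present in every file (here: 'profit2')\n",
  "df_cols = c.is_column_present()\n",
  "print(df_cols.loc[:, ~df_cols.all()])\n",
  "```"
 ]
},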
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-colmismatch-feb.csv ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-colmismatch-jan.csv ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-colmismatch-mar.csv ok\n"
]
}
],
"source": [
"# out-of-core combining\n",
"fnames = d6tstack.combine_csv.CombinerCSV(cfg_fnames).to_csv_align(output_dir='test-data/output/')"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "NB: Instead of `to_csv_align()` you can also run `to_csv_combine()`, which creates a single combined file (see the sketch after this cell).\n",
  "\n",
  "Now you can read this in dask and do whatever you wanted to do in the first place."
]
},
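{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "A minimal sketch of the `to_csv_combine()` alternative mentioned above - the `filename` argument name is an assumption about the API, not something shown in this notebook:\n",
  "\n",
  "```python\n",
  "# write all aligned data into a single combined CSV instead of one output file per input file\n",
  "fname_all = d6tstack.combine_csv.CombinerCSV(cfg_fnames).to_csv_combine(filename='test-data/output/test-data-combined.csv')\n",
  "```"
 ]
},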
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date sales cost profit profit2 filepath filename\n",
"0 2011-02-01 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"1 2011-02-02 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"2 2011-02-03 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"3 2011-02-04 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"4 2011-02-05 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"5 2011-02-06 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"6 2011-02-07 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"7 2011-02-08 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"8 2011-02-09 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"9 2011-02-10 200 -90 110 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-feb.csv\n",
"0 2011-01-01 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"1 2011-01-02 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"2 2011-01-03 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"3 2011-01-04 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"4 2011-01-05 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"5 2011-01-06 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"6 2011-01-07 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"7 2011-01-08 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"8 2011-01-09 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"9 2011-01-10 100 -80 20 NaN test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-jan.csv\n",
"0 2011-03-01 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"1 2011-03-02 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"2 2011-03-03 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"3 2011-03-04 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"4 2011-03-05 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"5 2011-03-06 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"6 2011-03-07 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"7 2011-03-08 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"8 2011-03-09 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv\n",
"9 2011-03-10 300 -100 200 400.0 test-data/input/test-data-input-csv-colmismatc... test-data-input-csv-colmismatch-mar.csv"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# consistent format\n",
"ddf = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv')\n",
"ddf.compute()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Problem Case 2: Columns are reordered between files\n",
"This is a sneaky case. The columns are the same but the order is different! Dask will read everything just fine without a warning but your data is totally messed up!\n",
"\n",
"In the example below, the \"profit\" column contains data from the \"cost\" column!"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date sales cost profit\n",
"0 2011-02-01 200 -90 110\n",
"1 2011-02-02 200 -90 110\n",
"2 2011-02-03 200 -90 110\n",
"3 2011-02-04 200 -90 110\n",
"4 2011-02-05 200 -90 110\n",
"5 2011-02-06 200 -90 110\n",
"6 2011-02-07 200 -90 110\n",
"7 2011-02-08 200 -90 110\n",
"8 2011-02-09 200 -90 110\n",
"9 2011-02-10 200 -90 110\n",
"0 2011-01-01 100 -80 20\n",
"1 2011-01-02 100 -80 20\n",
"2 2011-01-03 100 -80 20\n",
"3 2011-01-04 100 -80 20\n",
"4 2011-01-05 100 -80 20\n",
"5 2011-01-06 100 -80 20\n",
"6 2011-01-07 100 -80 20\n",
"7 2011-01-08 100 -80 20\n",
"8 2011-01-09 100 -80 20\n",
"9 2011-01-10 100 -80 20\n",
"0 2011-03-01 300 200 -100\n",
"1 2011-03-02 300 200 -100\n",
"2 2011-03-03 300 200 -100\n",
"3 2011-03-04 300 200 -100\n",
"4 2011-03-05 300 200 -100\n",
"5 2011-03-06 300 200 -100\n",
"6 2011-03-07 300 200 -100\n",
"7 2011-03-08 300 200 -100\n",
"8 2011-03-09 300 200 -100\n",
"9 2011-03-10 300 200 -100"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
 "# columns are reordered between files - dask reads this without error but the data is misaligned\n",
"ddf = dd.read_csv('test-data/input/test-data-input-csv-reorder-*.csv')\n",
"ddf.compute()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"all columns equal? False\n",
"\n",
"in what order do columns appear in the files?\n",
"\n"
]
},
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date sales cost profit\n",
"0 0 1 2 3\n",
"1 0 1 2 3\n",
"2 0 1 3 2"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-reorder-*.csv'))\n",
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames)\n",
"\n",
"# check columns\n",
"col_sniff = c.sniff_columns()\n",
"print('all columns equal?' , c.is_all_equal())\n",
"print('')\n",
"print('in what order do columns appear in the files?')\n",
"print('')\n",
"col_sniff['df_columns_order'].reset_index(drop=True)"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Again, this is just a useful check before loading data into dask: you can see that the columns don't line up. It's very fast to run because it only reads the headers, so from a QA perspective there's no reason NOT to do it.\n",
  "\n",
  "Same as above, the fix is the same few lines of code with d6tstack."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-reorder-feb.csv ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-reorder-jan.csv ok\n",
"writing test-data/output/d6tstack-test-data-input-csv-reorder-mar.csv ok\n"
]
}
],
"source": [
"# out-of-core combining\n",
"fnames = d6tstack.combine_csv.CombinerCSV(cfg_fnames).to_csv_align(output_dir='test-data/output/')"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date sales cost profit filepath filename\n",
"0 2011-02-01 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"1 2011-02-02 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"2 2011-02-03 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"3 2011-02-04 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"4 2011-02-05 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"5 2011-02-06 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"6 2011-02-07 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"7 2011-02-08 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"8 2011-02-09 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"9 2011-02-10 200 -90 110 test-data/input/test-data-input-csv-reorder-fe... test-data-input-csv-reorder-feb.csv\n",
"0 2011-01-01 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"1 2011-01-02 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"2 2011-01-03 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"3 2011-01-04 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"4 2011-01-05 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"5 2011-01-06 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"6 2011-01-07 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"7 2011-01-08 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"8 2011-01-09 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"9 2011-01-10 100 -80 20 test-data/input/test-data-input-csv-reorder-ja... test-data-input-csv-reorder-jan.csv\n",
"0 2011-03-01 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"1 2011-03-02 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"2 2011-03-03 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"3 2011-03-04 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"4 2011-03-05 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"5 2011-03-06 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"6 2011-03-07 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"7 2011-03-08 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"8 2011-03-09 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv\n",
"9 2011-03-10 300 -100 200 test-data/input/test-data-input-csv-reorder-ma... test-data-input-csv-reorder-mar.csv"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# consistent format\n",
"ddf = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-reorder-*.csv')\n",
"ddf.compute()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: examples-excel.ipynb
================================================
{
"cells": [
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "# Data Engineering in Python with databolt - Quickly Extract data from Excel Files (d6tlib/d6tstack)\n",
  "\n",
  "Excel files are very common because non-technical users like accessing and manipulating data in Excel. For data engineering and data science, however, those Excel files are not easy to read; for example, `dask` and `pyspark` don't read Excel files.\n",
  "\n",
  "**In this workbook we will demonstrate how to use d6tstack to quickly extract data from messy Excel files into clean CSV data.**\n",
  "\n",
  "We will be covering the following use cases:\n",
  "* Check tab consistency across multiple files\n",
  "* Extract tabs from multiple Excel files\n",
  "* Extract all tabs from an Excel file\n",
  "* Extract data from unstructured files\n",
  "* Clean empty columns and rows (see the small pandas sketch after this list)"
]
},
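{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "As a quick aside on the last bullet, a minimal plain-pandas sketch of dropping empty columns and rows - this is not a d6tstack API call and `'some-file.xlsx'` is a hypothetical file name:\n",
  "\n",
  "```python\n",
  "import pandas as pd\n",
  "\n",
  "df = pd.read_excel('some-file.xlsx')  # hypothetical input file\n",
  "df = df.dropna(axis=1, how='all')  # drop columns that are entirely empty\n",
  "df = df.dropna(axis=0, how='all')  # drop rows that are entirely empty\n",
  "```"
 ]
},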
{
"cell_type": "code",
"execution_count": 35,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import d6tstack.convert_xls\n",
"from d6tstack.convert_xls import XLSSniffer\n",
"from d6tstack.utils import PrintLogger\n",
"\n",
"import pandas as pd\n",
"import dask.dataframe as dd\n",
"import glob"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get sample data\n",
"\n",
"We've created some dummy sample data which you can download. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import urllib.request\n",
"cfg_fname_sample = 'test-data-xls.zip'\n",
"urllib.request.urlretrieve(\"https://github.com/d6t/d6tstack/raw/master/\"+cfg_fname_sample, cfg_fname_sample)\n",
"import zipfile\n",
"zip_ref = zipfile.ZipFile(cfg_fname_sample, 'r')\n",
"zip_ref.extractall('.')\n",
"zip_ref.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use Case: Extract all sheets from a single file\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"converting file: sample-xls-case-multisheet.xlsx | sheet: ok\n",
"converting file: sample-xls-case-multisheet.xlsx | sheet: ok\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/mnt/data/dev/d6t-lib/d6tstack/d6tstack/convert_xls.py:72: UserWarning: File test-data/excel_adv_data/sample-xls-case-multisheet.xlsx exists, skipping\n",
" warnings.warn('File %s exists, skipping' %fname)\n"
]
},
{
"data": {
"text/plain": [
"['test-data/output/sample-xls-case-multisheet.xlsx-Sheet1.csv',\n",
" 'test-data/output/sample-xls-case-multisheet.xlsx-Sheet2.csv']"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c = d6tstack.convert_xls.XLStoCSVMultiSheet('test-data/excel_adv_data/sample-xls-case-multisheet.xlsx', \n",
" output_dir = 'test-data/output', logger=PrintLogger())\n",
"c.convert_all()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date ticker data xls_sheet\n",
"0 2018-01-01 AAP -0.672460 Sheet1\n",
"1 2018-01-02 AAP -0.359553 Sheet1\n",
"2 2018-01-03 AAP -0.813146 Sheet1\n",
"3 2018-01-04 AAP -1.726283 Sheet1\n",
"4 2018-01-05 AAP 0.177426 Sheet1"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ddf = dd.read_csv('test-data/output/sample-xls-case-multisheet.xlsx-*.csv')\n",
"ddf.compute().head()"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "## Use Case: Extract a sheet from multiple files\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Checking if the sheet exists across all files"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"cfg_fnames = list(glob.glob('test-data/excel_adv_data/sample-xls-case-multifile*.xlsx'))"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"all files have same sheet count? True\n",
"\n",
"all files have same sheet names? True\n",
"\n",
"all files contain sheet? True\n",
"\n",
"detailed dataframe\n",
"\n",
" file_name sheets_count sheets_idx sheets_names\n",
"0 sample-xls-case-multifile1.xlsx 1 [0] [Sheet1]\n",
"1 sample-xls-case-multifile2.xlsx 1 [0] [Sheet1]\n"
]
}
],
"source": [
"# finds sheets across all files\n",
"sniffer = XLSSniffer(cfg_fnames)\n",
"\n",
"print('all files have same sheet count?', sniffer.all_same_count())\n",
"print('')\n",
"print('all files have same sheet names?', sniffer.all_same_names())\n",
"print('')\n",
"print('all files contain sheet?', sniffer.all_contain_sheetname('Sheet1'))\n",
"print('')\n",
"print('detailed dataframe')\n",
"print('')\n",
"print(sniffer.df_xls_sheets.reset_index(drop=True).head())\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Extracting to csv"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"converting file: sample-xls-case-multifile1.xlsx | sheet: Sheet1 ok\n",
"converting file: sample-xls-case-multifile2.xlsx | sheet: Sheet1 ok\n"
]
},
{
"data": {
"text/plain": [
"['test-data/output/sample-xls-case-multifile1.xlsx-Sheet1.csv',\n",
" 'test-data/output/sample-xls-case-multifile2.xlsx-Sheet1.csv']"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"d6tstack.convert_xls.XLStoCSVMultiFile(cfg_fnames,output_dir = 'test-data/output',\n",
" cfg_xls_sheets_sel_mode='name_global',cfg_xls_sheets_sel='Sheet1',logger=PrintLogger()).convert_all()\n"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date ticker data xls_sheet\n",
"0 2018-01-01 AAP -0.353994 Sheet1\n",
"1 2018-01-02 AAP -1.374951 Sheet1\n",
"2 2018-01-03 AAP -0.643618 Sheet1\n",
"3 2018-01-04 AAP -2.223403 Sheet1\n",
"4 2018-01-05 AAP 0.625231 Sheet1"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ddf = dd.read_csv('test-data/output/sample-xls-case-multifile1.xlsx-*.csv')\n",
"ddf.compute().head()\n"
]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "## Use Case: Extract a sheet from multiple files, with complex layout\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Checking if the sheet exists across all files"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/excel_adv_data/sample-xls-case-badlayout1*.xlsx'))\n",
"print(len(cfg_fnames))"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" Unnamed: 0 date ticker data xls_sheet\n",
"0 NaN 2018-01-01 AAP -1.306527 Sheet1\n",
"1 NaN 2018-01-02 AAP 1.658131 Sheet1\n",
"2 NaN 2018-01-03 AAP -0.118164 Sheet1\n",
"3 NaN 2018-01-04 AAP -0.680178 Sheet1\n",
"4 NaN 2018-01-05 AAP 0.666383 Sheet1"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.read_excel(cfg_fnames[0]).head()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [
{
 "data": {
  "text/html": [
   "[HTML table omitted - see the text/plain output below]"
],
"text/plain": [
" date ticker data xls_sheet\n",
"0 2018-01-01 AAP -1.306527 Sheet1\n",
"1 2018-01-02 AAP 1.658131 Sheet1\n",
"2 2018-01-03 AAP -0.118164 Sheet1\n",
"3 2018-01-04 AAP -0.680178 Sheet1\n",
"4 2018-01-05 AAP 0.666383 Sheet1"
]
},
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"d6tstack.convert_xls.read_excel_advanced(cfg_fnames[0],\n",
" sheet_name='Sheet1', header_xls_range=\"B2:E2\").head()"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"converting file: sample-xls-case-badlayout1.xlsx | sheet: ok\n",
"converting file: sample-xls-case-badlayout1.xlsx | sheet: ok\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/mnt/data/dev/d6t-lib/d6tstack/d6tstack/convert_xls.py:72: UserWarning: File test-data/excel_adv_data/sample-xls-case-badlayout1.xlsx exists, skipping\n",
" warnings.warn('File %s exists, skipping' %fname)\n"
]
},
{
"data": {
"text/plain": [
"['test-data/output/sample-xls-case-badlayout1.xlsx-Sheet1.csv',\n",
" 'test-data/output/sample-xls-case-badlayout1.xlsx-Sheet2.csv']"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c = d6tstack.convert_xls.XLStoCSVMultiSheet(cfg_fnames[0],output_dir = 'test-data/output',logger=PrintLogger())\n",
"c.convert_all(header_xls_range=\"B2:B2\")\n"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "Length mismatch: Expected axis has 1 elements, new values have 4 elements",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mddf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'test-data/output/sample-xls-case-badlayout1.xlsx-*.csv'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mddf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhead\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/base.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mdask\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbase\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \"\"\"\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraverse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/base.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 400\u001b[0m \u001b[0mkeys\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__dask_keys__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcollections\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 401\u001b[0m \u001b[0mpostcomputes\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__dask_postcompute__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcollections\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 402\u001b[0;31m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mschedule\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdsk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkeys\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 403\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mrepack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresults\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpostcomputes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 404\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/threaded.py\u001b[0m in \u001b[0;36mget\u001b[0;34m(dsk, result, cache, num_workers, **kwargs)\u001b[0m\n\u001b[1;32m 73\u001b[0m results = get_async(pool.apply_async, len(pool._pool), dsk, result,\n\u001b[1;32m 74\u001b[0m \u001b[0mcache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcache\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_thread_get_id\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 75\u001b[0;31m pack_exception=pack_exception, **kwargs)\n\u001b[0m\u001b[1;32m 76\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 77\u001b[0m \u001b[0;31m# Cleanup pools associated to dead threads\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/local.py\u001b[0m in \u001b[0;36mget_async\u001b[0;34m(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)\u001b[0m\n\u001b[1;32m 519\u001b[0m \u001b[0m_execute_task\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Re-execute locally\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 520\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 521\u001b[0;31m \u001b[0mraise_exception\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 522\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworker_id\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mres_info\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 523\u001b[0m \u001b[0mstate\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cache'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/compatibility.py\u001b[0m in \u001b[0;36mreraise\u001b[0;34m(exc, tb)\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 69\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 70\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/local.py\u001b[0m in \u001b[0;36mexecute_task\u001b[0;34m(key, task_info, dumps, loads, get_id, pack_exception)\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 289\u001b[0m \u001b[0mtask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtask_info\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 290\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute_task\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 291\u001b[0m \u001b[0mid\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_id\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 292\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdumps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mid\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/local.py\u001b[0m in \u001b[0;36m_execute_task\u001b[0;34m(arg, cache, dsk)\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marg\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0marg\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 270\u001b[0m \u001b[0margs2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0m_execute_task\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcache\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 271\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 272\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mishashable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 273\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/compatibility.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(func, args, kwargs)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/dask/dataframe/io/csv.py\u001b[0m in \u001b[0;36mpandas_read_text\u001b[0;34m(reader, b, header, kwargs, dtypes, columns, write_header, enforce)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Columns do not match\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolumns\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0mdf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolumns\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m__setattr__\u001b[0;34m(self, name, value)\u001b[0m\n\u001b[1;32m 4387\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4388\u001b[0m \u001b[0mobject\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__getattribute__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4389\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mobject\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__setattr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4390\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mAttributeError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4391\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32mpandas/_libs/properties.pyx\u001b[0m in \u001b[0;36mpandas._libs.properties.AxisProperty.__set__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m_set_axis\u001b[0;34m(self, axis, labels)\u001b[0m\n\u001b[1;32m 644\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 645\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_set_axis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 646\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_axis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 647\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_clear_item_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 648\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/core/internals.py\u001b[0m in \u001b[0;36mset_axis\u001b[0;34m(self, axis, new_labels)\u001b[0m\n\u001b[1;32m 3321\u001b[0m raise ValueError(\n\u001b[1;32m 3322\u001b[0m \u001b[0;34m'Length mismatch: Expected axis has {old} elements, new '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3323\u001b[0;31m 'values have {new} elements'.format(old=old_len, new=new_len))\n\u001b[0m\u001b[1;32m 3324\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3325\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maxes\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0maxis\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_labels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: Length mismatch: Expected axis has 1 elements, new values have 4 elements"
]
}
],
"source": [
"ddf = dd.read_csv('test-data/output/sample-xls-case-badlayout1.xlsx-*.csv')\n",
"ddf.compute().head() # dask breaks! use d6tstack.combine_csv\n"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" ticker | \n",
" data | \n",
" xls_sheet | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2018-01-01 | \n",
" AAP | \n",
" -1.306526851735317 | \n",
" Sheet1 | \n",
" sample-xls-case-badlayout1.xlsx-Sheet1.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2018-01-02 | \n",
" AAP | \n",
" 1.658130679618188 | \n",
" Sheet1 | \n",
" sample-xls-case-badlayout1.xlsx-Sheet1.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2018-01-03 | \n",
" AAP | \n",
" -0.1181640451285698 | \n",
" Sheet1 | \n",
" sample-xls-case-badlayout1.xlsx-Sheet1.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 2018-01-04 | \n",
" AAP | \n",
" -0.6801782039968504 | \n",
" Sheet1 | \n",
" sample-xls-case-badlayout1.xlsx-Sheet1.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 2018-01-05 | \n",
" AAP | \n",
" 0.6663830820319143 | \n",
" Sheet1 | \n",
" sample-xls-case-badlayout1.xlsx-Sheet1.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date ticker data xls_sheet \\\n",
"0 2018-01-01 AAP -1.306526851735317 Sheet1 \n",
"1 2018-01-02 AAP 1.658130679618188 Sheet1 \n",
"2 2018-01-03 AAP -0.1181640451285698 Sheet1 \n",
"3 2018-01-04 AAP -0.6801782039968504 Sheet1 \n",
"4 2018-01-05 AAP 0.6663830820319143 Sheet1 \n",
"\n",
" filename \n",
"0 sample-xls-case-badlayout1.xlsx-Sheet1.csv \n",
"1 sample-xls-case-badlayout1.xlsx-Sheet1.csv \n",
"2 sample-xls-case-badlayout1.xlsx-Sheet1.csv \n",
"3 sample-xls-case-badlayout1.xlsx-Sheet1.csv \n",
"4 sample-xls-case-badlayout1.xlsx-Sheet1.csv "
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/output/sample-xls-case-badlayout1.xlsx-*.csv'))\n",
"len(cfg_fnames)\n",
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames, all_strings=True)\n",
"c.combine().head()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: examples-pyspark.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# d6tstack with pyspark\n",
"\n",
"Pyspark is a great library for out-of-core computing. But if input files are not properly organized it quickly breaks. For example:\n",
"\n",
"1) if columns are different between files: [unlike dask](https://github.com/d6t/d6tstack/blob/master/examples-dask.ipynb) pyspark actually handles that\n",
"\n",
"2) if column order is rearranged between files it will read data, but into the wrong columns and you won't notice it\n",
"\n",
"3) if columns are named between files, you'll have to manually fix the situation\n",
"\n",
"Pyspark can't easily handle those scenarios. With d6tstack you can easily fix the situation with just a few lines of code!\n",
"\n",
"For more instructions, examples and documentation see https://github.com/d6t/d6tstack"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import findspark\n",
"findspark.init(r'E:\\progs.install\\spark-2.2.0-bin-hadoop2.7')\n",
"\n",
"import pyspark\n",
"sc = pyspark.SparkContext(appName=\"myAppName\")\n",
"from pyspark.sql import SQLContext\n",
"sqlc = SQLContext(sc)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Base Case: Columns are same between all files\n",
"As a base case, we have input files which have consistent input columns and thus can be easily read in dask."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" profit | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -100 | \n",
" 2011-03-01 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 1 | \n",
" -100 | \n",
" 2011-03-02 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 2 | \n",
" -100 | \n",
" 2011-03-03 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 3 | \n",
" -100 | \n",
" 2011-03-04 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 4 | \n",
" -100 | \n",
" 2011-03-05 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 5 | \n",
" -100 | \n",
" 2011-03-06 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 6 | \n",
" -100 | \n",
" 2011-03-07 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 7 | \n",
" -100 | \n",
" 2011-03-08 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 8 | \n",
" -100 | \n",
" 2011-03-09 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 9 | \n",
" -100 | \n",
" 2011-03-10 | \n",
" 200 | \n",
" 300 | \n",
"
\n",
" \n",
" | 10 | \n",
" -90 | \n",
" 2011-02-01 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 11 | \n",
" -90 | \n",
" 2011-02-02 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 12 | \n",
" -90 | \n",
" 2011-02-03 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 13 | \n",
" -90 | \n",
" 2011-02-04 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 14 | \n",
" -90 | \n",
" 2011-02-05 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 15 | \n",
" -90 | \n",
" 2011-02-06 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 16 | \n",
" -90 | \n",
" 2011-02-07 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 17 | \n",
" -90 | \n",
" 2011-02-08 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 18 | \n",
" -90 | \n",
" 2011-02-09 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 19 | \n",
" -90 | \n",
" 2011-02-10 | \n",
" 110 | \n",
" 200 | \n",
"
\n",
" \n",
" | 20 | \n",
" -80 | \n",
" 2011-01-01 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 21 | \n",
" -80 | \n",
" 2011-01-02 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 22 | \n",
" -80 | \n",
" 2011-01-03 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 23 | \n",
" -80 | \n",
" 2011-01-04 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 24 | \n",
" -80 | \n",
" 2011-01-05 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 25 | \n",
" -80 | \n",
" 2011-01-06 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 26 | \n",
" -80 | \n",
" 2011-01-07 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 27 | \n",
" -80 | \n",
" 2011-01-08 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 28 | \n",
" -80 | \n",
" 2011-01-09 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
" | 29 | \n",
" -80 | \n",
" 2011-01-10 | \n",
" 20 | \n",
" 100 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date profit sales\n",
"0 -100 2011-03-01 200 300\n",
"1 -100 2011-03-02 200 300\n",
"2 -100 2011-03-03 200 300\n",
"3 -100 2011-03-04 200 300\n",
"4 -100 2011-03-05 200 300\n",
"5 -100 2011-03-06 200 300\n",
"6 -100 2011-03-07 200 300\n",
"7 -100 2011-03-08 200 300\n",
"8 -100 2011-03-09 200 300\n",
"9 -100 2011-03-10 200 300\n",
"10 -90 2011-02-01 110 200\n",
"11 -90 2011-02-02 110 200\n",
"12 -90 2011-02-03 110 200\n",
"13 -90 2011-02-04 110 200\n",
"14 -90 2011-02-05 110 200\n",
"15 -90 2011-02-06 110 200\n",
"16 -90 2011-02-07 110 200\n",
"17 -90 2011-02-08 110 200\n",
"18 -90 2011-02-09 110 200\n",
"19 -90 2011-02-10 110 200\n",
"20 -80 2011-01-01 20 100\n",
"21 -80 2011-01-02 20 100\n",
"22 -80 2011-01-03 20 100\n",
"23 -80 2011-01-04 20 100\n",
"24 -80 2011-01-05 20 100\n",
"25 -80 2011-01-06 20 100\n",
"26 -80 2011-01-07 20 100\n",
"27 -80 2011-01-08 20 100\n",
"28 -80 2011-01-09 20 100\n",
"29 -80 2011-01-10 20 100"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sdf = sqlc.read.csv('test-data/input/test-data-input-csv-clean-*.csv', inferSchema=False, header=True)\n",
"sdf.toPandas()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Problem Case 1: Columns are different between files\n",
"That worked well. But what happens if your input files have inconsistent columns across files? Say for example one file has a new column that the other files don't have.\n",
"\n",
"[unlike dask](https://github.com/d6t/d6tstack/blob/master/examples-dask.ipynb) pyspark actually handles that. The new column got correctly added."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" profit | \n",
" sales | \n",
" profit2 | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -100 | \n",
" 2011-03-01 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 1 | \n",
" -100 | \n",
" 2011-03-02 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 2 | \n",
" -100 | \n",
" 2011-03-03 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 3 | \n",
" -100 | \n",
" 2011-03-04 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 4 | \n",
" -100 | \n",
" 2011-03-05 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 5 | \n",
" -100 | \n",
" 2011-03-06 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 6 | \n",
" -100 | \n",
" 2011-03-07 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 7 | \n",
" -100 | \n",
" 2011-03-08 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 8 | \n",
" -100 | \n",
" 2011-03-09 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 9 | \n",
" -100 | \n",
" 2011-03-10 | \n",
" 200 | \n",
" 300 | \n",
" 400 | \n",
"
\n",
" \n",
" | 10 | \n",
" -90 | \n",
" 2011-02-01 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 11 | \n",
" -90 | \n",
" 2011-02-02 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 12 | \n",
" -90 | \n",
" 2011-02-03 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 13 | \n",
" -90 | \n",
" 2011-02-04 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 14 | \n",
" -90 | \n",
" 2011-02-05 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 15 | \n",
" -90 | \n",
" 2011-02-06 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 16 | \n",
" -90 | \n",
" 2011-02-07 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 17 | \n",
" -90 | \n",
" 2011-02-08 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 18 | \n",
" -90 | \n",
" 2011-02-09 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 19 | \n",
" -90 | \n",
" 2011-02-10 | \n",
" 110 | \n",
" 200 | \n",
" None | \n",
"
\n",
" \n",
" | 20 | \n",
" -80 | \n",
" 2011-01-01 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 21 | \n",
" -80 | \n",
" 2011-01-02 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 22 | \n",
" -80 | \n",
" 2011-01-03 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 23 | \n",
" -80 | \n",
" 2011-01-04 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 24 | \n",
" -80 | \n",
" 2011-01-05 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 25 | \n",
" -80 | \n",
" 2011-01-06 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 26 | \n",
" -80 | \n",
" 2011-01-07 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 27 | \n",
" -80 | \n",
" 2011-01-08 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 28 | \n",
" -80 | \n",
" 2011-01-09 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
" | 29 | \n",
" -80 | \n",
" 2011-01-10 | \n",
" 20 | \n",
" 100 | \n",
" None | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date profit sales profit2\n",
"0 -100 2011-03-01 200 300 400\n",
"1 -100 2011-03-02 200 300 400\n",
"2 -100 2011-03-03 200 300 400\n",
"3 -100 2011-03-04 200 300 400\n",
"4 -100 2011-03-05 200 300 400\n",
"5 -100 2011-03-06 200 300 400\n",
"6 -100 2011-03-07 200 300 400\n",
"7 -100 2011-03-08 200 300 400\n",
"8 -100 2011-03-09 200 300 400\n",
"9 -100 2011-03-10 200 300 400\n",
"10 -90 2011-02-01 110 200 None\n",
"11 -90 2011-02-02 110 200 None\n",
"12 -90 2011-02-03 110 200 None\n",
"13 -90 2011-02-04 110 200 None\n",
"14 -90 2011-02-05 110 200 None\n",
"15 -90 2011-02-06 110 200 None\n",
"16 -90 2011-02-07 110 200 None\n",
"17 -90 2011-02-08 110 200 None\n",
"18 -90 2011-02-09 110 200 None\n",
"19 -90 2011-02-10 110 200 None\n",
"20 -80 2011-01-01 20 100 None\n",
"21 -80 2011-01-02 20 100 None\n",
"22 -80 2011-01-03 20 100 None\n",
"23 -80 2011-01-04 20 100 None\n",
"24 -80 2011-01-05 20 100 None\n",
"25 -80 2011-01-06 20 100 None\n",
"26 -80 2011-01-07 20 100 None\n",
"27 -80 2011-01-08 20 100 None\n",
"28 -80 2011-01-09 20 100 None\n",
"29 -80 2011-01-10 20 100 None"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sdf = sqlc.read.csv('test-data/input/test-data-input-csv-colmismatch-*.csv', inferSchema=False, header=True)\n",
"sdf.toPandas()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Problem Case 2: Columns are reordered between files\n",
"This is a sneaky case. The columns are the same but the order is different! Pyspark will read everything just fine without a warning but your data is totally messed up! You don't even notice it! You'll start using the data and at some point notice something weird is going on!\n",
"\n",
"In the example below, the \"profit\" column contains data from the \"cost\" column!"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" profit | \n",
" cost | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-03-01 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-03-02 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-03-03 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 3 | \n",
" 2011-03-04 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 4 | \n",
" 2011-03-05 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 5 | \n",
" 2011-03-06 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 6 | \n",
" 2011-03-07 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 7 | \n",
" 2011-03-08 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 8 | \n",
" 2011-03-09 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 9 | \n",
" 2011-03-10 | \n",
" 300 | \n",
" 200 | \n",
" -100 | \n",
"
\n",
" \n",
" | 10 | \n",
" 2011-02-01 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 11 | \n",
" 2011-02-02 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 12 | \n",
" 2011-02-03 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 13 | \n",
" 2011-02-04 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 14 | \n",
" 2011-02-05 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 15 | \n",
" 2011-02-06 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 16 | \n",
" 2011-02-07 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 17 | \n",
" 2011-02-08 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 18 | \n",
" 2011-02-09 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 19 | \n",
" 2011-02-10 | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
"
\n",
" \n",
" | 20 | \n",
" 2011-01-01 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 21 | \n",
" 2011-01-02 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 22 | \n",
" 2011-01-03 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 23 | \n",
" 2011-01-04 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 24 | \n",
" 2011-01-05 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 25 | \n",
" 2011-01-06 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 26 | \n",
" 2011-01-07 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 27 | \n",
" 2011-01-08 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 28 | \n",
" 2011-01-09 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
" | 29 | \n",
" 2011-01-10 | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales profit cost\n",
"0 2011-03-01 300 200 -100\n",
"1 2011-03-02 300 200 -100\n",
"2 2011-03-03 300 200 -100\n",
"3 2011-03-04 300 200 -100\n",
"4 2011-03-05 300 200 -100\n",
"5 2011-03-06 300 200 -100\n",
"6 2011-03-07 300 200 -100\n",
"7 2011-03-08 300 200 -100\n",
"8 2011-03-09 300 200 -100\n",
"9 2011-03-10 300 200 -100\n",
"10 2011-02-01 200 -90 110\n",
"11 2011-02-02 200 -90 110\n",
"12 2011-02-03 200 -90 110\n",
"13 2011-02-04 200 -90 110\n",
"14 2011-02-05 200 -90 110\n",
"15 2011-02-06 200 -90 110\n",
"16 2011-02-07 200 -90 110\n",
"17 2011-02-08 200 -90 110\n",
"18 2011-02-09 200 -90 110\n",
"19 2011-02-10 200 -90 110\n",
"20 2011-01-01 100 -80 20\n",
"21 2011-01-02 100 -80 20\n",
"22 2011-01-03 100 -80 20\n",
"23 2011-01-04 100 -80 20\n",
"24 2011-01-05 100 -80 20\n",
"25 2011-01-06 100 -80 20\n",
"26 2011-01-07 100 -80 20\n",
"27 2011-01-08 100 -80 20\n",
"28 2011-01-09 100 -80 20\n",
"29 2011-01-10 100 -80 20"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sdf = sqlc.read.csv('test-data/input/test-data-input-csv-reorder-*.csv', inferSchema=False, header=True)\n",
"sdf.toPandas()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Fixing the problem with d6stack\n",
"After a while you'll get to the root of the problem, and then you can either manually process those files or use d6tstack to easily check for such a situation and fix it with a few lines of code - no manual processing required. Let's take a look!"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
" return f(*args, **kwds)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"all columns equal? False\n",
"\n",
"in what order do columns appear in the files?\n",
"\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 0 | \n",
" 1 | \n",
" 2 | \n",
" 3 | \n",
"
\n",
" \n",
" | 1 | \n",
" 0 | \n",
" 1 | \n",
" 2 | \n",
" 3 | \n",
"
\n",
" \n",
" | 2 | \n",
" 0 | \n",
" 1 | \n",
" 3 | \n",
" 2 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit\n",
"0 0 1 2 3\n",
"1 0 1 2 3\n",
"2 0 1 3 2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import glob\n",
"import d6tstack.combine_csv\n",
"\n",
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-reorder-*.csv'))\n",
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames, all_strings=True)\n",
"\n",
"# check columns\n",
"col_sniff = c.sniff_columns()\n",
"print('all columns equal?' , col_sniff['is_all_equal'])\n",
"print('')\n",
"print('in what order do columns appear in the files?')\n",
"print('')\n",
"col_sniff['df_columns_order'].reset_index(drop=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Again, just a useful check before loading data into dask you can see that the columns don't line up. It's very fast to run because it only reads the headers, there's NO reason for you NOT to do it from a QA perspective.\n",
"\n",
"Same as above, the fix is the same few lines of code with d6stack."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# out-of-core combining\n",
"c.to_csv_align(output_dir='test-data/output/')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" profit | \n",
" date | \n",
" cost | \n",
" sales | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 110 | \n",
" 2011-02-01 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 110 | \n",
" 2011-02-02 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 110 | \n",
" 2011-02-03 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" 110 | \n",
" 2011-02-04 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" 110 | \n",
" 2011-02-05 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 5 | \n",
" 110 | \n",
" 2011-02-06 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 6 | \n",
" 110 | \n",
" 2011-02-07 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 7 | \n",
" 110 | \n",
" 2011-02-08 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 8 | \n",
" 110 | \n",
" 2011-02-09 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 9 | \n",
" 110 | \n",
" 2011-02-10 | \n",
" -90 | \n",
" 200 | \n",
" test-data-input-csv-reorder-feb.csv | \n",
"
\n",
" \n",
" | 10 | \n",
" 20 | \n",
" 2011-01-01 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 11 | \n",
" 20 | \n",
" 2011-01-02 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 12 | \n",
" 20 | \n",
" 2011-01-03 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 13 | \n",
" 20 | \n",
" 2011-01-04 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 14 | \n",
" 20 | \n",
" 2011-01-05 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 15 | \n",
" 20 | \n",
" 2011-01-06 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 16 | \n",
" 20 | \n",
" 2011-01-07 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 17 | \n",
" 20 | \n",
" 2011-01-08 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 18 | \n",
" 20 | \n",
" 2011-01-09 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 19 | \n",
" 20 | \n",
" 2011-01-10 | \n",
" -80 | \n",
" 100 | \n",
" test-data-input-csv-reorder-jan.csv | \n",
"
\n",
" \n",
" | 20 | \n",
" 200 | \n",
" 2011-03-01 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 21 | \n",
" 200 | \n",
" 2011-03-02 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 22 | \n",
" 200 | \n",
" 2011-03-03 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 23 | \n",
" 200 | \n",
" 2011-03-04 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 24 | \n",
" 200 | \n",
" 2011-03-05 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 25 | \n",
" 200 | \n",
" 2011-03-06 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 26 | \n",
" 200 | \n",
" 2011-03-07 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 27 | \n",
" 200 | \n",
" 2011-03-08 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 28 | \n",
" 200 | \n",
" 2011-03-09 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
" | 29 | \n",
" 200 | \n",
" 2011-03-10 | \n",
" -100 | \n",
" 300 | \n",
" test-data-input-csv-reorder-mar.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" profit date cost sales filename\n",
"0 110 2011-02-01 -90 200 test-data-input-csv-reorder-feb.csv\n",
"1 110 2011-02-02 -90 200 test-data-input-csv-reorder-feb.csv\n",
"2 110 2011-02-03 -90 200 test-data-input-csv-reorder-feb.csv\n",
"3 110 2011-02-04 -90 200 test-data-input-csv-reorder-feb.csv\n",
"4 110 2011-02-05 -90 200 test-data-input-csv-reorder-feb.csv\n",
"5 110 2011-02-06 -90 200 test-data-input-csv-reorder-feb.csv\n",
"6 110 2011-02-07 -90 200 test-data-input-csv-reorder-feb.csv\n",
"7 110 2011-02-08 -90 200 test-data-input-csv-reorder-feb.csv\n",
"8 110 2011-02-09 -90 200 test-data-input-csv-reorder-feb.csv\n",
"9 110 2011-02-10 -90 200 test-data-input-csv-reorder-feb.csv\n",
"10 20 2011-01-01 -80 100 test-data-input-csv-reorder-jan.csv\n",
"11 20 2011-01-02 -80 100 test-data-input-csv-reorder-jan.csv\n",
"12 20 2011-01-03 -80 100 test-data-input-csv-reorder-jan.csv\n",
"13 20 2011-01-04 -80 100 test-data-input-csv-reorder-jan.csv\n",
"14 20 2011-01-05 -80 100 test-data-input-csv-reorder-jan.csv\n",
"15 20 2011-01-06 -80 100 test-data-input-csv-reorder-jan.csv\n",
"16 20 2011-01-07 -80 100 test-data-input-csv-reorder-jan.csv\n",
"17 20 2011-01-08 -80 100 test-data-input-csv-reorder-jan.csv\n",
"18 20 2011-01-09 -80 100 test-data-input-csv-reorder-jan.csv\n",
"19 20 2011-01-10 -80 100 test-data-input-csv-reorder-jan.csv\n",
"20 200 2011-03-01 -100 300 test-data-input-csv-reorder-mar.csv\n",
"21 200 2011-03-02 -100 300 test-data-input-csv-reorder-mar.csv\n",
"22 200 2011-03-03 -100 300 test-data-input-csv-reorder-mar.csv\n",
"23 200 2011-03-04 -100 300 test-data-input-csv-reorder-mar.csv\n",
"24 200 2011-03-05 -100 300 test-data-input-csv-reorder-mar.csv\n",
"25 200 2011-03-06 -100 300 test-data-input-csv-reorder-mar.csv\n",
"26 200 2011-03-07 -100 300 test-data-input-csv-reorder-mar.csv\n",
"27 200 2011-03-08 -100 300 test-data-input-csv-reorder-mar.csv\n",
"28 200 2011-03-09 -100 300 test-data-input-csv-reorder-mar.csv\n",
"29 200 2011-03-10 -100 300 test-data-input-csv-reorder-mar.csv"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sdf = sqlc.read.csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv', inferSchema=False, header=True)\n",
"sdf.toPandas()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Problem Case 3: Columns are renamed between files\n",
"In this case a column gets renamed between files so you have two columns with partial NaNs that should really be the same column. You would have to manually inspect which columns this applies to and then manually edit them looking for NaNs.\n",
"\n",
"Instead you can use d6tstack to make your input files consistent."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# see examples-csv.ipynb"
]
},
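{
"cell_type": "markdown",
"metadata": {},
"source": [
"Below is a minimal sketch of the first step, assuming a hypothetical input pattern `test-data-input-csv-rename-*.csv` (the full rename example lives in examples-csv.ipynb): sniff the columns to confirm the files don't agree, then fix the renamed column before handing the data to pyspark."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch only, not executed here; the full rename example is in examples-csv.ipynb\n",
"import glob\n",
"import d6tstack.combine_csv\n",
"\n",
"# hypothetical files where one column was renamed between files\n",
"fnames_rename = list(glob.glob('test-data/input/test-data-input-csv-rename-*.csv'))\n",
"c3 = d6tstack.combine_csv.CombinerCSV(fnames_rename, all_strings=True)\n",
"col_sniff3 = c3.sniff_columns()\n",
"print('all columns equal?', col_sniff3['is_all_equal'])  # False when a column was renamed\n",
"col_sniff3['df_columns_order']  # shows how the column names line up across the files"
]
},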
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: examples-read-write.ipynb
================================================
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import importlib\n",
"import pandas as pd\n",
"import numpy as np\n",
"import glob\n",
"\n",
"import d6tstack.combine_csv\n",
"from d6tstack.utils import PrintLogger\n",
"logger = PrintLogger()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CombinerCSV"
]
},
{
"cell_type": "code",
"execution_count": 77,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['test-data/input/test-data-input-csv-colmismatch-mar.csv', 'test-data/input/test-data-input-csv-colmismatch-feb.csv', 'test-data/input/test-data-input-csv-colmismatch-jan.csv']\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-colmismatch-*.csv'))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "code",
"execution_count": 88,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames, all_strings=True)"
]
},
{
"cell_type": "code",
"execution_count": 89,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -80 | \n",
" 2011-01-01 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" -80 | \n",
" 2011-01-02 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" -80 | \n",
" 2011-01-03 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" -80 | \n",
" 2011-01-04 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" -80 | \n",
" 2011-01-05 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date profit profit2 sales \\\n",
"0 -80 2011-01-01 20 NaN 100 \n",
"1 -80 2011-01-02 20 NaN 100 \n",
"2 -80 2011-01-03 20 NaN 100 \n",
"3 -80 2011-01-04 20 NaN 100 \n",
"4 -80 2011-01-05 20 NaN 100 \n",
"\n",
" filename \n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv "
]
},
"execution_count": 89,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.to_csv(output_dir='test-data/output/',overwrite=True)\n",
"pd.read_csv('test-data/output/test-data-input-csv-colmismatch-jan-matched.csv').head()"
]
},
{
"cell_type": "code",
"execution_count": 90,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -80 | \n",
" 2011-01-01 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" -80 | \n",
" 2011-01-02 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" -80 | \n",
" 2011-01-03 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" -80 | \n",
" 2011-01-04 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" -80 | \n",
" 2011-01-05 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date profit profit2 sales \\\n",
"0 -80 2011-01-01 20 NaN 100 \n",
"1 -80 2011-01-02 20 NaN 100 \n",
"2 -80 2011-01-03 20 NaN 100 \n",
"3 -80 2011-01-04 20 NaN 100 \n",
"4 -80 2011-01-05 20 NaN 100 \n",
"\n",
" filename \n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv "
]
},
"execution_count": 90,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# doesn't raise any warnings... thought we had overwrite warnings implemented?\n",
"c.to_csv(output_dir='test-data/output/',overwrite=False)\n",
"pd.read_csv('test-data/output/test-data-input-csv-colmismatch-jan-matched.csv').head()"
]
},
{
"cell_type": "code",
"execution_count": 92,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
" filename | \n",
" filename.1 | \n",
" filename.2 | \n",
" filename.3 | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -80 | \n",
" 2011-01-01 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" -80 | \n",
" 2011-01-02 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" -80 | \n",
" 2011-01-03 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 3 | \n",
" -80 | \n",
" 2011-01-04 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
" | 4 | \n",
" -80 | \n",
" 2011-01-05 | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
" test-data-input-csv-colmismatch-jan-matched.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date profit profit2 sales \\\n",
"0 -80 2011-01-01 20 NaN 100 \n",
"1 -80 2011-01-02 20 NaN 100 \n",
"2 -80 2011-01-03 20 NaN 100 \n",
"3 -80 2011-01-04 20 NaN 100 \n",
"4 -80 2011-01-05 20 NaN 100 \n",
"\n",
" filename \\\n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv \n",
"\n",
" filename.1 \\\n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv \n",
"\n",
" filename.2 \\\n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv \n",
"\n",
" filename.3 \n",
"0 test-data-input-csv-colmismatch-jan-matched.csv \n",
"1 test-data-input-csv-colmismatch-jan-matched.csv \n",
"2 test-data-input-csv-colmismatch-jan-matched.csv \n",
"3 test-data-input-csv-colmismatch-jan-matched.csv \n",
"4 test-data-input-csv-colmismatch-jan-matched.csv "
]
},
"execution_count": 92,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# adds 3 columns for filename\n",
"c.to_csv(output_dir='test-data/output/',overwrite=True)\n",
"pd.read_csv('test-data/output/test-data-input-csv-colmismatch-jan-matched.csv').head()"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# not writing a file / raising error\n",
"c.to_csv(output_dir='test-data/output/',separate_files=False)"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# not writing a file\n",
"c.to_csv(output_dir='test-data/output/',separate_files=False)"
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py:271: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\n",
"of pandas will change to not sort by default.\n",
"\n",
"To accept the future behavior, pass 'sort=True'.\n",
"\n",
"To retain the current behavior and silence the warning, pass sort=False\n",
"\n",
" df_all = pd.concat(dfl_all)\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" filename | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" -100 | \n",
" 2011-03-01 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400.0 | \n",
" 300 | \n",
"
\n",
" \n",
" | 1 | \n",
" -100 | \n",
" 2011-03-02 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400.0 | \n",
" 300 | \n",
"
\n",
" \n",
" | 2 | \n",
" -100 | \n",
" 2011-03-03 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400.0 | \n",
" 300 | \n",
"
\n",
" \n",
" | 3 | \n",
" -100 | \n",
" 2011-03-04 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400.0 | \n",
" 300 | \n",
"
\n",
" \n",
" | 4 | \n",
" -100 | \n",
" 2011-03-05 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400.0 | \n",
" 300 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date filename profit profit2 \\\n",
"0 -100 2011-03-01 test-data-input-csv-colmismatch-mar.csv 200 400.0 \n",
"1 -100 2011-03-02 test-data-input-csv-colmismatch-mar.csv 200 400.0 \n",
"2 -100 2011-03-03 test-data-input-csv-colmismatch-mar.csv 200 400.0 \n",
"3 -100 2011-03-04 test-data-input-csv-colmismatch-mar.csv 200 400.0 \n",
"4 -100 2011-03-05 test-data-input-csv-colmismatch-mar.csv 200 400.0 \n",
"\n",
" sales \n",
"0 300 \n",
"1 300 \n",
"2 300 \n",
"3 300 \n",
"4 300 "
]
},
"execution_count": 93,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# ok\n",
"c.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=False)\n",
"pd.read_csv('test-data/output/test-combined.csv').head()"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" cost | \n",
" date | \n",
" filename | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 25 | \n",
" -80 | \n",
" 2011-01-06 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
"
\n",
" \n",
" | 26 | \n",
" -80 | \n",
" 2011-01-07 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
"
\n",
" \n",
" | 27 | \n",
" -80 | \n",
" 2011-01-08 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
"
\n",
" \n",
" | 28 | \n",
" -80 | \n",
" 2011-01-09 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
"
\n",
" \n",
" | 29 | \n",
" -80 | \n",
" 2011-01-10 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" NaN | \n",
" 100 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" cost date filename profit \\\n",
"25 -80 2011-01-06 test-data-input-csv-colmismatch-jan.csv 20 \n",
"26 -80 2011-01-07 test-data-input-csv-colmismatch-jan.csv 20 \n",
"27 -80 2011-01-08 test-data-input-csv-colmismatch-jan.csv 20 \n",
"28 -80 2011-01-09 test-data-input-csv-colmismatch-jan.csv 20 \n",
"29 -80 2011-01-10 test-data-input-csv-colmismatch-jan.csv 20 \n",
"\n",
" profit2 sales \n",
"25 NaN 100 \n",
"26 NaN 100 \n",
"27 NaN 100 \n",
"28 NaN 100 \n",
"29 NaN 100 "
]
},
"execution_count": 94,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.read_csv('test-data/output/test-combined.csv').tail()"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "to_csv() got an unexpected keyword argument 'is_col_common'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# add is_col_common to pass through\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/test-combined.csv'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseparate_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mis_col_common\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m: to_csv() got an unexpected keyword argument 'is_col_common'"
],
"output_type": "error"
}
],
"source": [
"# add is_col_common to pass through\n",
"c.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=False,is_col_common=False)"
]
},
{
"cell_type": "code",
"execution_count": 96,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "to_csv() got an unexpected keyword argument 'streaming'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# how do I do streaming?\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/test-combined.csv'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseparate_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mstreaming\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m: to_csv() got an unexpected keyword argument 'streaming'"
],
"output_type": "error"
}
],
"source": [
"# how do I do streaming?\n",
"c.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=False,streaming=True)"
]
},
{
"cell_type": "code",
"execution_count": 97,
"metadata": {},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'CombinerCSV' object has no attribute 'to_parquet'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_parquet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m: 'CombinerCSV' object has no attribute 'to_parquet'"
],
"output_type": "error"
}
],
"source": [
"c.to_parquet(output_dir='test-data/output/')\n",
"import pyarrow.parquet as pq\n",
"table = pq.read_table('test-data/output/test-data-input-csv-colmismatch-jan-matched')\n",
"table.to_pandas()"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py:271: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\n",
"of pandas will change to not sort by default.\n",
"\n",
"To accept the future behavior, pass 'sort=True'.\n",
"\n",
"To retain the current behavior and silence the warning, pass sort=False\n",
"\n",
" df_all = pd.concat(dfl_all)\n"
]
},
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 61,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.to_sql('mysql+mysqlconnector://testusr:testusr@localhost/test','testd6tstack')"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from sqlalchemy.engine import create_engine\n",
"sqlcnxn = create_engine('mysql+mysqlconnector://testusr:testusr@localhost/test').connect()"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" index | \n",
" cost | \n",
" date | \n",
" filename | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 0 | \n",
" -100 | \n",
" 2011-03-01 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400 | \n",
" 300 | \n",
"
\n",
" \n",
" | 1 | \n",
" 1 | \n",
" -100 | \n",
" 2011-03-02 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400 | \n",
" 300 | \n",
"
\n",
" \n",
" | 2 | \n",
" 2 | \n",
" -100 | \n",
" 2011-03-03 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400 | \n",
" 300 | \n",
"
\n",
" \n",
" | 3 | \n",
" 3 | \n",
" -100 | \n",
" 2011-03-04 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400 | \n",
" 300 | \n",
"
\n",
" \n",
" | 4 | \n",
" 4 | \n",
" -100 | \n",
" 2011-03-05 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
" 200 | \n",
" 400 | \n",
" 300 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" index cost date filename profit \\\n",
"0 0 -100 2011-03-01 test-data-input-csv-colmismatch-mar.csv 200 \n",
"1 1 -100 2011-03-02 test-data-input-csv-colmismatch-mar.csv 200 \n",
"2 2 -100 2011-03-03 test-data-input-csv-colmismatch-mar.csv 200 \n",
"3 3 -100 2011-03-04 test-data-input-csv-colmismatch-mar.csv 200 \n",
"4 4 -100 2011-03-05 test-data-input-csv-colmismatch-mar.csv 200 \n",
"\n",
" profit2 sales \n",
"0 400 300 \n",
"1 400 300 \n",
"2 400 300 \n",
"3 400 300 \n",
"4 400 300 "
]
},
"execution_count": 66,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.read_sql_table('testd6tstack',sqlcnxn).head()"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" index | \n",
" cost | \n",
" date | \n",
" filename | \n",
" profit | \n",
" profit2 | \n",
" sales | \n",
"
\n",
" \n",
" \n",
" \n",
" | 25 | \n",
" 5 | \n",
" -80 | \n",
" 2011-01-06 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" None | \n",
" 100 | \n",
"
\n",
" \n",
" | 26 | \n",
" 6 | \n",
" -80 | \n",
" 2011-01-07 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" None | \n",
" 100 | \n",
"
\n",
" \n",
" | 27 | \n",
" 7 | \n",
" -80 | \n",
" 2011-01-08 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" None | \n",
" 100 | \n",
"
\n",
" \n",
" | 28 | \n",
" 8 | \n",
" -80 | \n",
" 2011-01-09 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" None | \n",
" 100 | \n",
"
\n",
" \n",
" | 29 | \n",
" 9 | \n",
" -80 | \n",
" 2011-01-10 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
" 20 | \n",
" None | \n",
" 100 | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" index cost date filename profit \\\n",
"25 5 -80 2011-01-06 test-data-input-csv-colmismatch-jan.csv 20 \n",
"26 6 -80 2011-01-07 test-data-input-csv-colmismatch-jan.csv 20 \n",
"27 7 -80 2011-01-08 test-data-input-csv-colmismatch-jan.csv 20 \n",
"28 8 -80 2011-01-09 test-data-input-csv-colmismatch-jan.csv 20 \n",
"29 9 -80 2011-01-10 test-data-input-csv-colmismatch-jan.csv 20 \n",
"\n",
" profit2 sales \n",
"25 None 100 \n",
"26 None 100 \n",
"27 None 100 \n",
"28 None 100 \n",
"29 None 100 "
]
},
"execution_count": 67,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.read_sql_table('testd6tstack',sqlcnxn).tail()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CombinerCSVAdvanced.to_csv()"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" profit3 | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" filename | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" 2011-03-01 | \n",
" 400 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-03-02 | \n",
" 400 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-03-03 | \n",
" 400 | \n",
" 300 | \n",
" -100 | \n",
" 200 | \n",
" test-data-input-csv-colmismatch-mar.csv | \n",
"
\n",
" \n",
" | 0 | \n",
" 2011-02-01 | \n",
" NaN | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-02-02 | \n",
" NaN | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-02-03 | \n",
" NaN | \n",
" 200 | \n",
" -90 | \n",
" 110 | \n",
" test-data-input-csv-colmismatch-feb.csv | \n",
"
\n",
" \n",
" | 0 | \n",
" 2011-01-01 | \n",
" NaN | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
"
\n",
" \n",
" | 1 | \n",
" 2011-01-02 | \n",
" NaN | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
"
\n",
" \n",
" | 2 | \n",
" 2011-01-03 | \n",
" NaN | \n",
" 100 | \n",
" -80 | \n",
" 20 | \n",
" test-data-input-csv-colmismatch-jan.csv | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date profit3 sales cost profit \\\n",
"0 2011-03-01 400 300 -100 200 \n",
"1 2011-03-02 400 300 -100 200 \n",
"2 2011-03-03 400 300 -100 200 \n",
"0 2011-02-01 NaN 200 -90 110 \n",
"1 2011-02-02 NaN 200 -90 110 \n",
"2 2011-02-03 NaN 200 -90 110 \n",
"0 2011-01-01 NaN 100 -80 20 \n",
"1 2011-01-02 NaN 100 -80 20 \n",
"2 2011-01-03 NaN 100 -80 20 \n",
"\n",
" filename \n",
"0 test-data-input-csv-colmismatch-mar.csv \n",
"1 test-data-input-csv-colmismatch-mar.csv \n",
"2 test-data-input-csv-colmismatch-mar.csv \n",
"0 test-data-input-csv-colmismatch-feb.csv \n",
"1 test-data-input-csv-colmismatch-feb.csv \n",
"2 test-data-input-csv-colmismatch-feb.csv \n",
"0 test-data-input-csv-colmismatch-jan.csv \n",
"1 test-data-input-csv-colmismatch-jan.csv \n",
"2 test-data-input-csv-colmismatch-jan.csv "
]
},
"execution_count": 51,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"combiner2 = d6tstack.combine_csv.CombinerCSVAdvanced(c, c.preview_columns()['columns_all'], {'profit2':'profit3'})\n",
"combiner2.preview_combine() "
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "combine_save() got an unexpected keyword argument 'parquet_output'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcombiner2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/test-combined.csv'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseparate_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mto_csv\u001b[0;34m(self, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize)\u001b[0m\n\u001b[1;32m 617\u001b[0m \"\"\"\n\u001b[1;32m 618\u001b[0m convert_to_csv_parquet(self, out_filename=out_filename, separate_files=separate_files, output_dir=output_dir,\n\u001b[0;32m--> 619\u001b[0;31m suffix=suffix, overwrite=overwrite, streaming=streaming, chunksize=chunksize)\n\u001b[0m\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m def to_parquet(self, out_filename=None, separate_files=True, output_dir=None, suffix='-matched',\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mconvert_to_csv_parquet\u001b[0;34m(combiner, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize, parquet_output)\u001b[0m\n\u001b[1;32m 58\u001b[0m chunksize=chunksize, parquet_output=parquet_output)\n\u001b[1;32m 59\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mstreaming\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mout_filename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 60\u001b[0;31m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine_save\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparquet_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparquet_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 61\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mout_filename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: combine_save() got an unexpected keyword argument 'parquet_output'"
],
"output_type": "error"
}
],
"source": [
"# bug??\n",
"combiner2.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=False)"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "align_save() got an unexpected keyword argument 'parquet_output'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# bug??\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mcombiner2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/test-combined.csv'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseparate_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mto_csv\u001b[0;34m(self, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize)\u001b[0m\n\u001b[1;32m 617\u001b[0m \"\"\"\n\u001b[1;32m 618\u001b[0m convert_to_csv_parquet(self, out_filename=out_filename, separate_files=separate_files, output_dir=output_dir,\n\u001b[0;32m--> 619\u001b[0;31m suffix=suffix, overwrite=overwrite, streaming=streaming, chunksize=chunksize)\n\u001b[0m\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m def to_parquet(self, out_filename=None, separate_files=True, output_dir=None, suffix='-matched',\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mconvert_to_csv_parquet\u001b[0;34m(combiner, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize, parquet_output)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mseparate_files\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m combiner.align_save(output_dir=output_dir, suffix=suffix, overwrite=overwrite,\n\u001b[0;32m---> 58\u001b[0;31m chunksize=chunksize, parquet_output=parquet_output)\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mstreaming\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mout_filename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine_save\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparquet_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparquet_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: align_save() got an unexpected keyword argument 'parquet_output'"
],
"output_type": "error"
}
],
"source": [
"# bug??\n",
"combiner2.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=True)"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "align_save() got an unexpected keyword argument 'parquet_output'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mcombiner2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_parquet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mto_parquet\u001b[0;34m(self, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize)\u001b[0m\n\u001b[1;32m 637\u001b[0m convert_to_csv_parquet(self, out_filename=out_filename, separate_files=separate_files, output_dir=output_dir,\n\u001b[1;32m 638\u001b[0m \u001b[0msuffix\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msuffix\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moverwrite\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moverwrite\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstreaming\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstreaming\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 639\u001b[0;31m parquet_output=True)\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mconvert_to_csv_parquet\u001b[0;34m(combiner, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize, parquet_output)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mseparate_files\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m combiner.align_save(output_dir=output_dir, suffix=suffix, overwrite=overwrite,\n\u001b[0;32m---> 58\u001b[0;31m chunksize=chunksize, parquet_output=parquet_output)\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mstreaming\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mout_filename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine_save\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparquet_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparquet_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: align_save() got an unexpected keyword argument 'parquet_output'"
],
"output_type": "error"
}
],
"source": [
"combiner2.to_parquet(output_dir='test-data/output/')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# DEBUG: large files"
]
},
{
"cell_type": "code",
"execution_count": 70,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"9\n",
"['/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20140401_20140430_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20140501_20140829_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20140901_20141231_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20150101_20150630_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20150701_20151231_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20160101_20160630_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20160701_20161230_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20170102_20170630_D.txt', '/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20170703_20170731_D.txt']\n"
]
}
],
"source": [
"cfg_fnames = list(np.sort(glob.glob('/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_*.txt')))\n",
"print(len(cfg_fnames))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "code",
"execution_count": 71,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n",
"['/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20150101_20150630_D.txt']\n"
]
}
],
"source": [
"cfg_fnames = list(np.sort(glob.glob('/mnt/data/dev/ubs-alphahack2017-shared/data-raw/ihs/US_Factors_Zscores/US_Factors_TotalCap_Cusip_Zscore_Historical_20150101_20150630_D.txt')))\n",
"print(len(cfg_fnames))\n",
"print(cfg_fnames)"
]
},
{
"cell_type": "code",
"execution_count": 72,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames, all_strings=True)"
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 73,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.is_all_equal()"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# how do I do streaming?\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'test-data/output/test-combined.csv'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mseparate_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mto_csv\u001b[0;34m(self, out_filename, separate_files, output_dir, suffix, overwrite, chunksize)\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 435\u001b[0m convert_to_csv_parquet(self, out_filename=out_filename, separate_files=separate_files, output_dir=output_dir,\n\u001b[0;32m--> 436\u001b[0;31m suffix=suffix, overwrite=overwrite, streaming=False, chunksize=chunksize)\n\u001b[0m\u001b[1;32m 437\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 438\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mconvert_to_csv_parquet\u001b[0;34m(combiner, out_filename, separate_files, output_dir, suffix, overwrite, streaming, chunksize, parquet_output)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine_save\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_filename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparquet_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparquet_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mout_filename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcombiner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mparquet_output\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpyarrow\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mpa\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mcombine\u001b[0;34m(self, is_col_common, is_preview)\u001b[0m\n\u001b[1;32m 261\u001b[0m \"\"\"\n\u001b[1;32m 262\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 263\u001b[0;31m \u001b[0mdfl_all\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv_all\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'reading full file'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mis_preview\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mis_preview\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogger\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mread_csv_all\u001b[0;34m(self, msg, is_preview, chunksize, cfg_col_sel, cfg_col_rename)\u001b[0m\n\u001b[1;32m 125\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogger\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mmsg\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msend_log\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m' '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mntpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbasename\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ok'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 127\u001b[0;31m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mis_preview\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mis_preview\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 128\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcfg_col_sel\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mcfg_col_rename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 129\u001b[0m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mapply_select_rename\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcfg_col_sel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcfg_col_rename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mread_csv\u001b[0;34m(self, fname, is_preview, chunksize)\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0mcfg_nrows\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnrows_preview\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_preview\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 114\u001b[0m return pd.read_csv(fname, dtype=cfg_dype, nrows=cfg_nrows, chunksize=chunksize,\n\u001b[0;32m--> 115\u001b[0;31m **self.read_csv_params)\n\u001b[0m\u001b[1;32m 116\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m def read_csv_all(self, msg=None, is_preview=False, chunksize=None, cfg_col_sel=None,\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mparser_f\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, doublequote, delim_whitespace, low_memory, memory_map, float_precision)\u001b[0m\n\u001b[1;32m 676\u001b[0m skip_blank_lines=skip_blank_lines)\n\u001b[1;32m 677\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 678\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 679\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 680\u001b[0m \u001b[0mparser_f\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 444\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 445\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 446\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 447\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 448\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1034\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'skipfooter not supported for iteration'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1035\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1036\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1037\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1038\u001b[0m \u001b[0;31m# May alter columns / col_dict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1846\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnrows\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1847\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1848\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1849\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1850\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_first_chunk\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader.read\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_low_memory\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_rows\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_column_data\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_tokens\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_with_dtype\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/core/dtypes/common.py\u001b[0m in \u001b[0;36mis_integer_dtype\u001b[0;34m(arr_or_dtype)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0mis_integer_dtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marr_or_dtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \"\"\"\n\u001b[1;32m 813\u001b[0m \u001b[0mCheck\u001b[0m \u001b[0mwhether\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mprovided\u001b[0m \u001b[0marray\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mof\u001b[0m \u001b[0man\u001b[0m \u001b[0minteger\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
],
"output_type": "error"
}
],
"source": [
"# how do I do streaming? this loads everything into memeory...\n",
"c.to_csv(out_filename='test-data/output/test-combined.csv',separate_files=False)"
]
},
{
"cell_type": "code",
"execution_count": 75,
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_sql_stream\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'mysql+mysqlconnector://testusr:testusr@localhost/test'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m'testd6tstack'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/mnt/data/dev/d6t-lib/d6tstack/d6tstack/combine_csv.py\u001b[0m in \u001b[0;36mto_sql_stream\u001b[0;34m(self, cnxn_string, table_name, if_exists, chunksize, sql_chunksize, cfg_col_sel, is_col_common, cfg_col_rename)\u001b[0m\n\u001b[1;32m 403\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogger\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 404\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msend_log\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'processing '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mntpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbasename\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ok'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 405\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mdf_chunk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunksize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunksize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 406\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcfg_col_sel\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mcfg_col_rename\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 407\u001b[0m \u001b[0mdf_chunk\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mapply_select_rename\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf_chunk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcfg_col_sel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcfg_col_rename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1005\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__next__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1006\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1007\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_chunk\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1008\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1009\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mget_chunk\u001b[0;34m(self, size)\u001b[0m\n\u001b[1;32m 1068\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1069\u001b[0m \u001b[0msize\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnrows\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_currow\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1070\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1071\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1072\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1034\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'skipfooter not supported for iteration'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1035\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1036\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1037\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1038\u001b[0m \u001b[0;31m# May alter columns / col_dict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1846\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnrows\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1847\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1848\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1849\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1850\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_first_chunk\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader.read\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_low_memory\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_rows\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_column_data\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_tokens\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._convert_with_dtype\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32m/opt/anaconda3/lib/python3.6/site-packages/pandas/core/dtypes/common.py\u001b[0m in \u001b[0;36mis_integer_dtype\u001b[0;34m(arr_or_dtype)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0mis_integer_dtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marr_or_dtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \"\"\"\n\u001b[1;32m 813\u001b[0m \u001b[0mCheck\u001b[0m \u001b[0mwhether\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mprovided\u001b[0m \u001b[0marray\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mof\u001b[0m \u001b[0man\u001b[0m \u001b[0minteger\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
],
"output_type": "error"
}
],
"source": [
"c.to_sql_stream('mysql+mysqlconnector://testusr:testusr@localhost/test','testd6tstack')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: examples-sql.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data Engineering in Python with databolt - Fast Loading to SQL with pandas (d6tlib/d6tstack)\n",
"\n",
"Pandas and SQL are great but they have some problems:\n",
"* loading data from pandas to SQL is very slow. So you can't preprocess data with python and then quickly store it in a db\n",
"* Loading CSV files into SQL is cumbersome and quickly breaks when input files are not consistent\n",
"\n",
"With `d6tstack` you can:\n",
"* load pandas dataframes to postgres or mysql much faster than with `pd.to_sql()` and with minimal memory consumption\n",
"* preprocess CSV files with pandas before writing to db\n",
"* solve data schema problems (eg new or renamed columns) before writing to db \n",
"* out of core functionality where large files are processed in chunks\n",
"\n",
"In this workbook we will demonstrate the usage of the d6tstack library for quickly loading data into SQL from CSV files and pandas."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# pd.to_sql() is slow\n",
"Let's see how slow `pd.to_sql()` is storing 100k rows of random data."
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(100000, 23)\n"
]
}
],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import uuid\n",
"import sqlalchemy\n",
"import glob\n",
"import time\n",
"\n",
"cfg_uri_psql = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'\n",
"cfg_uri_mysql = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'\n",
"\n",
"cfg_nobs = int(1e5)\n",
"np.random.seed(0)\n",
"df = pd.DataFrame({'id':range(cfg_nobs)})\n",
"df['uuid']=[uuid.uuid4().hex.upper()[0:10] for _ in range(cfg_nobs)]\n",
"df['date']=pd.date_range('1/1/2010',periods=cfg_nobs, freq='1T')\n",
"for i in range(20):\n",
" df['d'+str(i)]=np.random.normal(size=int(cfg_nobs))\n",
"\n",
"print(df.shape)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--- 28.010449647903442 seconds ---\n"
]
}
],
"source": [
"sqlengine = sqlalchemy.create_engine(cfg_uri_psql)\n",
"\n",
"start_time = time.time()\n",
"df.to_sql('benchmark',sqlengine,if_exists='replace')\n",
"print(\"--- %s seconds ---\" % (time.time() - start_time))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Speeding up pd.to_sql() in postgres and mysql with d6tstack\n",
"Let's see how we can make this faster. In this simple example we have a ~5x speedup with the speedup growing exponentially with larger datasets."
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--- 4.6529316902160645 seconds ---\n",
"creating mysql.csv ok\n",
"loading mysql.csv ok\n",
"--- 7.102342367172241 seconds ---\n"
]
}
],
"source": [
"import d6tstack.utils\n",
"\n",
"# psql\n",
"start_time = time.time()\n",
"d6tstack.utils.pd_to_psql(df, cfg_uri_psql, 'benchmark', if_exists='replace')\n",
"print(\"--- %s seconds ---\" % (time.time() - start_time))\n",
"\n",
"# mysql\n",
"start_time = time.time()\n",
"d6tstack.utils.pd_to_mysql(df, cfg_uri_mysql, 'benchmark', if_exists='replace')\n",
"print(\"--- %s seconds ---\" % (time.time() - start_time))\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using Pandas for preprocessing CSVs before storing to database\n",
"Pandas is great for preprocessing data. For example lets say we want to process dates before importing them to a database. `d6tstack` makes this easy for you, you simply pass the filename or list of files along with the preprocessing function and it will be quickly loaded in SQL - without loading everything into memory."
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" date sales cost profit\n",
"0 2011-02-01 200 -90 110\n",
"1 2011-02-02 200 -90 110\n",
"2 2011-02-03 200 -90 110\n",
"3 2011-02-04 200 -90 110\n",
"4 2011-02-05 200 -90 110\n"
]
}
],
"source": [
"cfg_fname = 'test-data/input/test-data-input-csv-colmismatch-feb.csv'\n",
"print(pd.read_csv(cfg_fname).head())"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
" date sales cost profit date_year_quarter date_monthend\n",
"0 2011-02-01 200 -90 110 11Q1 2011-02-28\n",
"1 2011-02-02 200 -90 110 11Q1 2011-02-28\n",
"2 2011-02-03 200 -90 110 11Q1 2011-02-28\n",
"3 2011-02-04 200 -90 110 11Q1 2011-02-28\n",
"4 2011-02-05 200 -90 110 11Q1 2011-02-28\n"
]
}
],
"source": [
"def apply(dfg):\n",
" dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')\n",
" dfg['date_year_quarter'] = (dfg['date'].dt.year).astype(str).str[-2:]+'Q'+(dfg['date'].dt.quarter).astype(str)\n",
" dfg['date_monthend'] = dfg['date'] + pd.tseries.offsets.MonthEnd()\n",
" return dfg\n",
"\n",
"d6tstack.combine_csv.CombinerCSV([cfg_fname], apply_after_read=apply,add_filename=False).to_psql_combine(cfg_uri_psql, 'benchmark', if_exists='replace')\n",
"print(pd.read_sql_table('benchmark',sqlengine).head())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Loading multiple CSV to SQL with data schema changes\n",
"Native database import commands only support one file. You can write a script to process multipe files which first of all is annoying and even worse it often breaks eg if there are schema changes. With `d6tstack` you quickly import multiple files and deal with data schema changes with just a couple of lines of python. The below is a quick example, to explore full functionality see https://github.com/d6t/d6tstack/blob/master/examples-csv.ipynb"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
"all equal False\n",
"\n"
]
},
{
"data": {
"text/html": [
"\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" date | \n",
" sales | \n",
" cost | \n",
" profit | \n",
" profit2 | \n",
"
\n",
" \n",
" | file_path | \n",
" | \n",
" | \n",
" | \n",
" | \n",
" | \n",
"
\n",
" \n",
" \n",
" \n",
" | test-data/input/test-data-input-csv-colmismatch-feb.csv | \n",
" True | \n",
" True | \n",
" True | \n",
" True | \n",
" False | \n",
"
\n",
" \n",
" | test-data/input/test-data-input-csv-colmismatch-jan.csv | \n",
" True | \n",
" True | \n",
" True | \n",
" True | \n",
" False | \n",
"
\n",
" \n",
" | test-data/input/test-data-input-csv-colmismatch-mar.csv | \n",
" True | \n",
" True | \n",
" True | \n",
" True | \n",
" True | \n",
"
\n",
" \n",
"
\n",
"
"
],
"text/plain": [
" date sales cost profit profit2\n",
"file_path \n",
"test-data/input/test-data-input-csv-colmismatch... True True True True False\n",
"test-data/input/test-data-input-csv-colmismatch... True True True True False\n",
"test-data/input/test-data-input-csv-colmismatch... True True True True True"
]
},
"execution_count": 48,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import glob\n",
"import d6tstack.combine_csv\n",
"\n",
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-colmismatch-*.csv'))\n",
"c = d6tstack.combine_csv.CombinerCSV(cfg_fnames)\n",
"\n",
"# check columns\n",
"print('all equal',c.is_all_equal())\n",
"print('')\n",
"c.is_column_present()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The presence of the additional `profit2` column in the 3rd file would break the data load. `d6tstack` will fix the situation and load everything correctly. And you can run any additional preprocessing logic like in the above example. All this is done out of core so you can process even large files without any memory issues."
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sniffing columns ok\n",
" date sales cost profit profit2 date_year_quarter date_monthend\n",
"25 2011-03-06 300 -100 200 400.0 11Q1 2011-03-31\n",
"26 2011-03-07 300 -100 200 400.0 11Q1 2011-03-31\n",
"27 2011-03-08 300 -100 200 400.0 11Q1 2011-03-31\n",
"28 2011-03-09 300 -100 200 400.0 11Q1 2011-03-31\n",
"29 2011-03-10 300 -100 200 400.0 11Q1 2011-03-31\n"
]
}
],
"source": [
"cfg_fnames = list(glob.glob('test-data/input/test-data-input-csv-colmismatch-*.csv'))\n",
"d6tstack.combine_csv.CombinerCSV(cfg_fnames, apply_after_read=apply,add_filename=False).to_psql_combine(cfg_uri_psql, 'benchmark', if_exists='replace')\n",
"print(pd.read_sql_table('benchmark',sqlengine).tail())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: requirements-dev.txt
================================================
pytest
sphinx
sphinxcontrib-napoleon
sphinx_rtd_theme
dask[dataframe]
fastparquet
python-snappy
xlwt
================================================
FILE: requirements.txt
================================================
numpy
openpyxl
xlrd
pandas>=0.22.0
sqlalchemy
scipy
pyarrow
psycopg2
mysql-connector
d6tcollect
================================================
FILE: setup.cfg
================================================
[metadata]
description-file = README.md
================================================
FILE: setup.py
================================================
from setuptools import setup
extras = {
'xls': ['openpyxl','xlrd'],
'parquet': ['pyarrow'],
'psql': ['psycopg2-binary'],
'mysql': ['mysql-connector'],
}
setup(
name='d6tstack',
version='0.2.0',
packages=['d6tstack'],
url='https://github.com/d6t/d6tstack',
license='MIT',
author='DataBolt Team',
author_email='support@databolt.tech',
description='d6tstack: Quickly ingest CSV and XLS files. Export to pandas, SQL, parquet',
long_description='Quickly ingest raw files. Works for XLS, CSV, TXT which can be exported to CSV, Parquet, SQL and Pandas. d6tstack solves many performance and schema problems typically encountered when ingesting raw files.',
install_requires=[
'numpy','pandas>=0.22.0','sqlalchemy','scipy','d6tcollect'
],
extras_require=extras,
include_package_data=True,
python_requires='>=3.5',
keywords=['d6tstack', 'ingest csv'],
classifiers=[]
)
================================================
FILE: tests/__init__.py
================================================
================================================
FILE: tests/pypi.sh
================================================
# pip install setuptools wheel twine
python setup.py sdist bdist_wheel
twine upload dist/*
================================================
FILE: tests/test-parquet.py
================================================
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 10:45:14 2018
@author: deepmind
"""
import pandas as pd
import glob
from fastparquet import write
from fastparquet import ParquetFile
import pyarrow as pa
import pyarrow.parquet as pq
for fname in glob.glob('test-data-input-csv-*.csv'):
    df = pd.read_csv(fname)
    df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    # write(fname[:-4]+'.parq', df)
    # build an arrow table from the dataframe and write one parquet file per input csv
    table = pa.Table.from_pandas(df)
    pq.write_table(table, fname[:-4] + '.parq')
import dask.dataframe as dd
ddf = dd.read_parquet('test-data-input-csv-*.csv')
ddf.head()
ddf = dd.read_parquet('test-data-input-csv-*.parq')
ddf.head()
ddf.tail()
ddf.compute()
dft = ParquetFile('test-data-input-csv-mar.parq').to_pandas()
assert df.equals(dft)
ddf = dd.read_csv('test-data-input-csv-*.parq')
================================================
FILE: tests/test_combine_csv.py
================================================
"""Run unit tests.
Use this to run tests and understand how d6tstack.combine_csv works.
Setup::
mkdir -p test-data/input
mkdir -p test-data/output
mysql -u root -p
CREATE DATABASE testdb;
CREATE USER 'testusr'@'localhost' IDENTIFIED BY 'testpwd';
GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%';
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() tests the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import d6tstack.utils
import math
import pandas as pd
# import pyarrow as pa
# import pyarrow.parquet as pq
import ntpath
import shutil
import dask.dataframe as dd
import sqlalchemy
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class DebugLogger(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = DebugLogger('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common,allstr=True):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
if allstr:
df_all = df_all[df_all.columns].astype(str)
return df_all
def check_df_colmismatch_combine(dfg,is_common=False, convert_date=True):
dfg = dfg.drop(['filepath','filename'], axis=1).sort_values('date').reset_index(drop=True)
if convert_date:
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
dfchk = create_files_df_colmismatch_combine(is_common,False).reset_index(drop=True)[dfg.columns]
assert dfg.equals(dfchk)
return True
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
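# write_file_xls relies on ExcelWriter.save(), which newer pandas releases deprecate
# and eventually remove; a hedged alternative using the context-manager form, which
# closes the writer automatically (write_file_xls_ctx is a hypothetical name, not used
# by the fixtures below):
def write_file_xls_ctx(dfg, fname, startrow=0, startcol=0):
    with pd.ExcelWriter(fname) as writer:
        dfg.to_excel(writer, sheet_name='Sheet1', index=False, startrow=startrow, startcol=startcol)
        dfg.to_excel(writer, sheet_name='Sheet2', index=False, startrow=startrow, startcol=startcol)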
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# scan header
#************************************************************
#************************************************************
def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
# clean
combiner = CombinerCSV(fname_list=create_files_csv)
combiner.sniff_columns()
assert combiner.is_all_equal()
assert combiner.is_column_present().all().all()
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all']
assert combiner.sniff_results['columns_unique'] == []
# extra column
combiner = CombinerCSV(fname_list=create_files_csv_colmismatch)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert not combiner.is_column_present().all().all()
assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False]
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2']
assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit']
assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_unique'] == ['profit2']
assert combiner.is_column_present_unique().columns.tolist() == ['profit2']
# mixed order
combiner = CombinerCSV(fname_list=create_files_csv_colreorder)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2]
def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch):
# rename
df = CombinerCSV(fname_list=create_files_csv).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename()
assert df.columns.tolist()==['cost']
assert df['cost'].unique().tolist()==['cost2']
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_rename={'profit2':'profit3'}).preview_rename()
assert df.columns.tolist()==['profit2']
assert df['profit2'].unique().tolist()==[np.nan, 'profit3']
# select
l = CombinerCSV(fname_list=create_files_csv).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select()
assert l2==l
l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select()
assert l == ['date', 'sales', 'cost']
l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select()
assert l == ['date', 'sales', 'cost', 'profit', 'profit2']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
# rename+select
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
df = CombinerCSV(fname_list=create_files_csv).to_pandas()
assert df.shape == (30, 6)
df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
assert df.shape == (30, 6+1)
assert df['profit2'].isnull().unique().tolist() == [True, False]
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas()
assert df.shape == (30, 6)
assert 'profit2' not in df.columns
# rename+select
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
def test_combinepreview(create_files_csv_colmismatch):
df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('