1、datasets.load_dataset()
- Reference: https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/loading_methods#datasets.load_dataset

```python
from datasets import load_dataset

# jy: this call goes through the load_dataset function defined in /datasets/load.py
ds = load_dataset("./dataset_script_jy/csv.py", data_files="nli_for_simcse.csv")
print(ds)
```

The `load_dataset` function is analyzed below; its signature is:
```python
def load_dataset(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Union[Dict, List] = None,
    split: Optional[Union[str, Split]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    ignore_verifications: bool = False,
    save_infos: bool = False,
    script_version: Optional[Union[str, Version]] = None,
    **config_kwargs,
) -> Union[DatasetDict, Dataset]:
```
(1) Function description
- Load a dataset. This method does the following under the hood:
  - 1) Download and import in the library the dataset loading script from `path` if it's not already cached inside the library.
    - Processing scripts are small Python scripts that define the citation, info and format of the dataset, contain the URL to the original data files and the code to load examples from the original data files.
    - You can find some of the scripts here: https://github.com/huggingface/datasets/datasets and easily upload yours to share them using the CLI `datasets-cli`.
  - 2) Run the dataset loading script, which will:
    - Download the dataset file from the original URL (see the script) if it's not already downloaded and cached.
    - Process and cache the dataset in typed Arrow tables.
    - Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/Python standard types.
    - They can be directly accessed from the drive, loaded in RAM or even streamed over the web.
  - 3) Return a dataset built from the requested splits in `split` (default: all).
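For the call at the top of this section, a quick sanity check of what comes back (a sketch; the exact features and row count depend on the contents of `nli_for_simcse.csv`):

```python
from datasets import load_dataset

ds = load_dataset("./dataset_script_jy/csv.py", data_files="nli_for_simcse.csv")

# With no `split` argument the result is a DatasetDict; the CSV script shown later
# in this note maps a plain data_files string to a single "train" split.
print(type(ds))         # <class 'datasets.dataset_dict.DatasetDict'>
print(list(ds.keys()))  # ['train']
print(ds["train"][0])   # first row as a dict of column name -> value
```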
(2) Parameter description
- `path`: path to the dataset processing script with the dataset builder. Can be either:
  - a local path to the processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`;
  - a dataset identifier on the HuggingFace AWS bucket (list all available datasets and ids with `datasets.list_datasets()`), e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
- `name`: defines the name of the dataset configuration.
- `data_files`: defines the data_files of the dataset configuration.
- `data_dir`: defines the data_dir of the dataset configuration.
- `split` (`datasets.Split` or `str`): which split of the data to load.
  - If None, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
  - If given, will return a single Dataset.
  - Splits can be combined and specified like in tensorflow-datasets.
- `cache_dir`: directory to read/write data. Defaults to `~/datasets`.
- `features` (Optional `datasets.Features`): set the features type to use for this dataset.
- `download_config` (Optional `datasets.DownloadConfig`): specific download configuration parameters.
- `download_mode` (Optional `datasets.GenerateMode`): select the download/generate mode. Defaults to `REUSE_DATASET_IF_EXISTS`.
- `ignore_verifications`: ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
- `save_infos`: save the dataset information (checksums/size/splits/...).
- `script_version` (Optional `Union[str, datasets.Version]`): if specified, the module will be loaded from the datasets repository at this version. By default it is set to the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues.
- `**config_kwargs` (Optional `dict`): keyword arguments to be passed to the `datasets.BuilderConfig` and used in the `datasets.DatasetBuilder`.

(3) Return value
- `datasets.Dataset` or `datasets.DatasetDict`: a single `Dataset` if `split` is given, otherwise a `DatasetDict` with one entry per split.
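As a sketch of how `data_files`, `features` and `split` interact (the column names below are assumptions chosen for illustration, not read from the file above):

```python
import datasets
from datasets import load_dataset

# Explicit Features pin the schema instead of letting pandas/Arrow infer it.
# The column names used here are illustrative and must match the actual CSV header.
features = datasets.Features(
    {
        "sent0": datasets.Value("string"),
        "sent1": datasets.Value("string"),
        "hard_neg": datasets.Value("string"),
    }
)

train_ds = load_dataset(
    "./dataset_script_jy/csv.py",
    data_files={"train": "nli_for_simcse.csv"},  # dict form: one entry per split
    features=features,
    split="train",                               # a single Dataset rather than a DatasetDict
)
print(train_ds.features)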
The `./dataset_script_jy/csv.py` dataset script used in the example at the top of this section:

```python
import logging
from dataclasses import dataclass
from typing import List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets


logger = logging.getLogger(__name__)


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = b"."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: str = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: datasets.Features = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
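
# Note (added for readability): every field on CsvConfig above mirrors a keyword
# argument of pandas.read_csv, and __post_init__ only maps the HF-style aliases
# (delimiter -> sep, column_names -> names) onto the pandas names. Any of these
# fields can be set from the load_dataset(...) call via **config_kwargs.
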
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
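
    # Note (added for readability): a plain string or list in data_files becomes a
    # single "train" split, while a dict such as {"train": [...], "validation": [...]}
    # yields one SplitGenerator per key -- this is why load_dataset returns a
    # DatasetDict keyed by those split names when no `split` argument is given.
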
    def _generate_tables(self, files):
        schema = pa.schema(self.config.features.type) if self.config.features is not None else None
        for file_idx, file in enumerate(files):
            csv_file_reader = pd.read_csv(
                file,
                iterator=True,
                sep=self.config.sep,
                header=self.config.header,
                names=self.config.names,
                index_col=self.config.index_col,
                usecols=self.config.usecols,
                prefix=self.config.prefix,
                mangle_dupe_cols=self.config.mangle_dupe_cols,
                engine=self.config.engine,
                true_values=self.config.true_values,
                false_values=self.config.false_values,
                skipinitialspace=self.config.skipinitialspace,
                skiprows=self.config.skiprows,
                nrows=self.config.nrows,
                na_values=self.config.na_values,
                keep_default_na=self.config.keep_default_na,
                na_filter=self.config.na_filter,
                verbose=self.config.verbose,
                skip_blank_lines=self.config.skip_blank_lines,
                thousands=self.config.thousands,
                decimal=self.config.decimal,
                lineterminator=self.config.lineterminator,
                quotechar=self.config.quotechar,
                quoting=self.config.quoting,
                escapechar=self.config.escapechar,
                comment=self.config.comment,
                encoding=self.config.encoding,
                dialect=self.config.dialect,
                error_bad_lines=self.config.error_bad_lines,
                warn_bad_lines=self.config.warn_bad_lines,
                skipfooter=self.config.skipfooter,
                doublequote=self.config.doublequote,
                memory_map=self.config.memory_map,
                float_precision=self.config.float_precision,
                chunksize=self.config.chunksize,
            )
            for batch_idx, df in enumerate(csv_file_reader):
                pa_table = pa.Table.from_pandas(df, schema=schema)
                # Uncomment for debugging (will print the Arrow table size and elements)
                # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                yield (file_idx, batch_idx), pa_table
```
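
Because `**config_kwargs` end up on `CsvConfig`, any of the pandas-style options above can be set directly in the `load_dataset` call. A minimal sketch using the same script and file as before (the chosen options are arbitrary illustrations):

```python
from datasets import load_dataset

ds = load_dataset(
    "./dataset_script_jy/csv.py",   # local dataset script (the `path` argument)
    data_files="nli_for_simcse.csv",
    delimiter=",",                  # CsvConfig.__post_init__ copies this into `sep`
    quotechar='"',                  # forwarded unchanged to pandas.read_csv
    encoding="utf-8",
    split="train",                  # plain data_files string -> a single "train" split
)
print(ds.num_rows)
```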