refactor: add more examples

2022-06-03 16:36:32 +09:00
parent 6cd6370a71
commit 4dd186486c
15 changed files with 14961 additions and 1218 deletions


@@ -1,16 +1,21 @@
import argparse
from sensai_dataset.generator.commands import generate_dataset
from sensai_dataset.generator.constants import DATASET_DIR, DATASET_SOURCE_DIR
from sensai_dataset.generator.commands import generate_dataset, generate_reduced_dataset
from sensai_dataset.generator.constants import SENSAI_COMPLETE_DIR, SENSAI_DIR, DATASET_SOURCE_DIR
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='dataset generator')
parser.add_argument('-m', '--matcher', type=str, default='chats_*.csv')
parser.add_argument('-m', '--matcher', type=str, default='chats_*.parquet')
args = parser.parse_args()
print('target: ' + DATASET_DIR)
print('source: ' + DATASET_SOURCE_DIR)
print('SENSAI_COMPLETE_DIR: ' + SENSAI_COMPLETE_DIR)
print('SENSAI_DIR: ' + SENSAI_DIR)
generate_dataset(source_dir=DATASET_SOURCE_DIR,
target_dir=DATASET_DIR,
target_dir=SENSAI_COMPLETE_DIR,
matcher=args.matcher)
generate_reduced_dataset(source_dir=DATASET_SOURCE_DIR,
target_dir=SENSAI_DIR,
matcher=args.matcher)
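
Taken together, the updated entry point now produces both the complete and the reduced datasets in one run, reading chats_*.parquet instead of chats_*.csv. Below is a minimal sketch of driving it directly from Python; the concrete directory paths are placeholders, and only the function names, environment variables, and the new default matcher come from this diff.

# Minimal sketch (assumed usage). The constants module reads os.environ at
# import time, so the three directories must be set before the imports below.
import os

os.environ.setdefault('DATASET_SOURCE_DIR', '/data/source')            # hypothetical path
os.environ.setdefault('SENSAI_COMPLETE_DIR', '/data/sensai-complete')  # hypothetical path
os.environ.setdefault('SENSAI_DIR', '/data/sensai')                    # hypothetical path

from sensai_dataset.generator.commands import (generate_dataset,
                                                generate_reduced_dataset)
from sensai_dataset.generator.constants import (DATASET_SOURCE_DIR,
                                                SENSAI_COMPLETE_DIR,
                                                SENSAI_DIR)

# Same calls as the new __main__ block, with the new parquet matcher.
generate_dataset(source_dir=DATASET_SOURCE_DIR,
                 target_dir=SENSAI_COMPLETE_DIR,
                 matcher='chats_*.parquet')
generate_reduced_dataset(source_dir=DATASET_SOURCE_DIR,
                         target_dir=SENSAI_DIR,
                         matcher='chats_*.parquet')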


@@ -6,16 +6,17 @@ import pandas as pd
def generate_dataset(source_dir, target_dir, matcher):
print('[generate_sensai_dataset]')
print('[generate_dataset]')
delet_path = join(source_dir, 'deletion_events.csv')
del_events = pd.read_csv(delet_path, usecols=['id', 'retracted'])
delet_path = join(source_dir, 'deletion_events.parquet')
del_events = pd.read_parquet(delet_path, columns=['id', 'retracted'])
del_events = del_events.query('retracted == 0').copy()
del_events.drop(columns=['retracted'], inplace=True)
del_events['label'] = 'deleted'
ban_path = join(source_dir, 'ban_events.csv')
ban_events = pd.read_csv(ban_path, usecols=['authorChannelId', 'videoId'])
ban_path = join(source_dir, 'ban_events.parquet')
ban_events = pd.read_parquet(ban_path,
columns=['authorChannelId', 'videoId'])
ban_events['label'] = 'hidden'
for f in sorted(iglob(join(source_dir, matcher))):
@@ -24,17 +25,93 @@ def generate_dataset(source_dir, target_dir, matcher):
# load chat
print('>>> Loading chats')
chat_path = join(source_dir, 'chats_' + period_string + '.csv')
chat_path = join(source_dir, 'chats_' + period_string + '.parquet')
chats = pd.read_csv(chat_path,
na_values='',
keep_default_na=False,
usecols=[
'authorChannelId',
'videoId',
'id',
'body',
])
chats = pd.read_parquet(
chat_path,
columns=['authorChannelId', 'videoId', 'id', 'authorName', 'body'])
# remove NA
chats = chats[chats['body'].notna()]
# apply mods
print('>>> Merging bans')
chats = pd.merge(chats,
ban_events,
on=['authorChannelId', 'videoId'],
how='left')
# apply mods
print('>>> Merging deletion')
chats.loc[chats['id'].isin(del_events['id']), 'label'] = 'deleted'
# apply safe
print('>>> Applying safe')
chats['label'].fillna('nonflagged', inplace=True)
isFlagged = chats['label'] != 'nonflagged'
flagged = chats[isFlagged].copy()
# to make balanced dataset
nbFlagged = flagged.shape[0]
if nbFlagged == 0:
continue
print('>>> Sampling nonflagged chats')
print('nbFlagged', nbFlagged)
nonflag = chats[~isFlagged].sample(nbFlagged)
print('>>> Writing dataset')
# NOTE: do not use categorical dtype with to_parquet; otherwise the files will fail to load with Hugging Face's Dataset
columns_to_delete = [
'authorChannelId',
'videoId',
'id',
]
flagged.drop(columns=columns_to_delete, inplace=True)
flagged.to_parquet(join(target_dir,
f'chats_flagged_{period_string}.parquet'),
index=False)
nonflag.drop(columns=columns_to_delete, inplace=True)
nonflag.to_parquet(join(target_dir,
f'chats_nonflag_{period_string}.parquet'),
index=False)
# free up memory
del nonflag
del flagged
del chats
gc.collect()
def generate_reduced_dataset(source_dir, target_dir, matcher):
print('[generate_reduced_dataset]')
delet_path = join(source_dir, 'deletion_events.parquet')
del_events = pd.read_parquet(delet_path, columns=['id', 'retracted'])
del_events = del_events.query('retracted == 0').copy()
del_events.drop(columns=['retracted'], inplace=True)
del_events['label'] = 'deleted'
ban_path = join(source_dir, 'ban_events.parquet')
ban_events = pd.read_parquet(ban_path,
columns=['authorChannelId', 'videoId'])
ban_events['label'] = 'hidden'
for f in sorted(iglob(join(source_dir, matcher))):
period_string = splitext(basename(f))[0].split('_')[1]
print('>>> Period:', period_string)
# load chat
print('>>> Loading chats')
chat_path = join(source_dir, 'chats_' + period_string + '.parquet')
chats = pd.read_parquet(
chat_path,
columns=['authorChannelId', 'videoId', 'id', 'authorName', 'body'])
# remove NA
chats = chats[chats['body'].notna()]
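
The NOTE above about categorical dtypes exists because the flagged/nonflag parquet shards are meant to be loaded with Hugging Face's datasets library. Below is a minimal sketch of such a load; the chats_flagged_*/chats_nonflag_* file names and the surviving columns (authorName, body, label) come from this diff, while the use of the generic parquet builder and the directory layout are assumptions.

# Minimal sketch: load the generated shards with Hugging Face `datasets`.
# Plain string columns load cleanly; categorical columns written by
# to_parquet are what the NOTE above warns against.
import os
from glob import glob

from datasets import load_dataset  # assumes the `datasets` package is installed

target_dir = os.environ['SENSAI_COMPLETE_DIR']
data_files = {
    'flagged': sorted(glob(os.path.join(target_dir, 'chats_flagged_*.parquet'))),
    'nonflag': sorted(glob(os.path.join(target_dir, 'chats_nonflag_*.parquet'))),
}
ds = load_dataset('parquet', data_files=data_files)
print(ds['flagged'].features)  # expect authorName, body, label as strings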


@@ -1,7 +1,5 @@
import os
DATASET_DIR = os.environ['DATASET_DIR']
SENSAI_DIR = os.environ['SENSAI_DIR']
SENSAI_COMPLETE_DIR = os.environ['SENSAI_COMPLETE_DIR']
DATASET_SOURCE_DIR = os.environ['DATASET_SOURCE_DIR']
os.makedirs(DATASET_DIR, exist_ok=True)
os.makedirs(DATASET_SOURCE_DIR, exist_ok=True)
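
Since the hunk above interleaves the removed DATASET_DIR lines with their replacements, here is how the constants module presumably reads after this commit. This is a sketch reconstructed from the shown lines only; any makedirs calls for the two new target directories are not visible in the hunk.

# Sketch of sensai_dataset/generator/constants.py after this change (assumed).
import os

SENSAI_DIR = os.environ['SENSAI_DIR']
SENSAI_COMPLETE_DIR = os.environ['SENSAI_COMPLETE_DIR']
DATASET_SOURCE_DIR = os.environ['DATASET_SOURCE_DIR']

# Only the source directory is created in the shown lines; the target
# directories may need to exist before the generator writes into them.
os.makedirs(DATASET_SOURCE_DIR, exist_ok=True)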