I’m a beginner in Python, and I’m working on a project to preprocess Japanese text data for argument mining. My goal is to extract metadata (e.g., parliamentary session, date, speaker) and the speech content from the text, and then save it in a JSON file.
Each speech in my dataset typically begins with the symbol “○” followed by the speaker’s name or position, and then the speech content.
It looks like this:
○公述人(北岡伸一君) これはできるところからやっていくということであろうと思います。国連の中に、あるいは独立でもいいと思いますが、世界の武器貿易を把握するための機関をつくる。それを日本がリーダーシップをとり、お金を出してやっていく。それはいわば貿易におけるガットのようなものに将来なればいいと思いますけれども、そういうものをやっていく。超大国が踏み切らないからやらないというのではなくて、やっぱりできるところからやっていくということだと思います。
○斎藤文夫君 もう時間がございませんので、両先生ありがとうございました。
○佐藤三吾君 早朝からお招きして、両公述人の先生には大変失礼なことをして、同僚としてもおわびを申し上げたいと思います。委員長からまたごあいさつがあると思います。
I initially wrote a simple regex to handle this, and it worked well for some files, but not consistently across all of them.
I noticed that some procedural data, which also contains the symbol “○,” is not being removed by my regex. When I tried to refine my regex to address this, it stopped capturing some of the necessary speeches.
I’ve identified a specific file that serves as a good benchmark for testing my code.
It is accessible via this link:
https://kokkai.ndl.go.jp/#/detail?minId=112315262X00119920326&current=19
(I downloaded the transcript from that page and saved it as a plain-text file, which is what my script reads.)
This is my code:
import os
import re
import json
from datetime import datetime
import argparse
def convert_japanese_date_to_english(date_str):
    """Turn a 'YYYYMMDD' date string into 'DD Mon YYYY' (e.g. '19920326' -> '26 Mar 1992')."""
    parsed = datetime.strptime(date_str, '%Y%m%d')
    return parsed.strftime('%d %b %Y')
def extract_session_and_chamber(first_line):
    """Extract the Diet session number and chamber from the transcript's header line.

    Args:
        first_line: The first line of the transcript, e.g.
            '第123回国会　参議院予算委員会公聴会会議録第1号'.

    Returns:
        A ('Session <n>', chamber) tuple, or
        ('Unknown Session', 'Unknown Chamber') when the header does not match.
    """
    # BUG FIX: the pattern had its backslashes stripped ('d+', 's*'), so it
    # matched literal letters and never succeeded; restore \d and \s.
    session_match = re.search(
        r'第(\d+)回国会\s*(衆議院|参議院).*?(?:第(\d+)号)',
        first_line
    )
    if session_match:
        session_number = session_match.group(1)
        chamber = session_match.group(2)
        return f'Session {session_number}', chamber
    return 'Unknown Session', 'Unknown Chamber'
def extract_speaker_and_content(segment):
    """Split a ○-prefixed segment into (speaker, content).

    Handles both '○役職（氏名）発言…' (role with parenthesized name) and
    '○氏名　発言…' (bare name followed by the speech).

    Returns:
        (speaker, content) strings, or (None, None) when the segment does not
        start with a recognizable speaker tag.
    """
    # BUG FIX: the original pattern lost its backslashes ('s*' instead of
    # '\s*', '[^(s]' instead of '[^（\s]'), so it matched the wrong text.
    # The character classes accept both full-width （） and ASCII () parens.
    match = re.match(r'○([^（(\s]+)\s*(?:[（(](.*?)[）)])?\s*(.+)', segment, re.DOTALL)
    if match:
        speaker_name = match.group(1).strip()
        role = match.group(2).strip() if match.group(2) else ""
        speaker = f"{speaker_name} ({role})" if role else speaker_name
        content = match.group(3).strip()
        return speaker, content
    # Fallback: any non-space run after ○ is the speaker, the rest is content.
    # \S/\s are Unicode-aware, so the ideographic space U+3000 is covered too.
    match_simple = re.match(r'○(\S+)\s*(.+)', segment, re.DOTALL)
    if match_simple:
        speaker = match_simple.group(1).strip()
        content = match_simple.group(2).strip()
        return speaker, content
    # If neither pattern matches, return None for both speaker and content
    return None, None
def is_metadata_or_procedural(segment):
    """Return True if the segment is metadata/procedural content, not a speech.

    BUG FIX 1: '第d+回国会' was missing the backslash in \\d.
    BUG FIX 2: searching the ENTIRE segment dropped real speeches that merely
    mention words like 委員長 in their body. For ○-prefixed segments, only the
    heading token right after ○ (e.g. '委員の異動' in '○委員の異動') is tested,
    so a speech such as '○佐藤三吾君 …委員長から…' is kept.
    """
    metadata_patterns = [
        r'第\d+回国会',  # Session headers
        r'参議院|衆議院',  # Chamber names
        r'午前|午後',  # Time indications
        r'委員長|理事|委員|出席者|政府委員',  # Committee-related terms
        r'辞任|補欠選任',  # Procedural terms related to appointments
        r'議案|案件',  # Agenda items
        r'―――――――――――――',  # Procedural separators
    ]
    if segment.startswith('○'):
        # Only the heading token (text up to the first whitespace) decides;
        # the speech body itself must not trigger the filter.
        target = re.split(r'\s', segment[1:], maxsplit=1)[0]
    else:
        # Pre-speech header material (attendee lists, agenda): scan all of it.
        target = segment
    return any(re.search(pattern, target) for pattern in metadata_patterns)
def clean_content(content):
    """Clean speech content: strip procedural markers, bracketed asides, and
    normalize whitespace.

    BUG FIX: every regex here had lost its backslashes. In particular
    r'(.*?)' and r'([^)]*)' were capturing groups that removed nothing, and
    r's+' deleted runs of the LITERAL letter 's' instead of whitespace.
    """
    content = re.sub(r'―+.*?―+', '', content)      # Remove procedural markers
    content = re.sub(r'〔[^〕]*〕', '', content)    # Remove 〔…〕 stage directions
    content = re.sub(r'\([^)]*\)', '', content)    # Remove ASCII-parenthesized asides
    content = re.sub(r'（[^）]*）', '', content)    # Remove full-width-parenthesized asides
    content = re.sub(r'\s+', ' ', content)         # Collapse whitespace (incl. U+3000)
    # Trim leading and trailing whitespace
    return content.strip()
def process_speeches(file_path, output_dir):
    """Process one transcript file and write the cleaned speeches to JSON.

    Args:
        file_path: Path to a UTF-8 transcript .txt file whose name is expected
            to start with an 8-digit YYYYMMDD date.
        output_dir: Directory where 'processed_<name>.json' is written.

    Returns:
        The path of the JSON file that was written.
    """
    # os.path.splitext is safer than split('.') for names containing dots.
    file_name = os.path.splitext(os.path.basename(file_path))[0]
    # BUG FIX: r'd{8}' (missing \) never matched, so .group(0) raised
    # AttributeError. Also degrade gracefully when the name has no date.
    date_match = re.match(r'\d{8}', file_name)
    date = convert_japanese_date_to_english(date_match.group(0)) if date_match else 'Unknown Date'
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    # Guard against an empty file instead of raising IndexError on lines[0].
    first_line = lines[0] if lines else ''
    session_info, chamber = extract_session_and_chamber(first_line)
    speech = ''.join(lines)
    # Split at every ○, keeping the marker attached to the segment after it.
    segments = re.split(r'(?=○)', speech)
    processed_data = []
    for segment in segments:
        segment = segment.strip()
        if not segment or is_metadata_or_procedural(segment):
            continue
        speaker, content = extract_speaker_and_content(segment)
        if speaker and content:  # Only process if both parts are present
            cleaned_content = clean_content(content)
            if cleaned_content:
                processed_data.append({
                    'Date': date,
                    'Session': session_info,
                    'Chamber': chamber,
                    'Speaker': speaker,
                    'Content': cleaned_content,
                })
    output_file = os.path.join(output_dir, f'processed_{file_name}.json')
    with open(output_file, 'w', encoding='utf-8') as jsonfile:
        json.dump(processed_data, jsonfile, ensure_ascii=False, indent=4)
    return output_file
def process_all_speeches(input_dir, output_dir):
    """Process every .txt transcript in input_dir; return the JSON paths written."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    return [
        process_speeches(os.path.join(input_dir, name), output_dir)
        for name in os.listdir(input_dir)
        if name.endswith('.txt')
    ]
def main():
    """Command-line entry point: parse the two directories and run the pipeline."""
    arg_parser = argparse.ArgumentParser(
        description='Process Japanese speech transcripts.'
    )
    arg_parser.add_argument('input_dir', help='Directory containing input .txt files')
    arg_parser.add_argument('output_dir', help='Directory to save processed .json files')
    parsed = arg_parser.parse_args()
    process_all_speeches(parsed.input_dir, parsed.output_dir)


if __name__ == '__main__':
    main()
Could someone help me with this code or suggest a better approach to reliably extract the speech content?
Any guidance would be greatly appreciated!
Ana17 is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.