Natural-language processing often relies on the re module for regex string replacement, but when the volume of text is very large this can take a long time to run, so here asyncio is used to run the cleaning step asynchronously.
import pandas as pd
import re
import asyncio
# Load the raw guba post dump and drop rows with missing values
data = pd.read_csv("guba_all_post_20230413.csv")
data.dropna(inplace=True)
# Original synchronous version, kept for comparison:
# def replace_between_dollars(strings):
#     pattern = r'\$[^$]*\$;'
#     pattern1 = r'[^\w\s]+'
#     new_strings = []
#     for idx, text in enumerate(strings):
#         text = re.sub(pattern, '', text)
#         text = re.sub(pattern1, '', text)
#         text = re.sub(r'\s+', '', text)
#         new_strings.append(text)
#     return new_strings
# data["new_text"] = replace_between_dollars(data["text"])
# data[:50]
pattern = r'\$[^$]*\$;'   # "$...$;" stock-tag spans inserted by the forum
pattern1 = r'[^\w\s]+'    # runs of punctuation and other symbols

async def replace_between_dollars(long_string):
    # Clean a single post: strip stock tags, punctuation, whitespace,
    # very long runs of Latin letters, and "autoimg..." image placeholders
    text = str(long_string)
    text = re.sub(pattern, '', text)
    text = re.sub(pattern1, '', text)
    text = re.sub(r'\s+', '', text)
    text = re.sub(r'[a-zA-Z]{30,}', '', text)
    text = re.sub(r"autoimg\w+", "", text)
    return text
async def main():
    # Schedule one cleaning task per post and gather the results in order
    tasks = []
    for i in data["text"]:
        tasks.append(asyncio.create_task(replace_between_dollars(i)))
    matches_list = await asyncio.gather(*tasks)
    data["new_text"] = matches_list
    print(matches_list[:200])
    data.to_csv("guba_all_newtext_20230413.csv", index=False)

if __name__ == '__main__':
    asyncio.run(main())
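To make the cleaning rules concrete, here is a small standalone check of what each pattern strips out. The sample post, including the stock tag, the "autoimg" token, and the trailing letter run, is made up purely for illustration:

import re

pattern = r'\$[^$]*\$;'   # "$...$;" stock tags
pattern1 = r'[^\w\s]+'    # punctuation and symbols
sample = "$贵州茅台(SH600519)$;今天 行情 不错!!! autoimg12345 " + "x" * 40

text = re.sub(pattern, '', sample)         # drop the "$...$;" tag
text = re.sub(pattern1, '', text)          # drop punctuation
text = re.sub(r'\s+', '', text)            # drop all whitespace
text = re.sub(r'[a-zA-Z]{30,}', '', text)  # drop runs of 30+ Latin letters
text = re.sub(r'autoimg\w+', '', text)     # drop forum image placeholders
print(text)                                # -> 今天行情不错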
Result of running the script above:
['估值有待修复煤炭平均市盈率6倍3美元', '国产医疗器械行业发展迅速迈瑞作为的国内最大的医疗器械企业基本一枝独秀了', '今日上海现货钼价', '出消息了准备套人', '你爷爷要红了', '买个了鬼半年多了没一点长进而且还跌', '没有万手哥55过不去', '今天972抄底了感觉大盘要怕怕的明天希望你给给机会出来', '可从研究开放式基金入手如010379013626005108010341等', '明570收']
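Since re.sub is CPU-bound and the coroutines above never await anything, most of the remaining speedup potential lies in using more CPU cores. A possible extension, not part of the original post and sketched here assuming the same file and column names as above, keeps the asyncio structure but hands chunks of posts to a process pool via loop.run_in_executor; chunk_size is an arbitrary tuning knob:

import asyncio
import re
from concurrent.futures import ProcessPoolExecutor

import pandas as pd

pattern = r'\$[^$]*\$;'
pattern1 = r'[^\w\s]+'

def clean_one(long_string):
    # Same regex pipeline as above, written as a plain function so it can be pickled
    text = str(long_string)
    text = re.sub(pattern, '', text)
    text = re.sub(pattern1, '', text)
    text = re.sub(r'\s+', '', text)
    text = re.sub(r'[a-zA-Z]{30,}', '', text)
    text = re.sub(r'autoimg\w+', '', text)
    return text

def clean_chunk(texts):
    # Workers clean whole chunks to keep per-task overhead low
    return [clean_one(t) for t in texts]

async def main():
    data = pd.read_csv("guba_all_post_20230413.csv")
    data.dropna(inplace=True)
    texts = data["text"].tolist()

    chunk_size = 10_000   # arbitrary; tune to the size of the file
    chunks = [texts[i:i + chunk_size] for i in range(0, len(texts), chunk_size)]

    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        results = await asyncio.gather(
            *(loop.run_in_executor(pool, clean_chunk, c) for c in chunks)
        )

    data["new_text"] = [t for chunk in results for t in chunk]
    data.to_csv("guba_all_newtext_20230413.csv", index=False)

if __name__ == '__main__':
    asyncio.run(main())

Loading the CSV inside main() rather than at module level keeps spawned worker processes from re-reading the file on platforms such as Windows.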
Thinking more is itself a form of effort: make the right analyses and choices, because our time and energy are limited, so spend them where they are most valuable.