{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import re\n",
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Overview\n",
"\n",
"## Clean Text\n",
"Our goal is to 'clean up' the discharge text with the goal of making it easier for our model to find meaningful signal. To this end, we remove \"fluff\" from the text. I loosely define \"fluff\" as:\n",
"1. text that appears in the same context across most of the notes (e.g. \"sarasota memorial hospital\")\n",
"2. text that our model has no good way to handle (e.g. \"50 mg\" - our model doesn't understand numbers)\n",
"\n",
"In the first case, we simply remove the string altogether.\n",
"In the second case, we replace with a string - e.g. \"50 mg\" becomes \"quantity\".\n",
" \n",
"Approach:\n",
"1. Find dates in the text, replace them with the string 'DATE'\n",
"2. Find and remove fluff expressions with 'DATE' in them, e.g. 'Date of admission DATE', 'd: DATE t: DATE'\n",
"3. Find quantities in the text, replace them with the string 'QUANTITY'\n",
"4. Find frequency instructions ('per day') in the text, replace them with the string 'FREQUENCY'.\n",
"5. Find and remove fluff like 'sarasota memorial hospital' or 'admitting diagnosis'. \n",
"6. Find and remove other expressions ('visit id XXXXXX', ')\n",
"\n",
"Note that this necessarily occurs after splitting text into sections; this code would strip away section headers.\n",
"\n",
"## Train/test split\n",
"Once text prep is done, we do a train/test split, and write train/test data to separate data files."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Clean text"
]
},
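{
"cell_type": "markdown",
"metadata": {},
"source": [
"A toy illustration of the placeholder approach, using simplified stand-in patterns and a made-up sentence (not the full patterns defined below):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Illustrative sketch only: simplified stand-in patterns on a made-up sentence.\n",
"# The real patterns and processing pipeline are defined in the cells below.\n",
"sample = \"date of admission: 06/05/1995. the patient was given 50 mg of lisinopril daily.\"\n",
"sample = re.sub(r\"\\d{1,2}/\\d{1,2}/\\d{2,4}\", \"DATE\", sample)        # dates -> DATE\n",
"sample = re.sub(r\"\\d+\\s?(milligrams?|mg)\", \"QUANTITY\", sample)      # doses -> QUANTITY\n",
"sample = re.sub(r\"daily|times per day\", \"FREQUENCY\", sample)        # frequencies -> FREQUENCY\n",
"sample = re.sub(r\"date of admission:?\\s?DATE\\.?\\s*\", \"\", sample)    # fluff anchored on DATE\n",
"print(sample)"
]
},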
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# This expects data written out by the notebook `TextSections`.\n",
"DATA_PATH = \"\"\n",
"WRITE_DATA_TO = \"\"\n",
"\n",
"# Column names\n",
"OUTCOME = \"ReadmissionInLessThan30Days\"\n",
"VISIT_ID = \"ChartGUID\"\n",
"SEC_NAMES = ['abdomen', 'activity', 'admission diagnoses', 'allergies', 'chest',\n",
" 'complications', 'consultations', 'core measures', 'course',\n",
" 'discharge condition', 'discharge diagnoses', 'discharge diet',\n",
" 'discharge medications', 'disposition', 'extremities', 'general',\n",
" 'heart', 'heent', 'history', 'laboratory data', 'lungs', 'neck',\n",
" 'neurologic', 'physical examination', 'present illness', 'procedures',\n",
" 'procedures performed', 'prognosis', 'social history', 'vital signs']\n",
"\n",
"dataframe = pd.read_csv(DATA_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"##################################################################################\n",
"# Dates\n",
"##################################################################################\n",
"# Process dates\n",
"MONTH = \"[jJ]anuary|[fF]ebruary|[mM]arch|[aA]pril|[mM]ay|[jJ]une|\"\n",
"MONTH += \"[jJ]uly|[aA]ugust|[sS]eptember|[oO]ctober|[nN]ovember|[dD]ecember\"\n",
"MONTH = \"(\" + MONTH + \"|[jJ]an|[fF]eb|[mM]ar|[aA]pr|[jJ]un|[jJ]ul|[aA]ug|[sS]ept|[oO]ct|[nN]ov|[dD]ec)\"\n",
"DATENOY = \"(\" + MONTH + \"\\s)(\\d+)\" # june 5\n",
"DATEWDS = \"((\" + MONTH + \"\\s)(\\d+)(\\s|\\,)\\s(\\d+))\" # june 5, 1995\n",
"DATEWDS_EU = \"((\\d+\\s*)\" + MONTH + \"(\\s|\\,)\\s(\\d+))\" # 5 june, 1995\n",
"DATENMS = \"((\\d{1,2}(\\/|-))+\\d{1,2}(\\/\\s?|-)\\d{2,4})\" # 06/05/1995, 06-05-1995\n",
"DATE = \"(\" + DATEWDS + \"|\" + DATEWDS_EU + \"|\" + DATENMS + \"|\" + DATENOY + \")\"\n",
"\n",
"\n",
"\n",
"##################################################################################\n",
"# Numbers\n",
"##################################################################################\n",
"NUMBER = \"((\\d+)(\\.|\\/)(\\d+))|(\\d+)\"\n",
"\n",
"NUMBER = NUMBER + \"|(zero)|(one)|(two)|(three)|(four)|(five)|(six)|(seven)|(eight)|(nine)\" +\"|(ten)|(eleven)|(twelve)|(thirteen)|(fourteen)|(fifteen)|(sixteen)|(seventeen)|(eighteen)|\" +\"(nineteen)|(twenty)|(thirty)|(forty)|(fifty)|(sixty)|(seventy)|(eighty)|(ninety)|(hundred)\"\n",
"NUMBER = \"((\\s)|(\\())(\" + NUMBER + \")((\\s)|(\\.)|(\\,)|(%))\"\n",
"\n",
"##################################################################################\n",
"# TCU/CRU - what to do?\n",
"##################################################################################\n",
"TCU = '((transitional care unit)|(tcu))'\n",
"CRU = '((cru)|(comprehensive rehab unit)|(comprehensive rehabilitation unit))'\n",
"\n",
"CRU_OR_TCU = re.compile(\"(\" + TCU + \"|\" + CRU + \")\")\n",
"\n",
"################################################################\n",
"# Admitted/Discharged\n",
"################################################################\n",
"# Deal with header, e.g. \"date of admission:\", \"date of discharge\"\n",
"ADM_PATTERN_EMPTYDATE = '((((date of admission)|(admission date))(:?))|(admitted:))(\\s?)'\n",
"ADM_PATTERN = re.compile(ADM_PATTERN_EMPTYDATE + \"DATE\")\n",
"ADM_PATTERN_EMPTYDATE = re.compile(ADM_PATTERN_EMPTYDATE)\n",
"\n",
"DCH_PATTERN_EMPTYDATE = '((date of discharge)|(discharge date)|(discharged))(:?)(\\s?)'\n",
"DCH_PATTERN = re.compile(DCH_PATTERN_EMPTYDATE + \"DATE\")\n",
"DCH_PATTERN_EMPTYDATE = re.compile(DCH_PATTERN_EMPTYDATE)\n",
"\n",
"################################################################\n",
"# SMH/Discharge summary\n",
"################################################################\n",
"SMH_FLUFF = re.compile(\"(sarasota memorial hospital - sarasota, fl)|(sarasota memorial hospital)|(smh hospitalist program)\")\n",
"DCH_FLUFF = re.compile(\"(discharge summary)\")\n",
"END_FLUFF = re.compile(\"\\[signature\\]\")\n",
"TIME = re.compile(\"(\\d{1,2}:\\d{2}\\s[aApP][mM])|(\\d{1,2}:\\d{2}(:\\d{2})?)\")\n",
"\n",
"################################################################\n",
"# Admitting diagnosis\n",
"################################################################\n",
"DIAG = \"((diagnosis)|(diagnoses))\"\n",
"DIAG = re.compile(\"((((admit)|(admitting)|(final discharge)|(final)|(discharge)|(primary)|(admission)|(principal)|(principle))\\s\" + DIAG + \")|(\" + DIAG + \"))(:?)\")\n",
"PTVISIT = re.compile(\"((patient visit)|(patient))\\s((identification)|(id))\\s((number)|#)(:?)((\\s#)?)(\\s?)(\\d+)\")\n",
"#PROBABLY_GUID = re.compile(\"\\s\\d{7}\\s\")\n",
"GUID = re.compile(\n",
" \"((inpatient)((-|:)?)(\\s?)(\\d{7}\\s))|\" + \\\n",
" \"((mrn:\\s)(\\d{7}))|\" + \\\n",
" \"((account|confirmation|jobid)(:?)(\\s?)(\\d{6,12}))|\"+\\\n",
" \"\\s\\d{7}\\s\"\n",
" )\n",
"\n",
"################################################################\n",
"# Dictated/transcribed\n",
"################################################################\n",
"DICT_TRANS = re.compile(\"(d|t):(\\s?)DATE\")\n",
"DICT_TRANS_BY = re.compile(\"((dictated by)|(transcribed by))(\\s?)\")\n",
"\n",
"################################################################\n",
"# Patient name\n",
"################################################################\n",
"PT_NAME = re.compile(\"(patient name:)(\\s?)(\\w+),(\\s?)(\\w+)\")\n",
"\n",
"################################################################\n",
"# Case #/history #\n",
"################################################################\n",
"CASE_HISTORY = re.compile(\"((case)|(history))(\\s?)(#|number|num)\")\n",
"\n",
"##################################################################################\n",
"# Med instructions\n",
"##################################################################################\n",
"#NUMBER = \"(\\d+)(\\.?)(\\d*)|(one)|(two)|(three)|(four)|(five)|(six)|(seven)|(eight)|(nine)|(ten)\"\n",
"AMOUNT = re.compile(\"(NUMBER)\" + \"(\\s?)((micrograms)|(microgram)|(mgs)|(mg)|(milligrams)|(milligram)|(milliequivalents))\") \n",
"FREQUENCY = re.compile(\"((by mouth)|(q\\sd)|(on odd days)|(times a day)|(times per day)|(at dinner)|(at breakfast)|(at bedtime)|(q\\. day)|(q d\\.)|(q a\\.m\\.)|(q p\\.m\\.)|(q am)|(q pm)|(q day)|(q\\.day)|(q\\.hs)|(meq)|(q\\.d\\.)|(b\\.i\\.d\\.)|(t\\.i\\.d\\.)|(q\\.i\\.d)|(q\\dh)|(p\\.r\\.n\\.)|(p\\.o\\.)|(daily))\") # http://www.medicinenet.com/script/main/art.asp?articlekey=6954\n",
"INSTRUCT = re.compile(\"((as directed)|(\\spo\\s)|(\\sp\\.o\\.\\s))\") # http://www.medicinenet.com/script/main/art.asp?articlekey=6954\n",
"\n",
"##################################################################################\n",
"# Text lists (1. XXX, 2. YYY, 3. ZZZ...)\n",
"##################################################################################\n",
"TEXT_LIST = \"(1\\..+?[a-z])\\. ([a-z])\"\n",
"LIST_START = \" 1\\.\\s\"\n",
"LIST_DELIM = \"\\. [1-9]{1,2}\\. \"\n",
"\n",
"##################################################################################\n",
"# Process!\n",
"##################################################################################\n",
"def encode_dates(x):\n",
" x = re.sub(DATE, \"DATE\", x)\n",
" return(x)\n",
"\n",
"def remove_admission_and_discharge(x):\n",
" x = re.sub(ADM_PATTERN, \"\", x)\n",
" x = re.sub(ADM_PATTERN_EMPTYDATE, \"\", x)\n",
" x = re.sub(DCH_PATTERN, \"\", x)\n",
" x = re.sub(DCH_PATTERN_EMPTYDATE, \"\", x)\n",
" return(x)\n",
"\n",
"def remove_quantity(x):\n",
" x = re.sub(AMOUNT, \" QUANTITY\", x)\n",
" return(x)\n",
"\n",
"def remove_instruction(x):\n",
" x = re.sub(INSTRUCT, \" INSTRUCTION \", x)\n",
" return(x)\n",
"\n",
"def remove_frequency(x):\n",
" x = re.sub(FREQUENCY, \"FREQUENCY\", x)\n",
" return(x)\n",
"\n",
"NUMBER = re.compile(NUMBER)\n",
"def encode_numbers(x):\n",
" x = re.sub(NUMBER, \" NUMBER \", x)\n",
" return(x)\n",
"\n",
"def remove_fluff(x):\n",
" x = re.sub(DCH_FLUFF, \"\", re.sub(SMH_FLUFF, \"\", x))\n",
" x = re.sub(END_FLUFF, \"\", x)\n",
" x = re.sub(TIME, \"\", x)\n",
" x = re.sub(DIAG, \"DIAGNOSIS\", x)\n",
" x = re.sub(DICT_TRANS, \"\", x)\n",
" x = re.sub(DICT_TRANS_BY, \"\", x)\n",
" x = re.sub(CASE_HISTORY, \"\", x)\n",
" x = re.sub(PTVISIT, \"\", x)\n",
" x = re.sub(GUID, \"\", x)\n",
" x = re.sub(PT_NAME, \"\", x)\n",
" return(x)\n",
"\n",
"def remove_end_of_note(x):\n",
" x = re.sub(\"(DATE ____)(\\w+)\", \"\", x)\n",
" x = re.sub(\"(cc:)(.+)\", \"\", x)\n",
" return(x)\n",
"\n",
"def encode_lists(x):\n",
" x = re.sub(TEXT_LIST, r\"\\1 ENDLIST \\2\", x)\n",
" x = re.sub(LIST_START, \" STARTLIST \", x)\n",
" x = re.sub(LIST_DELIM, \" COMMA \", x)\n",
" return(x)\n",
"\n",
"def remove_midsentence_periods(x):\n",
" x = re.sub(\" dr\\. \", \" dr \", x)\n",
" x = re.sub(\" mr\\. \", \" mr \", x)\n",
" x = re.sub(\" ms\\. \", \" ms \", x)\n",
" x = re.sub(\" mrs\\. \", \" mrs \", x)\n",
" x = re.sub(\" m\\.d\\. \", \" md \", x)\n",
" x = re.sub(\" [a-z]\\. \", \" \", x) # removing name abbrev\n",
" return(x)\n",
"\n",
"def encode_end_of_sentences(x):\n",
" # Make sure to cull non sentence ending periods first\n",
" x = re.sub(\"(\\.\\s)|(ENDLIST\\s)\", \" STOP \", x)\n",
" return(x)\n",
"\n",
"STOPWORDS = \"(\\s((a)|(an)|(the)|(at))\\s)|(\\s\\s)\"\n",
"\n",
"import time\n",
"def process_text(x):\n",
" x = str(x)\n",
" x = encode_dates(x)\n",
" x = remove_admission_and_discharge(x)\n",
" x = remove_fluff(x)\n",
" x = remove_frequency(x)\n",
" x = remove_instruction(x)\n",
" x = encode_lists(x)\n",
" x = encode_numbers(x) \n",
" x = remove_quantity(x)\n",
" x = re.sub(CRU_OR_TCU, \"\", x)\n",
" x = remove_midsentence_periods(x)\n",
" x = remove_end_of_note(x)\n",
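" # Drop the placeholder tokens themselves once the placeholder-anchored patterns above have run.\n",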
" x = re.sub(\"(QUANTITY)|(FREQUENCY)|(INSTRUCTION)|(NUMBER)|(DATE)|(COMMA)\", \"\", x)\n",
" x = encode_end_of_sentences(x)\n",
" x = re.sub(STOPWORDS, \" \", x)\n",
" return(x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"for section in SEC_NAMES:\n",
" textSections[section] = textSections[section].apply(process_text)"
]
},
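{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Optional sanity check (a sketch): eyeball a few cleaned sections before writing them out.\n",
"textSections[SEC_NAMES].head()"
]
},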
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"textSections.to_csv(WRITE_DATA_TO + \".csv\", index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Train/test split"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"train, test = train_test_split(textSections, stratify = textSections[OUTCOME])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"train.to_csv(WRITE_DATA_TO + \"_train.csv\", index=False)\n",
"test.to_csv(WRITE_DATA_TO + \"_train.csv\", index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}