From 783a1ab8b74d427f3791930b68491c3b0b1e2179 Mon Sep 17 00:00:00 2001
From: David Fuhry <david@129a-records.de>
Date: Thu, 22 Nov 2018 18:06:30 +0100
Subject: [PATCH] Minor fixes

---
 README.md          | 2 +-
 r/ExtractFromXML.R | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 07ce035..df8de47 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ Use that file to generate xml dump at wikipedias [Export page](https://en.wikipe
 
 # ExtractFromXML.Rasa
 
-Will read in the xml file from the data directory and extract the title and text of the pages in the dump. Will then write them to *texte.csv* in the data directory. For convenience will also create a texte.RDS file, load with `texte <- read.RDS("../data/texte.RDS")`.
+Will read in the xml file from the data directory and extract the title and text of the pages in the dump. Will then write them to *texte.csv* in the data directory (use `read.table` to import). For convenience it will also create a texte.RDS file; load with `texte <- readRDS("../data/texte.RDS")`.
 **NOTE:** For the script to work, the first line of the xml needs to be replaced with `<mediawiki xml:lang="en">`.
 
 
diff --git a/r/ExtractFromXML.R b/r/ExtractFromXML.R
index ac13f0b..2af3bc4 100644
--- a/r/ExtractFromXML.R
+++ b/r/ExtractFromXML.R
@@ -13,4 +13,6 @@ texts <- sapply(text.nodes, xml_text)
 df.out <- data.frame(Title = titles,
                      Text = texts)
 
-write.csv2(df.out, "../data/texte.csv")
+saveRDS(df.out, "../data/texte.RDS")
+
+write.table(df.out, "../data/texte.csv")
\ No newline at end of file
-- 
GitLab