Dataset: tweets_hate_speech_detection
Tasks: Text Classification
Modalities: Text
Formats: parquet
Sub-tasks: sentiment-classification
Languages: English
Size: 10K - 100K
License:

Commit 5eb29d1
Parent(s): 461a9d1

Add test split (#2)
- Add test split (451e3d04b584c0efe9ee8d6f22342e82a1be7085)
- Update metadata in dataset card (f657281952e26e4fe48b673a5cbacade5118e5bb)

Files changed:
- README.md +6 -3
- tweets_hate_speech_detection.py +13 -14
README.md CHANGED
@@ -71,10 +71,13 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 3191888
     num_examples: 31962
-  download_size:
-  dataset_size:
+  - name: test
+    num_bytes: 1711606
+    num_examples: 17197
+  download_size: 4738708
+  dataset_size: 4903494
 ---
 
 # Dataset Card for Tweets Hate Speech Detection
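With the test split declared, the card's dataset_info now lists 31,962 training and 17,197 test examples. A minimal check with the datasets library, shown as a sketch (it assumes the dataset is addressed by the Hub name tweets_hate_speech_detection):

    from datasets import load_dataset

    # Both splits should now be available; the expected sizes come from the
    # updated dataset card metadata above.
    ds = load_dataset("tweets_hate_speech_detection")
    print(ds["train"].num_rows)  # expected: 31962
    print(ds["test"].num_rows)   # expected: 17197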
tweets_hate_speech_detection.py CHANGED
@@ -29,6 +29,8 @@ The objective of this task is to detect hate speech in tweets. For the sake of s
 Formally, given a training sample of tweets and labels, where label ‘1’ denotes the tweet is racist/sexist and label ‘0’ denotes the tweet is not racist/sexist, your objective is to predict the labels on the given test dataset.
 """
 
+_HOMEPAGE = "https://github.com/sharmaroshan/Twitter-Sentiment-Analysis"
+
 _CITATION = """\
 @InProceedings{Z
 Roshan Sharma:dataset,
@@ -38,9 +40,10 @@ year={2018}
 }
 """
 
-
-    "https://raw.githubusercontent.com/sharmaroshan/Twitter-Sentiment-Analysis/master/train_tweet.csv"
-
+_URL = {
+    "train": "https://raw.githubusercontent.com/sharmaroshan/Twitter-Sentiment-Analysis/master/train_tweet.csv",
+    "test": "https://raw.githubusercontent.com/sharmaroshan/Twitter-Sentiment-Analysis/master/test_tweets.csv",
+}
 
 
 class TweetsHateSpeechDetection(datasets.GeneratorBasedBuilder):
@@ -55,30 +58,26 @@ class TweetsHateSpeechDetection(datasets.GeneratorBasedBuilder):
                     "tweet": datasets.Value("string"),
                 }
             ),
-            homepage=
+            homepage=_HOMEPAGE,
             citation=_CITATION,
             task_templates=[TextClassification(text_column="tweet", label_column="label")],
         )
 
     def _split_generators(self, dl_manager):
-
-
+        path = dl_manager.download(_URL)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath":
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path["test"]}),
         ]
 
     def _generate_examples(self, filepath):
         """Generate Tweet examples."""
         with open(filepath, encoding="utf-8") as csv_file:
-            csv_reader = csv.reader(
+            csv_reader = csv.DictReader(
                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
             )
-            next(csv_reader, None)
             for id_, row in enumerate(csv_reader):
-                row = row[1:]
-                (label, tweet) = row
-
                 yield id_, {
-                    "label": int(label),
-                    "tweet": tweet,
+                    "label": int(row.setdefault("label", -1)),
+                    "tweet": row["tweet"],
                 }
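Switching from csv.reader to csv.DictReader lets a single _generate_examples serve both files: the train CSV carries a label column, while the test CSV presumably does not, so row.setdefault("label", -1) emits -1 for unlabeled test tweets. A self-contained sketch of that behavior (the exact test-file column layout is an assumption, not shown in the diff):

    import csv
    import io

    # Hypothetical miniature CSVs; the real files are fetched via dl_manager.download(_URL),
    # which returns a dict of local paths keyed like _URL ("train", "test").
    train_csv = io.StringIO('id,label,tweet\n"1","0","an example tweet"\n')
    test_csv = io.StringIO('id,tweet\n"1","an unlabeled tweet"\n')  # assumed: no label column

    for csv_file in (train_csv, test_csv):
        reader = csv.DictReader(csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
        for row in reader:
            # Mirrors the updated script: a missing "label" key falls back to -1.
            print(int(row.setdefault("label", -1)), row["tweet"])
    # -> 0 an example tweet
    # -> -1 an unlabeled tweet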