Datasets:
				
			
			
	
			
	
		
			
	
		Size:
	
	
	
	
	1M<n<10M
	
	
	ArXiv:
	
	
	
	
	
	
	
	
Tags:
	
	
	
	
	programming-language
	
	
	
	
	code
	
	
	
	
	program-synthesis
	
	
	
	
	automatic-code-repair
	
	
	
	
	code-retrieval
	
	
	
	
	code-translation
	
	
	License:
	
	
	
	
	
	
	
update loader
Browse files
- xCodeEval.py (+4 lines, -3 lines)
    	
        xCodeEval.py
    CHANGED
    
    | @@ -5,6 +5,7 @@ import json | |
| 5 | 
             
            import zipfile
         | 
| 6 | 
             
            from collections import defaultdict
         | 
| 7 | 
             
            import datasets
         | 
|  | |
| 8 | 
             
            import textwrap
         | 
| 9 | 
             
            from multiprocessing import Pool, cpu_count
         | 
| 10 | 
             
            logger = datasets.logging.get_logger(__name__)
         | 
| @@ -2002,9 +2003,9 @@ class xCodeEval(datasets.GeneratorBasedBuilder): | |
| 2002 | 
             
            		TRAIN_FILE_NAMES = get_file_name(task_name, "train")
         | 
| 2003 | 
             
            		VALIDATION_FILE_NAMES = get_file_name(task_name, "validation")
         | 
| 2004 | 
             
            		TEST_FILE_NAMES = get_file_name(task_name, "test")
         | 
| 2005 | 
            -
            		train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=file_name) for file_name in  TRAIN_FILE_NAMES]
         | 
| 2006 | 
            -
            		validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=file_name) for file_name in  VALIDATION_FILE_NAMES]
         | 
| 2007 | 
            -
            		test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=file_name) for file_name in  TEST_FILE_NAMES]
         | 
| 2008 |  | 
| 2009 | 
             
            		train_downloaded_files = dl_manager.download(train_urls)
         | 
| 2010 | 
             
            		validation_downloaded_files = dl_manager.download(validation_urls)
         | 
|  | |
| 5 | 
             
            import zipfile
         | 
| 6 | 
             
            from collections import defaultdict
         | 
| 7 | 
             
            import datasets
         | 
| 8 | 
            +
            import urllib.parse
         | 
| 9 | 
             
            import textwrap
         | 
| 10 | 
             
            from multiprocessing import Pool, cpu_count
         | 
| 11 | 
             
            logger = datasets.logging.get_logger(__name__)
         | 
|  | |
| 2003 | 
             
            		TRAIN_FILE_NAMES = get_file_name(task_name, "train")
         | 
| 2004 | 
             
            		VALIDATION_FILE_NAMES = get_file_name(task_name, "validation")
         | 
| 2005 | 
             
            		TEST_FILE_NAMES = get_file_name(task_name, "test")
         | 
| 2006 | 
            +
            		train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=urllib.parse.quote(file_name)) for file_name in  TRAIN_FILE_NAMES]
         | 
| 2007 | 
            +
            		validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=urllib.parse.quote(file_name)) for file_name in  VALIDATION_FILE_NAMES]
         | 
| 2008 | 
            +
            		test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=urllib.parse.quote(file_name)) for file_name in  TEST_FILE_NAMES]
         | 
| 2009 |  | 
| 2010 | 
             
            		train_downloaded_files = dl_manager.download(train_urls)
         | 
| 2011 | 
             
            		validation_downloaded_files = dl_manager.download(validation_urls)
         | 
