{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T01:11:49.935813Z" }, "title": "GPT-NeoX-20B: An Open-Source Autoregressive Language Model", "authors": [ { "first": "Sid", "middle": [], "last": "Black", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Stella", "middle": [], "last": "Biderman", "suffix": "", "affiliation": {}, "email": "stella@eleuther.ai" }, { "first": "Eric", "middle": [], "last": "Hallahan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Quentin", "middle": [], "last": "Anthony", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Leo", "middle": [], "last": "Gao", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Laurence", "middle": [], "last": "Golding", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Connor", "middle": [], "last": "Leahy", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kyle", "middle": [], "last": "Mcdonell", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jason", "middle": [], "last": "Phang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Michael", "middle": [], "last": "Pieler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mikel", "middle": [], "last": "Artetxe", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shruti", "middle": [], "last": "Bhosale", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Naman", "middle": [], "last": "Goyal", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Todor", "middle": [], "last": "Mihaylov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Myle", "middle": [], "last": "Ott", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sam", "middle": [], "last": "Shleifer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Victoria", "middle": [], "last": "Xi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jingfei", "middle": [], "last": "Lin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Srinivasan", "middle": [], "last": "Du", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ramakanth", "middle": [], "last": "Iyer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Giri", "middle": [], "last": "Pasunuru", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xian", "middle": [], "last": "Anantharaman", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shuohui", "middle": [], "last": "Li", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Halil", "middle": [], "last": "Chen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mandeep", "middle": [], "last": "Akin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Louis", "middle": [], "last": "Baines", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xing", "middle": [], "last": "Martin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Singh", "middle": [], "last": "Zhou", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Brian", "middle": [], "last": "Koura", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jeff", "middle": [], "last": "O'horo", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Luke", "middle": [], "last": "Wang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mona", "middle": [], "last": "Zettlemoyer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zornitsa", "middle": [], "last": "Diab", "suffix": "", "affiliation": {}, "email": "" }, { "first": "", "middle": [], 
"last": "Kozareva", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Amanda", "middle": [], "last": "Askell", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yuntao", "middle": [], "last": "Bai", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Anna", "middle": [], "last": "Chen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dawn", "middle": [], "last": "Drain", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Deep", "middle": [], "last": "Ganguli", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tom", "middle": [], "last": "Henighan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andy", "middle": [], "last": "Jones", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nicholas", "middle": [], "last": "Joseph", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Benjamin", "middle": [], "last": "Mann", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nova", "middle": [], "last": "Dassarma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nelson", "middle": [], "last": "El- Hage", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zac", "middle": [], "last": "Hatfield-Dodds", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Danny", "middle": [], "last": "Hernandez", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jack- Son", "middle": [], "last": "Kernion", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kamal", "middle": [], "last": "Ndousse", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Catherine", "middle": [], "last": "Olsson", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dario", "middle": [ "2020" ], "last": "Amodei", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tom", "middle": [ "B" ], "last": "Brown", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jack", "middle": [], "last": "Clark", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sam", "middle": [], "last": "Mccan- Dlish", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Chris", "middle": [], "last": "Olah", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jared", "middle": [ "D" ], "last": "Kaplan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "", "middle": [ "A" ], "last": "Gen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Emily", "middle": [ "M" ], "last": "Bender", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Timnit", "middle": [], "last": "Gebru", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Angelina", "middle": [], "last": "Mcmillan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nick", "middle": [], "last": "Ryder", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Melanie", "middle": [], "last": "Subbiah", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Prafulla", "middle": [], "last": "Dhariwal", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arvind", "middle": [], "last": "Neelakantan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Pranav", "middle": [], "last": "Shyam", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Girish", "middle": [], "last": "Sastry", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sandhini", "middle": [], "last": "Agarwal", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ariel", "middle": [], "last": "Herbert-Voss", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gretchen", 
"middle": [], "last": "Krueger", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rewon", "middle": [], "last": "Child", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aditya", "middle": [], "last": "Ramesh", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Daniel", "middle": [ "M" ], "last": "Ziegler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jeffrey", "middle": [], "last": "Wu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Clemens", "middle": [], "last": "Winter", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Christopher", "middle": [], "last": "Hesse", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mark", "middle": [], "last": "Chen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eric", "middle": [], "last": "Sigler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ma- Teusz", "middle": [], "last": "Litwin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Scott", "middle": [], "last": "Gray", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Benjamin", "middle": [], "last": "Chess", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Christopher", "middle": [], "last": "Berner", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sam", "middle": [ "2020" ], "last": "Mccandlish", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alec", "middle": [], "last": "Radford", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ilya", "middle": [], "last": "Sutskever", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nelson", "middle": [], "last": "Elhage", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Neel", "middle": [], "last": "Nanda", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tom", "middle": [], "last": "Conerly", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jackson", "middle": [], "last": "Kernion", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Liane", "middle": [], "last": "Lovitt", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jared", "middle": [], "last": "Ka- Plan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aaron", "middle": [], "last": "Harlap", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Deepak", "middle": [], "last": "Narayanan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Amar", "middle": [], "last": "Phanishayee", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Vivek", "middle": [], "last": "Seshadri", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nikhil", "middle": [], "last": "Devanur", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Greg", "middle": [], "last": "Ganger", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dan", "middle": [], "last": "Hendrycks", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Collin", "middle": [], "last": "Burns", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Steven", "middle": [], "last": "Basart", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andy", "middle": [], "last": "Zou", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mantas", "middle": [], "last": "Mazeika", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Saurav", "middle": [], "last": "Kadavath", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Akul", "middle": [], "last": "Arora", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eric", "middle": 
[], "last": "Tang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mor", "middle": [], "last": "Katz", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jacob", "middle": [], "last": "Jackson", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Heewoo", "middle": [], "last": "Jun", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Chris", "middle": [], "last": "Hallacy", "suffix": "", "affiliation": {}, "email": "" }, { "first": "John", "middle": [], "last": "Schul- Man", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jordan", "middle": [], "last": "Hoffmann", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sebastian", "middle": [], "last": "Borgeaud", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arthur", "middle": [], "last": "Mensch", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Elena", "middle": [], "last": "Buchatskaya", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Trevor", "middle": [], "last": "Cai", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eliza", "middle": [], "last": "Rutherford", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Diego", "middle": [], "last": "De", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Las", "middle": [], "last": "Casas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lisa", "middle": [ "Anne" ], "last": "Hendricks", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Johannes", "middle": [], "last": "Welbl", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aidan", "middle": [], "last": "Clark", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Adam", "middle": [], "last": "Roberts", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Won", "middle": [], "last": "Chung", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Anselm", "middle": [], "last": "Levskaya", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gaurav", "middle": [], "last": "Mishra", "suffix": "", "affiliation": {}, "email": "" }, { "first": "James", "middle": [], "last": "Bradbury", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Daniel", "middle": [], "last": "Andor", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sha- Ran", "middle": [], "last": "Narang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Brian", "middle": [], "last": "Lester", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Colin", "middle": [], "last": "Gaffney", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Afroz", "middle": [], "last": "Mohiuddin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Curtis", "middle": [], "last": "Hawthorne", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aitor", "middle": [], "last": "Lewkowycz", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alex", "middle": [], "last": "Salcianu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marc", "middle": [], "last": "Van Zee", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jacob", "middle": [], "last": "Austin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sebas- Tian", "middle": [], "last": "Goodman", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Livio", "middle": [ "Baldini" ], "last": "Soares", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Haitang", "middle": [], "last": "Hu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sasha", "middle": 
[], "last": "Tsvyashchenko", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aakanksha", "middle": [], "last": "Chowdhery", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jas- Mijn", "middle": [], "last": "Bastings", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jannis", "middle": [], "last": "Bulian", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xavier", "middle": [], "last": "Garcia", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jianmo", "middle": [], "last": "Ni", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andrew", "middle": [], "last": "Chen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kathleen", "middle": [], "last": "Kenealy", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jonathan", "middle": [ "H" ], "last": "Clark", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Stephan", "middle": [], "last": "Lee", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dan", "middle": [], "last": "Garrette", "suffix": "", "affiliation": {}, "email": "" }, { "first": "James", "middle": [], "last": "Lee-Thorp", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Colin", "middle": [], "last": "Raffel", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Noam", "middle": [], "last": "Shazeer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marvin", "middle": [], "last": "Ritter", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maarten", "middle": [], "last": "Bosma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alexandre", "middle": [], "last": "Passos", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jeremy", "middle": [], "last": "Maitin-Shepard", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Noah", "middle": [], "last": "Fiedel", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mark", "middle": [], "last": "Omernick", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Brennan", "middle": [], "last": "Saeta", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ryan", "middle": [], "last": "Sepassi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alexander", "middle": [], "last": "Spiridonov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Joshua", "middle": [], "last": "Newlan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andrea", "middle": [], "last": "Gesmundo", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Victor", "middle": [], "last": "Sanh", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Albert", "middle": [], "last": "Webson", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Stephen", "middle": [ "H" ], "last": "Bach", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lintang", "middle": [], "last": "Sutawika", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zaid", "middle": [], "last": "Alyafeai", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Antoine", "middle": [], "last": "Chaffin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arnaud", "middle": [], "last": "Stiegler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Teven", "middle": [], "last": "Le Scao", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arun", "middle": [], "last": "Raja", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Manan", "middle": [], "last": "Dey", "suffix": "", "affiliation": {}, "email": "" }, { "first": 
"Saiful", "middle": [], "last": "Bari", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Canwen", "middle": [], "last": "Xu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Urmish", "middle": [], "last": "Thakker", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sharma", "middle": [], "last": "Sharma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eliza", "middle": [], "last": "Szczechla", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Taewoon", "middle": [], "last": "Kim", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gunjan", "middle": [], "last": "Chhablani", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nihal", "middle": [], "last": "Nayak", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Debajyoti", "middle": [], "last": "Datta", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jonathan", "middle": [], "last": "Chang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tian-Jian", "middle": [], "last": "Jiang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Han", "middle": [], "last": "Wang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Matteo", "middle": [], "last": "Manica", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sheng", "middle": [], "last": "Shen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zheng", "middle": [ "Xin" ], "last": "Yong", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Harshit", "middle": [], "last": "Pandey", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rachel", "middle": [], "last": "Bawden", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Thomas", "middle": [], "last": "Wang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Trishala", "middle": [], "last": "Neeraj", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jos", "middle": [], "last": "Rozen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Abheesht", "middle": [], "last": "Sharma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andrea", "middle": [], "last": "Santilli", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Thibault", "middle": [], "last": "F\u00e9vry", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jason", "middle": [ "Alan" ], "last": "Fries", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ryan", "middle": [], "last": "Teehan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tali", "middle": [], "last": "Bers", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mohammad", "middle": [], "last": "Shoeybi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mostofa", "middle": [], "last": "Patwary", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Raul", "middle": [], "last": "Puri", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shaden", "middle": [], "last": "Smith", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Brandon", "middle": [], "last": "Norick", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Patrick", "middle": [], "last": "Legresley", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Samyam", "middle": [], "last": "Rajbhandari", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jared", "middle": [], "last": "Casper", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zhun", "middle": [], "last": "Liu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shrimai", 
"middle": [], "last": "Prabhumoye", "suffix": "", "affiliation": {}, "email": "" }, { "first": "George", "middle": [], "last": "Zerveas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Vijay", "middle": [], "last": "Korthikanti", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Elton", "middle": [], "last": "Zhang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Reza", "middle": [ "Yazdani" ], "last": "Aminabadi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Julie", "middle": [], "last": "Bernauer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xia", "middle": [], "last": "Song", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yuxiong", "middle": [], "last": "He", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Michael", "middle": [], "last": "Houston", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Saurabh", "middle": [], "last": "Tiwary", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bryan", "middle": [ "2022" ], "last": "Catanzaro", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nisan", "middle": [], "last": "Stiennon", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Long", "middle": [], "last": "Ouyang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ryan", "middle": [], "last": "Lowe", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Chelsea", "middle": [], "last": "Voss", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yu", "middle": [], "last": "Sun", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shuohuan", "middle": [], "last": "Wang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shikun", "middle": [], "last": "Feng", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Siyu", "middle": [], "last": "Ding", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Chao", "middle": [], "last": "Pang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Junyuan", "middle": [], "last": "Shang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jiaxiang", "middle": [], "last": "Liu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xuyi", "middle": [], "last": "Chen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yanbin", "middle": [], "last": "Zhao", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yuxiang", "middle": [], "last": "Lu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Weixin", "middle": [], "last": "Liu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zhihua", "middle": [], "last": "Wu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Weibao", "middle": [], "last": "Gong", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jianzhong", "middle": [], "last": "Liang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zhizhou", "middle": [], "last": "Shang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Peng", "middle": [], "last": "Sun", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xuan", "middle": [], "last": "Ouyang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dianhai", "middle": [], "last": "Yu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hao", "middle": [], "last": "Tian", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hua", "middle": [], "last": "Wu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Haifeng", "middle": [ "2021" ], "last": "Wang", "suffix": "", 
"affiliation": {}, "email": "" }, { "first": "", "middle": [], "last": "Ernie", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Zeerak", "middle": [], "last": "Talat", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aur\u00e9lie", "middle": [], "last": "N\u00e9v\u00e9ol", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Miruna", "middle": [], "last": "Clinciu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shayne", "middle": [], "last": "Longpre", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sasha", "middle": [], "last": "Luccioni", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maraim", "middle": [], "last": "Masoud", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Margaret", "middle": [], "last": "Mitchell", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dragomir", "middle": [], "last": "Radev", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shanya", "middle": [], "last": "Sharma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arjun", "middle": [], "last": "Subramonian", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jaesung", "middle": [], "last": "Tae", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Samson", "middle": [], "last": "Tan", "suffix": "", "affiliation": {}, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "We introduce GPT-NeoX-20B, a 20 billion parameter autoregressive language model trained on the Pile, whose weights will be made freely and openly available to the public through a permissive license. It is, to the best of our knowledge, the largest dense autoregressive model that has publicly available weights at the time of submission. In this work, we describe GPT-NeoX-20B's architecture and training, and evaluate its performance on a range of language-understanding, mathematics and knowledge-based tasks. We open-source the training and evaluation code, as well as the model weights, at https://github.com/ EleutherAI/gpt-neox.", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [ { "text": "We introduce GPT-NeoX-20B, a 20 billion parameter autoregressive language model trained on the Pile, whose weights will be made freely and openly available to the public through a permissive license. It is, to the best of our knowledge, the largest dense autoregressive model that has publicly available weights at the time of submission. In this work, we describe GPT-NeoX-20B's architecture and training, and evaluate its performance on a range of language-understanding, mathematics and knowledge-based tasks. We open-source the training and evaluation code, as well as the model weights, at https://github.com/ EleutherAI/gpt-neox.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Over the past several years, there has been an explosion in research surrounding large language models (LLMs) for natural language processing, catalyzed largely by the impressive performance of Transformer-based language models such as BERT (Devlin et al., 2019) , GPT-2 (Radford et al., 2019) , GPT-3 (Brown et al., 2020) , and T5 (Raffel et al., 2020) . 
One of the most impactful outcomes of this research has been the discovery that the performance of LLMs scales predictably as a power-law with the number of parameters, with architecture details such as width/depth ratio having a minimal impact on performance within a wide range (Kaplan et al., 2020) . A consequence of this has been an abundance of research focusing on scaling Transformer models up to ever-larger scales, resulting in dense models that surpass 500B parameters (Smith et al., 2022; Chowdhery et al., 2022) , a milestone that would have been almost unthinkable just a few years prior.", "cite_spans": [ { "start": 236, "end": 262, "text": "BERT (Devlin et al., 2019)", "ref_id": null }, { "start": 265, "end": 293, "text": "GPT-2 (Radford et al., 2019)", "ref_id": null }, { "start": 296, "end": 322, "text": "GPT-3 (Brown et al., 2020)", "ref_id": null }, { "start": 332, "end": 353, "text": "(Raffel et al., 2020)", "ref_id": "BIBREF14" }, { "start": 636, "end": 657, "text": "(Kaplan et al., 2020)", "ref_id": null }, { "start": 836, "end": 856, "text": "(Smith et al., 2022;", "ref_id": null }, { "start": 857, "end": 880, "text": "Chowdhery et al., 2022)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Today, there are dozens of publicly acknowledged LLMs in existence. The largest have more than two orders of magnitude more parameters than GPT-2, and even at that scale there are nearly a dozen different models. However, these models are almost universally the protected intellectual property of large tech companies, and are gated behind a commercial API, available only upon request, or not available for outsider use at all. To our knowledge, the only freely and publicly available dense autoregressive language models larger than GPT-2 are GPT-Neo (2.7B parameters) (Black et al., 2021), GPT-J-6B (Wang and Komatsuzaki, 2021) , Megatron-11B 1 , Pangu-\u03b1-13B (Zeng et al., 2021) , and the recently released FairSeq models (2.7B, 6.7B, and 13B parameters) (Artetxe et al., 2021) .", "cite_spans": [ { "start": 602, "end": 630, "text": "(Wang and Komatsuzaki, 2021)", "ref_id": "BIBREF21" }, { "start": 662, "end": 681, "text": "(Zeng et al., 2021)", "ref_id": null }, { "start": 758, "end": 780, "text": "(Artetxe et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In this paper we introduce GPT-NeoX-20B, a 20 billion parameter open source autoregressive language model. We make the models weights freely and openly available to the public through a permissive license, motivated by the belief that open access to LLMs is critical to advancing research in a wide range of areas-particularly in AI safety, mechanistic interpretability, and the study of how LLM capabilities scale. Many of the most interesting capabilities of LLMs only emerge above a certain number of parameters, and they have many properties that simply cannot be studied in smaller models. 
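For readers who mainly want to experiment with the released weights, a minimal usage sketch follows. It assumes the checkpoint is mirrored on the Hugging Face Hub under the identifier "EleutherAI/gpt-neox-20b"; the canonical weights, checkpoints, and training code are in the GitHub repository linked in the abstract:

```python
# Minimal sketch of loading the released checkpoint with Hugging Face
# `transformers`. The hub identifier below is an assumption; see
# https://github.com/EleutherAI/gpt-neox for the canonical release artifacts.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")

prompt = "GPT-NeoX-20B is a 20 billion parameter autoregressive language model"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```

Note that the full set of 20 billion parameters occupies roughly 40GB in 16-bit precision, so loading the model typically requires multiple GPUs or CPU offloading.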
Although safety is often cited as a justification for keeping model weights private, we believe this is insufficient to prevent misuse, and is largely a limitation on the ability to probe and study LLMs for researchers not based at the small number of organizations that have access to state of the art language models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In the following sections, we give a broad overview of GPT-NeoX-20B's architecture and training hyperparameters, detail the hardware and software setup used for training and evaluation, and elaborate on the choices made when designing the training dataset and tokenization. We also address of some of the difficulties and unknowns we encountered in training such a large model. We place significant importance on the broader impacts of the release GPT-NeoX-20B and other such LLMs, and have prepared a separate manuscript for dissecting these issues in greater detail.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In addition, we also make available the model weights at evenly spaced 1000 step intervals throughout the whole of training. We hope that by making a wide range of checkpoints throughout training freely available, we will facilitate research on the training dynamics of LLMs, as well as the aforementioned areas of AI safety and interpretability.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "GPT-NeoX-20B is an autoregressive transformer decoder model whose architecture largely follows that of GPT-3 (Brown et al., 2020) , with a few notable deviations described below. Our model has 20 billion parameters, of which 19.9 billion are \"non-embedding\" parameters that Kaplan et al. (2020) identify as the proper number to use for scaling laws analysis. Our model has 44 layers, a hidden dimension size of 6144, and 64 heads.", "cite_spans": [ { "start": 103, "end": 129, "text": "GPT-3 (Brown et al., 2020)", "ref_id": null }, { "start": 274, "end": 294, "text": "Kaplan et al. (2020)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Model Design and Implementation", "sec_num": "2" }, { "text": "Although our architecture is largely similar to GPT-3, there are some notable differences. In this section we give a high-level overview of those differences, but ask the reader to refer to (Brown et al., 2020) for full details of the model architecture. Our model architecture is almost identical to that of GPT-J (Wang and Komatsuzaki, 2021) 2 , however we choose to use GPT-3 as the point of reference because there is no canonical published reference on the design of GPT-J.", "cite_spans": [ { "start": 190, "end": 210, "text": "(Brown et al., 2020)", "ref_id": null }, { "start": 315, "end": 343, "text": "(Wang and Komatsuzaki, 2021)", "ref_id": "BIBREF21" }, { "start": 344, "end": 345, "text": "2", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Model Architecture", "sec_num": "2.1" }, { "text": "We use rotary embeddings (Su et al., 2021) instead of the learned positional embeddings used in GPT models (Radford et al., 2018) , based on our positive prior experiences using it in training LLMs. Rotary embeddings are a form of static relative positional embeddings. In brief, they twist the embedding space such that the attention of a token at position m to token at position n is linearly dependent on m \u2212 n. 
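As an informal illustration of this rotation (a sketch for a single unbatched sequence, not the training implementation in the GPT-NeoX codebase), applying the position-dependent rotation to a query or key vector looks roughly like:

```python
import torch

def rotate_half(x):
    # Pair the first half of the dimensions with the second half and rotate
    # each pair by 90 degrees; used to build the 2D rotations described below.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary(x, positions, base=10000):
    # x: (seq_len, dim) queries or keys; positions: (seq_len,) token positions.
    dim = x.shape[-1]
    theta = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    angles = positions.to(torch.float32)[:, None] * theta[None, :]  # (seq, dim/2)
    emb = torch.cat((angles, angles), dim=-1)                       # (seq, dim)
    return x * emb.cos() + rotate_half(x) * emb.sin()
```

In GPT-NeoX-20B this rotation is applied only to a fraction of each attention head's dimensions, as described below.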
More formally, they modify the standard multiheaded attention equations from", "cite_spans": [ { "start": 25, "end": 42, "text": "(Su et al., 2021)", "ref_id": null }, { "start": 107, "end": 129, "text": "(Radford et al., 2018)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "$\\mathrm{softmax}\\left(\\frac{1}{\\sqrt{d}}\\sum_{n,m} x_m^T W_q^T W_k x_n\\right),$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "where $x_m$, $x_n$ are (batched) embeddings of the tokens at positions $m$ and $n$ respectively and $W_q$, $W_k$ are the query and key weights respectively, to", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "$\\mathrm{softmax}\\left(\\frac{1}{\\sqrt{d}}\\sum_{n,m} x_m^T W_q^T R^d_{\\Theta,(n-m)} W_k x_n\\right),$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "where $R^d_{\\Theta,x}$ is a $d \\times d$ block-diagonal matrix whose $i$-th block is a 2D rotation by $x\\theta_i$, with hyperparameters $\\Theta = \\{\\theta_i = 10000^{-2i/d} \\mid i \\in \\{0, 1, 2, \\ldots, (d-1)/2\\}\\}$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "While Su et al. (2021) apply rotary embeddings to every embedding vector, we follow Wang and Komatsuzaki (2021) and instead apply them only to the first 25% of embedding vector dimensions. Our initial experiments indicate that this strikes the best balance of performance and computational efficiency. 3", "cite_spans": [ { "start": 84, "end": 111, "text": "Wang and Komatsuzaki (2021)", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Rotary Positional Embeddings", "sec_num": "2.1.1" }, { "text": "We compute the Attention and Feed-Forward (FF) layers in parallel 4 and sum the results, rather than running them in series. This is primarily for efficiency purposes, as each residual addition with op-sharding requires one all-reduce in the forward pass and one in the backwards pass (Shoeybi et al., 2020) . By computing the Attention and FFs in parallel, the results can be reduced locally before performing a single all-reduce. In Mesh Transformer JAX (Wang, 2021) , this led to a 15% throughput increase while yielding loss curves comparable to the series formulation during early training.", "cite_spans": [ { "start": 285, "end": 307, "text": "(Shoeybi et al., 2020)", "ref_id": null }, { "start": 456, "end": 468, "text": "(Wang, 2021)", "ref_id": "BIBREF20" } ], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "Due to an oversight in our code, we unintentionally apply two independent Layer Norms instead of using a tied layer norm the way Wang and Komatsuzaki (2021) does.
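In code, the difference between the intended tied-layer-norm parallel block and what our codebase actually computes is easiest to see schematically. The sketch below is illustrative only; attn, ff, ln1, and ln2 are stand-ins for the real modules, not our Megatron/DeepSpeed implementation:

```python
# Tied layer norm, as in GPT-J / Mesh Transformer JAX: a single LayerNorm
# output feeds both the attention and the feed-forward branch.
def parallel_block_tied(x, ln1, attn, ff):
    h = ln1(x)
    return x + attn(h) + ff(h)

# Untied layer norms: what GPT-NeoX-20B ended up computing (see the
# equations that follow).
def parallel_block_untied(x, ln1, ln2, attn, ff):
    return x + attn(ln1(x)) + ff(ln2(x))
```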
Instead of computing", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "x + Attn(LN 1 (x)) + FF(LN 1 (x))", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "as intended, our codebase unties the layer norms:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "x + Attn(LN 1 (x)) + FF(LN 2 (x)).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "Unfortunately, this was only noticed after we were much too far into training to restart. Subsequent experiments at small scales indicated that the untied layer norm makes no difference in performance, but we nevertheless wish to highlight this in the interest of transparency.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Parallel Attention + FF Layers", "sec_num": "2.1.2" }, { "text": "For the Feed-Forward output layers before the residuals, we used the initialization scheme introduced in Wang (2021), 2", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Initialization", "sec_num": "2.1.3" }, { "text": "d . This prevents activations from growing with increasing depth and width, with the factor of 2 compensating for the fact that the parallel and feed-forward layers are organized in parallel. For all other layers, we use the small init scheme from Nguyen and Salazar", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "L \u221a", "sec_num": null }, { "text": "(2019), 2 d+4d", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "L \u221a", "sec_num": null }, { "text": "While GPT-3 uses alternating dense and sparse layers using the technique introduced in Child et al. (2019), we instead opt to exclusively use dense layers to reduce implementation complexity.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "All Dense Layers", "sec_num": "2.1.4" }, { "text": "Our model is trained using a codebase that builds on Megatron (Shoeybi et al., 2020) and Deep-Speed to facilitate efficient and straightforward training of large language models with tens of billions of parameters. We use the official PyTorch v1.10.0 release binary package compiled with CUDA 11.1. This package is bundled with NCCL 2.10.3 for distributed communications.", "cite_spans": [ { "start": 53, "end": 84, "text": "Megatron (Shoeybi et al., 2020)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Software Libraries", "sec_num": "2.2" }, { "text": "We trained GPT-NeoX-20B on twelve Supermicro AS-4124GO-NART servers, each with eight NVIDIA A100-SXM4-40GB GPUs and configured with two AMD EPYC 7532 CPUs. All GPUs can directly access the InfiniBand switched fabric through one of four ConnectX-6 HCAs for GPUDirect RDMA. Two NVIDIA MQM8700-HS2R switches-connected by 16 links-compose the spine of this InfiniBand network, with one link per node CPU socket connected to each switch. Figure 7 shows a simplified overview of a node as configured for training.", "cite_spans": [], "ref_spans": [ { "start": 433, "end": 441, "text": "Figure 7", "ref_id": "FIGREF5" } ], "eq_spans": [], "section": "Hardware", "sec_num": "2.3" }, { "text": "Due to the intractability of performing a hyperparameter sweep for a 20 billion parameter model, we opted to use the values from Brown et al. 
(2020) to guide our choice of hyperparameters. As Brown et al. (2020) did not train a model at our exact scale, we interpolate between the learning rates of their 13B and 175B models to arrive at a learning rate of 0.97E\u22124. Based on the results of smaller scale experiments, we select a weight decay of 0.01. To achieve a higher training throughput, we opt to use the same batch size as OpenAI's 175B model (approximately 3.15M tokens, or 1538 contexts of 2048 tokens each), and train for a total of 150,000 steps, decaying the learning rate with a cosine schedule to 10% of its original value at the end of training.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Training", "sec_num": "3" }, { "text": "We use the AdamW (Loshchilov and Hutter, 2019) optimizer, with beta values of 0.9 and 0.95 respectively, and an epsilon of 1.0E\u22128. We extend AdamW with the ZeRO optimizer to reduce memory consumption by distributing optimizer states across ranks. Since the weights and optimizer states of a model at this scale do not fit on a single GPU, we use the tensor parallelism scheme introduced in Shoeybi et al. (2020) in combination with pipeline parallelism (Harlap et al., 2018) to distribute the model across GPUs. To train GPT-NeoX-20B, we found that the most efficient way to distribute the model given our hardware setup was to set a tensor parallel size of 2 and a pipeline parallel size of 4. This allows the most communication-intensive processes, tensor and pipeline parallelism, to occur within a node, and data parallel communication to occur across node boundaries. In this fashion, we were able to achieve and maintain an efficiency of 117 teraFLOPS per GPU; the resulting replica and token arithmetic is sketched below.", "cite_spans": [ { "start": 453, "end": 474, "text": "(Harlap et al., 2018)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Training", "sec_num": "3" }, { "text": "GPT-NeoX-20B was trained on the Pile (Gao et al., 2020), a massive curated dataset designed specifically for training large language models. It consists of data from 22 data sources, coarsely broken down into 5 categories:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Training Data", "sec_num": "3.1" }, { "text": "\u2022 Academic Writing: PubMed Abstracts and PubMed Central, arXiv, FreeLaw, 5 USPTO Backgrounds, 6 PhilPapers, 7 NIH Exporter 8", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Training Data", "sec_num": "3.1" }, { "text": "\u2022 Web-scrapes and Internet Resources: CommonCrawl, OpenWebText2, StackExchange, 9 Wikipedia (English)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Training Data", "sec_num": "3.1" }, { "text": "\u2022 Prose: BookCorpus2, Bibliotik, Project Gutenberg (PG-19; Rae et al., 2019) \u2022 Dialogue: YouTube subtitles, Ubuntu IRC, 10 OpenSubtitles (Lison and Tiedemann, 2016), Hacker News, 11 EuroParl (Koehn, 2005) \u2022 Miscellaneous: GitHub, the DeepMind Mathematics dataset (Saxton et al., 2019) , Enron Emails (Klimt and Yang, 2004) In aggregate, the Pile consists of over 825GiB of raw text data. The diverse data sources reflect our desire for a general-purpose language model. Certain components are up-sampled to obtain a more balanced data distribution. In contrast, GPT-3's training data consists of web-scrapes, book datasets, and Wikipedia. When comparing results in this work to GPT-3, the training data is almost certainly the biggest known unknown factor.
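The replica and token arithmetic referenced above works out as follows. This is only a sanity-check sketch using the constants quoted in this section, not an excerpt from our configuration files:

```python
# Parallelism layout: 12 nodes x 8 A100s, tensor parallel 2, pipeline parallel 4.
gpus = 12 * 8
tensor_parallel, pipeline_parallel = 2, 4
data_parallel = gpus // (tensor_parallel * pipeline_parallel)  # 12 replicas, one per node

# Batch and schedule: 1538 contexts of 2048 tokens each, 150,000 steps.
tokens_per_step = 1538 * 2048                 # ~3.15M tokens per step
total_tokens = tokens_per_step * 150_000      # ~472B tokens seen over training
print(data_parallel, tokens_per_step, total_tokens)
```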
Full details of the Pile can be found in the technical report (Gao et al., 2020) and the associated datasheet (Biderman et al., 2022) .", "cite_spans": [ { "start": 51, "end": 58, "text": "(PG-19;", "ref_id": null }, { "start": 59, "end": 76, "text": "Rae et al., 2019)", "ref_id": "BIBREF13" }, { "start": 182, "end": 204, "text": "EuroParl (Koehn, 2005)", "ref_id": null }, { "start": 263, "end": 284, "text": "(Saxton et al., 2019)", "ref_id": null }, { "start": 300, "end": 322, "text": "(Klimt and Yang, 2004)", "ref_id": null }, { "start": 821, "end": 839, "text": "(Gao et al., 2020)", "ref_id": null }, { "start": 869, "end": 892, "text": "(Biderman et al., 2022)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Training Data", "sec_num": "3.1" }, { "text": "It is particularly notable that the Pile contains a scrape of StackExchange preprocessed into a Q/A form. There is a significant and growing body of work on the influence of the syntactic structure of finetuning data on downstream performance (Zhong et al., 2021; Tan et al., 2021; Sanh et al., 2021; Wei et al., 2021) . While so far there has been no systematic work that focuses on prompted pretraining, recent work (Biderman and Raff, 2022) observed that the formulation of the StackExchange component of the Pile appears to heavily influence code generation.", "cite_spans": [ { "start": 243, "end": 263, "text": "(Zhong et al., 2021;", "ref_id": "BIBREF31" }, { "start": 264, "end": 281, "text": "Tan et al., 2021;", "ref_id": null }, { "start": 282, "end": 300, "text": "Sanh et al., 2021;", "ref_id": null }, { "start": 301, "end": 318, "text": "Wei et al., 2021)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Training Data", "sec_num": "3.1" }, { "text": "For GPT-NeoX-20B, we use a BPE-based tokenizer similar to that used in GPT-2, with the same total vocabulary size of 50257 and three major changes to the tokenizer. First, we train a new BPE tokenizer based on the Pile, taking advantage of its diverse text sources to construct a more general-purpose tokenizer. Second, in contrast to the GPT-2 tokenizer, which treats tokenization at the start of a string as a non-space-delimited token, the GPT-NeoX-20B tokenizer applies consistent space delimitation regardless. This resolves an inconsistency regarding the presence of prefix spaces in a tokenization input.12 An example can be seen in Figure 1 . Third, our tokenizer contains dedicated tokens for runs of repeated spaces (all run lengths up to and including 24 spaces). This allows the GPT-NeoX-20B tokenizer to tokenize text with large amounts of whitespace using fewer tokens; for instance, program source code or arXiv LaTeX source files. See Appendix F for an analysis of the tokenizer.", "cite_spans": [ { "start": 612, "end": 614, "text": "12", "ref_id": null } ], "ref_spans": [ { "start": 643, "end": 651, "text": "Figure 1", "ref_id": null } ], "eq_spans": [], "section": "Tokenization", "sec_num": "3.2" }, { "text": "In the past two years, the standard practice when training autoregressive language models has become to train for only one epoch (Komatsuzaki, 2019; Kaplan et al., 2020; Henighan et al., 2020) . Recent research has claimed to see significant benefits from going even further and deduplicating training data (Lee et al., 2021; Kandpal et al., 2022; Roberts et al., 2022) .
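Deduplication in this context typically means removing exact or near-exact repeats of training documents. A minimal sketch of the exact-match variant is below; it is illustrative only, and Lee et al. (2021) additionally remove near-duplicates and repeated substrings, which requires considerably more machinery:

```python
import hashlib

def deduplicate_exact(documents):
    # Drop byte-for-byte duplicate documents (after collapsing whitespace)
    # by hashing each document and keeping only the first occurrence.
    seen, unique = set(), []
    for doc in documents:
        digest = hashlib.sha256(" ".join(doc.split()).encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            unique.append(doc)
    return unique
```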
In particular, every publicly known larger language model other than GPT-3 (Brown et al., 2020) and Jurassic-1 13 either uses some form of deduplication (Rae et al., 2022; Askell et al., 2021; Zeng et al., 2021; Sun et al., 2021; Smith et al., 2022; Hoffmann et al., 2022; Chowdhery et al., 2022) or does not discuss the training data in sufficient detail to determine what was done (Kim et al., 2021) . When the Pile was originally made, the only language model larger than GPT-NeoX-20B that existed was GPT-3, which upsampled high quality subsets of its training data. The Pile followed suit, and due to a combination of a lack of resources for large scale ablations and a lack of noticeable impact at smaller scales, we opt to use the Pile as-is. As shown in fig. 2 , even at the 20B parameter scale we see no drop in test validation loss after crossing the 1 epoch boundary.", "cite_spans": [ { "start": 129, "end": 148, "text": "(Komatsuzaki, 2019;", "ref_id": null }, { "start": 149, "end": 169, "text": "Kaplan et al., 2020;", "ref_id": null }, { "start": 170, "end": 192, "text": "Henighan et al., 2020)", "ref_id": null }, { "start": 307, "end": 325, "text": "(Lee et al., 2021;", "ref_id": "BIBREF31" }, { "start": 326, "end": 347, "text": "Kandpal et al., 2022;", "ref_id": null }, { "start": 348, "end": 369, "text": "Roberts et al., 2022)", "ref_id": null }, { "start": 441, "end": 467, "text": "GPT-3 (Brown et al., 2020)", "ref_id": null }, { "start": 525, "end": 543, "text": "(Rae et al., 2022;", "ref_id": null }, { "start": 544, "end": 564, "text": "Askell et al., 2021;", "ref_id": null }, { "start": 565, "end": 583, "text": "Zeng et al., 2021;", "ref_id": null }, { "start": 584, "end": 601, "text": "Sun et al., 2021;", "ref_id": null }, { "start": 602, "end": 621, "text": "Smith et al., 2022;", "ref_id": null }, { "start": 622, "end": 644, "text": "Hoffmann et al., 2022;", "ref_id": null }, { "start": 645, "end": 668, "text": "Chowdhery et al., 2022)", "ref_id": null }, { "start": 755, "end": 773, "text": "(Kim et al., 2021)", "ref_id": null } ], "ref_spans": [ { "start": 1134, "end": 1140, "text": "fig. 2", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Data Duplication", "sec_num": "3.3" }, { "text": "Unfortunately, none of the papers that have claimed to see an improvement from deduplication have released trained models that demonstrate this, making replication and confirmation of their results difficult. Lee et al. (2021) releases the deduplication code that they used, which we intend to use to explore this question in more detail in the future.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Data Duplication", "sec_num": "3.3" }, { "text": "It is important to note that even if there is not an improvement in loss or on task evaluations there are nevertheless compelling reasons to deduplicate training data for any model put into production. 
In particular, systematic analysis has shown signifi-cant benefits in terms of reducing the leakage of training data (Lee et al., 2021; Zhang et al., 2021; Carlini et al., 2022; Kandpal et al., 2022) .", "cite_spans": [ { "start": 319, "end": 337, "text": "(Lee et al., 2021;", "ref_id": "BIBREF31" }, { "start": 338, "end": 357, "text": "Zhang et al., 2021;", "ref_id": "BIBREF31" }, { "start": 358, "end": 379, "text": "Carlini et al., 2022;", "ref_id": null }, { "start": 380, "end": 401, "text": "Kandpal et al., 2022)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Data Duplication", "sec_num": "3.3" }, { "text": "To evaluate our model we use the EleutherAI Language Model Evaluation Harness (Gao et al., 2021b) , an open source codebase for language model evaluation that supports a number of model APIs. As our goal is to make a powerful model publicly accessible, we compare with English language models with at least 10B parameter that are publicly accessible. We compare with the GPT-3 models on the OpenAI API (Brown et al., 2020) , the open source FairSeq dense models (Artetxe et al., 2021), and GPT-J-6B (Wang and Komatsuzaki, 2021). We do not compare against T5 (Raffel et al., 2020) or its derivatives as our evaluation methodology assumes that the models are autoregressive. While there is a Megatron 11B checkpoint that has been publicly released, the released code is non-functional and we have not been able to get the model to work. We do not compare against any mixture-of-experts models as no public MoE model achieves performance comparable to a 10B parameter dense model.", "cite_spans": [ { "start": 78, "end": 97, "text": "(Gao et al., 2021b)", "ref_id": null }, { "start": 402, "end": 422, "text": "(Brown et al., 2020)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Performance Evaluations", "sec_num": "4" }, { "text": "While it is common to display \"scaling laws\" curves of best fit, we opt to not do so as the small number of OpenAI API models give DaVinci an outsized influence on the slope of the curve. In many of the examples we study, including DaVinci in the scaling laws calculation moves the line of best fit so far as to entirely change the conclusions. Instead, we connect the points with lines directly. We categorize both GPT-J-6B and GPT-NeoX-20B under the umbrella of GPT-NeoX models, as both models are trained with the same architecture (except for the negligible differences described in Section 2.1.2) and were trained on the same dataset. However, we connect them using a dashed line to reflect the fact that these two models are not the same model trained at two different scales the way the FairSeq and OpenAI models are, having been trained using different codebases, different tokenizers, and for different numbers of tokens.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Performance Evaluations", "sec_num": "4" }, { "text": "Where we were able to obtain the relevant information, we report two baselines: human-level performance and random performance. All plots contain error bars representing two standard errors, indicating the 95% confidence interval around each point. 
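Concretely, each error bar is derived from the per-task accuracy and the number of evaluation examples. The following sketch shows the underlying binomial standard error; the evaluation harness reports an analogous per-task standard error, and the example numbers are illustrative rather than results from this paper:

```python
import math

def accuracy_interval(accuracy, n_examples):
    # Binomial standard error of an accuracy estimate; +/- two standard
    # errors gives the ~95% confidence interval plotted in the figures.
    stderr = math.sqrt(accuracy * (1.0 - accuracy) / n_examples)
    return accuracy - 2 * stderr, accuracy + 2 * stderr

print(accuracy_interval(0.72, 5000))  # roughly (0.707, 0.733)
```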
For some plots, the standard error is so small that the interval is not visible.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Performance Evaluations", "sec_num": "4" }, { "text": "We evaluate our model on a diverse collection of standard language model evaluation datasets that we divide into three main categories: Natural Language Tasks, Advanced Knowledge-Based Tasks, and Mathematical Tasks. Due to space constraints, a representative subset of the results is shown here, with the rest in Appendix E.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Tasks Evaluated", "sec_num": "4.1" }, { "text": "We evaluate our model on a diverse collection of standard language model evaluation datasets, including ANLI (Nie et al., 2020), ARC, HellaSwag, LAMBADA, LogiQA, PIQA, and PROST; the full list of tasks and results appears in Appendix E. Mathematical Tasks The solving of mathematical problems is an area that has had a long history of study in AI research, despite the fact that large language models tend to perform quite poorly both on arithmetic tasks and on mathematical problems phrased in natural language. We evaluate on the MATH test dataset (Hendrycks et al., 2021b) as well as on the numerical arithmetic problems introduced by Brown et al. (2020) . Note that the MATH test dataset is an evaluation metric that is generally finetuned on, but due to computational limitations we only evaluate models zero- and five-shot here.", "cite_spans": [ { "start": 99, "end": 117, "text": "(Nie et al., 2020)", "ref_id": "BIBREF5" }, { "start": 523, "end": 542, "text": "Brown et al. (2020)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Natural Language Tasks", "sec_num": null }, { "text": "Advanced Knowledge-Based Tasks We are also interested in the ability of our models to answer factual questions that (for humans) require advanced knowledge. To do this, we use a dataset of multiple choice questions in a variety of diverse domains developed by Hendrycks et al. (2021a). Following common practice on this dataset, we focus on results aggregated by subject area: Humanities, Social Sciences, STEM, and Miscellaneous, as presented in Figure 6 . We report five-shot performance to be comparable to previous work. Figure 5 : Zero-shot performance of GPT-NeoX-20B compared to FairSeq and OpenAI models on arithmetic tasks. Random performance on these tasks is 0%, and we were unable to find information on median human performance. ", "cite_spans": [], "ref_spans": [ { "start": 446, "end": 454, "text": "Figure 6", "ref_id": "FIGREF4" }, { "start": 736, "end": 744, "text": "Figure 5", "ref_id": null } ], "eq_spans": [], "section": "Natural Language Tasks", "sec_num": null }, { "text": "Natural Language Tasks While GPT-NeoX-20B outperforms FairSeq 13B on some tasks (e.g. ARC, LAMBADA, PIQA, PROST), it underperforms on others (e.g. HellaSwag, LogiQA zero-shot). In total, across the 32 evaluations we performed, we outperform on 22 tasks, underperform on four tasks, and fall within the margin of error on six tasks. By far our weakest performance is on HellaSwag, where we score four standard deviations below FairSeq 13B in both zero- and five-shot evaluations. Similarly, GPT-J underperforms FairSeq 6.7B by three standard deviations zero-shot and six standard deviations five-shot on HellaSwag.
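The task-level numbers discussed in this section can be reproduced with the evaluation harness described above. A representative invocation is sketched below; entry points and task names differ between harness versions, so treat this as an illustration rather than an exact command:

```python
# Sketch of scoring a model with the EleutherAI lm-evaluation-harness.
# The `simple_evaluate` helper and the "hf-causal" backend are assumptions
# about the harness version; consult the harness documentation for specifics.
from lm_eval import evaluator

results = evaluator.simple_evaluate(
    model="hf-causal",
    model_args="pretrained=EleutherAI/gpt-neox-20b",
    tasks=["lambada_openai", "hellaswag", "piqa"],
    num_fewshot=0,
)
print(results["results"])
```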
We find this massive performance loss largely inexplicable; while we originally assumed that the substantial non-prose components of the Pile were to blame, we note that GPT-J and GPT-NeoX outperform FairSeq models on the very similar LAMBADA task by roughly the same amount.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Performance Results", "sec_num": "5.1" }, { "text": "Mathematics While GPT-3 and FairSeq models are generally quite close on arithmetic tasks, they are consistently outperformed by GPT-J and GPT-NeoX. We conjecture that this is traceable to the prevalence of mathematics equations in the training data, but warn that people should not assume that this means that training on the Pile produces better out-of-distribution arithmetic reasoning. Razeghi et al. (2022) show that there is a strong correlation between the frequency of a numerical equation in the Pile and GPT-J's performance on that equation, and we see no reason this would not hold in GPT-NeoX-20B, FairSeq, and GPT-3. We are unfortunately unable to investigate this effect in the FairSeq and GPT-3 models because the authors do not release their training data.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Performance Results", "sec_num": "5.1" }, { "text": "Advanced Knowledge-Based Tasks While GPT-NeoX and FairSeq both exhibit dominant performance on MMLU compared to GPT-3 in the five-shot setting (Figures 6 and 11), their performance is much closer in the zero-shot setting (Figure 10). Hendrycks et al. (2021b) find that few-shot evaluation does not improve performance, but that appears to be the case only for GPT-3. We view this as a warning against drawing strong conclusions about evaluation metrics based only on one model, and encourage researchers developing new evaluation benchmarks to leverage multiple different classes of models to avoid overfitting their conclusions to a specific model.", "cite_spans": [], "ref_spans": [ { "start": 146, "end": 163, "text": "Figures 6 and 11)", "ref_id": "FIGREF4" }, { "start": 224, "end": 235, "text": "(Figure 10)", "ref_id": "FIGREF9" } ], "eq_spans": [], "section": "Performance Results", "sec_num": "5.1" }, { "text": "Our experiments indicate that GPT-J-6B and GPT-NeoX-20B benefit substantially more from few-shot evaluations than the FairSeq models do. When going from 0-shot to 5-shot evaluations, GPT-J-6B improves by 0.0526 and GPT-NeoX-20B improves by 0.0598, while the FairSeq 6.7B and 13B models improve by 0.0051 and 0.0183 respectively. This result is statistically significant and robust to perturbations of prompting. While we do not have a particular explanation for this currently, we view this as a strong recommendation for our models.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Powerful Few-Shot Learning", "sec_num": "5.2" }, { "text": "Optimal Training Hyperparameter tuning is an expensive process, and is often infeasible to do at full scale for multi-billion parameter models. Due to the aforementioned limitations, we opted to choose hyperparameters based on a mixture of experiments at smaller scales and by interpolating parameters appropriate for our model size based on previously published work (Brown et al., 2020). However, several aspects of both our model architecture [Section 2.1] and training setup, including the data [Section 3.1] and the tokenizer [Section 3.2], diverge significantly from Brown et al. (2020).
As such, it is almost certainly the case that the hyperparameters used for our model are no longer optimal, and potentially never were.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Limitations", "sec_num": "5.3" }, { "text": "Lack of Coding Evaluations Many of the design choices we made during the development of this model were oriented towards improving performance on coding tasks. However, we underestimated the difficulty and cost of existing coding benchmarks (Chen et al., 2021), and so were unable to evaluate our model in that domain. We hope to do so in the future.", "cite_spans": [ { "start": 241, "end": 260, "text": "(Chen et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Limitations", "sec_num": "5.3" }, { "text": "Data Duplication Finally, the lack of dataset deduplication could also have had an impact on downstream performance. Recent work has shown that deduplicating training data can have a large effect on perplexity (Lee et al., 2021). While our experiments show no sign of this, it is hard to dismiss the possibility given the number of researchers who have found the opposite result.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Limitations", "sec_num": "5.3" }, { "text": "The current status quo in research is that large language models are things people train and publish about, but do not actually release. To the best of our knowledge, GPT-NeoX-20B is the largest and most performant dense language model to ever be publicly released. A variety of reasons for the non-release of large language models are given by various groups, but the primary one is the harms that public access to LLMs would purportedly cause.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Releasing a 20B Parameter LLM", "sec_num": "5.4" }, { "text": "We take these concerns quite seriously. However, having taken them quite seriously, we feel that they are flawed in several respects. While a thorough analysis of these issues is beyond the scope of this paper, the public release of our model is its most important contribution, and so it is important to explain why we disagree with the prevailing wisdom.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Releasing a 20B Parameter LLM", "sec_num": "5.4" }, { "text": "Providing access to ethics and alignment researchers will prevent harm. The open-source release of this model is motivated by the hope that it will allow researchers who would not otherwise have access to LLMs to use them. While there are negative risks due to the potential acceleration of capabilities research, we believe the benefits of this release outweigh the risks.
We also note that these benefits are not hypothetical, as a number of papers about the limits and ethics of LLMs have been explicitly enabled by the public release of previous models (Zhang et al., 2021; Kandpal et al., 2022; Carlini et al., 2022; Birhane et al., 2021; nostalgebraist, 2020; Meng et al., 2022; Lin et al., 2021).", "cite_spans": [ { "start": 556, "end": 576, "text": "(Zhang et al., 2021;", "ref_id": "BIBREF31" }, { "start": 577, "end": 598, "text": "Kandpal et al., 2022;", "ref_id": null }, { "start": 599, "end": 620, "text": "Carlini et al., 2022;", "ref_id": null }, { "start": 621, "end": 642, "text": "Birhane et al., 2021;", "ref_id": null }, { "start": 643, "end": 664, "text": "nostalgebraist, 2020;", "ref_id": null }, { "start": 665, "end": 683, "text": "Meng et al., 2022;", "ref_id": null }, { "start": 684, "end": 701, "text": "Lin et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Releasing a 20B Parameter LLM", "sec_num": "5.4" }, { "text": "Limiting access to governments and corporations will not prevent harm. Perhaps the most curious aspect of the argument that LLMs should not be released is that the people making such arguments are not arguing that they should not use LLMs. Rather, they are claiming that other people should not use them. We do not believe that this is a position that should be taken seriously. The companies and governments that have the financial resources to train LLMs are overwhelmingly more likely to do large-scale harm using an LLM than a random individual.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Releasing a 20B Parameter LLM", "sec_num": "5.4" }, { "text": "Releasing this model is the beginning, not the end, of our work to make GPT-NeoX-20B widely accessible to researchers. Due to the size of the model, inference is most economical on a pair of RTX 3090 Tis or a single A6000 GPU, and finetuning requires significantly more compute. Truly promoting widespread access to LLMs means promoting widespread access to computing infrastructure in addition to the models themselves. We plan to make progress on this issue going forward by continuing to work on reducing the inference costs of our model, and by working with researchers to provide access to the computing infrastructure they need to carry out experiments on our models. We strongly encourage researchers who are interested in studying GPT-NeoX-20B but lack the necessary infrastructure to reach out to discuss how we can help empower you.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Releasing a 20B Parameter LLM", "sec_num": "5.4" }, { "text": "We introduce GPT-NeoX-20B, a 20 billion parameter autoregressive Transformer language model trained on the Pile (Gao et al., 2020) dataset, and detail the main architectural differences between GPT-NeoX-20B and GPT-3: most notably the change in tokenizer, the addition of Rotary embeddings, the parallel computation of attention and feed-forward layers, and a different initialization scheme and hyperparameters. We run extensive evaluations of GPT-NeoX-20B on natural language and factual knowledge tasks, and compare it with other publicly available models, finding that it performs particularly well on knowledge-based and mathematical tasks. Finally, we are open-sourcing the training and evaluation code at https://github.com/EleutherAI/gpt-neox, where readers can find a link to download the model weights across the whole training run.
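As a concrete starting point, the released checkpoint can be loaded with standard open-source tooling. The sketch below is ours, not part of the release itself, and it assumes (a) that the weights are available on the Hugging Face Hub under the EleutherAI/gpt-neox-20b identifier, (b) a transformers version with GPT-NeoX support plus the accelerate package, and (c) roughly 40-45 GB of GPU memory for half-precision inference:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "EleutherAI/gpt-neox-20b"  # assumed Hub identifier
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(
    name,
    torch_dtype=torch.float16,  # half precision: about 40 GB of weights
    device_map="auto",          # shard across the available GPUs
)

inputs = tokenizer("GPT-NeoX-20B is a 20 billion parameter", return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, temperature=0.8)
print(tokenizer.decode(outputs[0]))

The GPT-NeoX codebase linked above also provides its own inference tooling; this snippet is only meant to illustrate that a couple of consumer or workstation GPUs suffice for inference.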
", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Summary", "sec_num": "6" }, { "text": "The current status quo in research is that large language models are things people train and publish about, but do not actually release. To the best of our knowledge, GPT-NeoX-20B is the largest dense language model to ever be publicly released with a several-way tie for second place at 13 billion parameters (Artetxe et al., 2021; Xue et al., 2020 Xue et al., , 2021 and many more models at the 10-11B parameter scale. A variety of reasons for the non-release of large language models are given by various groups, but the primary one is the harms that public access to LLMs would purportedly cause. We take these concerns quite seriously. However, having taken them quite seriously, we feel that they are flawed in several respects. While a thorough analysis of these issues is beyond the scope of this paper, the public release of our model is the most important contribution of this paper and so an explanation of why we disagree with the prevailing wisdom is important.", "cite_spans": [ { "start": 310, "end": 332, "text": "(Artetxe et al., 2021;", "ref_id": null }, { "start": 333, "end": 349, "text": "Xue et al., 2020", "ref_id": null }, { "start": 350, "end": 368, "text": "Xue et al., , 2021", "ref_id": "BIBREF26" } ], "ref_spans": [], "eq_spans": [], "section": "C Broader Impacts", "sec_num": null }, { "text": "Providing access to ethics and alignment researchers will prevent harm. The open-source release of this model is motivated by the hope that it will allow researchers who would not otherwise have access to LLMs to use them. While there are negative risks due to the potential acceleration of capabilities research, we believe the benefits of this release outweigh the risks. We also note that these benefits are not hypothetical, as a number of papers about the limits and ethics of LLMs has been explicitly enabled by the public release of previous models (Zhang et al., 2021; Kandpal et al., 2022; Carlini et al., 2022; Birhane et al., 2021; nostalgebraist, 2020; Meng et al., 2022; Lin et al., 2021) .", "cite_spans": [ { "start": 556, "end": 576, "text": "(Zhang et al., 2021;", "ref_id": "BIBREF31" }, { "start": 577, "end": 598, "text": "Kandpal et al., 2022;", "ref_id": null }, { "start": 599, "end": 620, "text": "Carlini et al., 2022;", "ref_id": null }, { "start": 621, "end": 642, "text": "Birhane et al., 2021;", "ref_id": null }, { "start": 643, "end": 664, "text": "nostalgebraist, 2020;", "ref_id": null }, { "start": 665, "end": 683, "text": "Meng et al., 2022;", "ref_id": null }, { "start": 684, "end": 701, "text": "Lin et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C Broader Impacts", "sec_num": null }, { "text": "Limiting access to governments and corporations will not prevent harm. Perhaps the most curious aspect of the argument that LLMs should not be released is that the people making such arguments are not arguing they they should not use LLMs. Rather, they are claiming that other people should not use them. We do not believe that this is a position that should be taken seriously. 
The companies and governments that have the financial resources to train LLMs are overwhelmingly more likely to do large-scale harm using an LLM than a random individual.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C Broader Impacts", "sec_num": null }, { "text": "The open-source release of this model is motivated by the hope that it will allow ethics and alignment researchers who would not otherwise have access to LLMs to use them. While there are negative risks due to the potential acceleration of capabilities research, we believe the benefits of this release outweigh those risks.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C Broader Impacts", "sec_num": null }, { "text": "When discussing the impact of access to technology, it is important to distinguish between capabilities research, which seeks to push the current state-of-the-art, and research on the ethics and alignment of existing systems. We feel the risk of releasing GPT-NeoX-20B is acceptable, as the contribution of the model to capabilities research is likely to be limited, for two reasons.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.1 Impact on Capabilities Research and Products", "sec_num": null }, { "text": "We ultimately believe that the benefits of releasing this model outweigh the risks, but this argument hinges crucially on the particular circumstances of this release. All actors considering releasing powerful AI models or advancing the frontier of capabilities should think carefully about what they release, in what way, and when.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.1 Impact on Capabilities Research and Products", "sec_num": null }, { "text": "To oversimplify a complex debate, there are, broadly speaking, two schools of thought regarding the mitigation of harm that is done by AI algorithms: AI Ethics and AI Alignment. AI Ethics researchers are primarily concerned with the impact of current technologies or technologies very similar to current technologies, while AI Alignment is primarily concerned with future \"generally intelligent\" systems whose capacities greatly outclass currently existing systems and which possess human or superhuman levels of intelligence. While the tools, methods, and ideas of these camps are very different, we believe that increasing access to these technologies will empower and advance the goals of researchers in both schools.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.2 Impact on Ethics and Alignment Research", "sec_num": null }, { "text": "Analyzing and documenting the limitations of models is an essential aspect of AI ethics research (Matias, 2020).
Work examining and criticizing datasets (Kreutzer et al., 2022; Dodge et al., 2021; Birhane et al., 2021), functionality (Smart, 2021; Zhang et al., 2021; Carlini et al., 2022; Biderman and Raff, 2022), evaluation and deployment procedures (Biderman and Scheirer, 2020; Talat et al., 2022), and more are essential to well-rounded and informed debate on the value and application of technology.", "cite_spans": [ { "start": 153, "end": 176, "text": "(Kreutzer et al., 2022;", "ref_id": null }, { "start": 177, "end": 196, "text": "Dodge et al., 2021;", "ref_id": null }, { "start": 197, "end": 218, "text": "Birhane et al., 2021)", "ref_id": null }, { "start": 235, "end": 248, "text": "(Smart, 2021;", "ref_id": null }, { "start": 249, "end": 268, "text": "Zhang et al., 2021;", "ref_id": "BIBREF31" }, { "start": 269, "end": 290, "text": "Carlini et al., 2022;", "ref_id": null }, { "start": 291, "end": 315, "text": "Biderman and Raff, 2022)", "ref_id": null }, { "start": 355, "end": 384, "text": "(Biderman and Scheirer, 2020;", "ref_id": null }, { "start": 385, "end": 404, "text": "Talat et al., 2022)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.2.1 The Necessity of Model Access for AI Ethics", "sec_num": null }, { "text": "However, the current centralization of LLM training also creates a centralization of control of technology (Sadowski et al., 2021; Whittaker, 2021) that makes meaningful independent evaluation impossible. This means that it is often not possible to do this kind of work in practice because of the severe access restrictions that companies owning large language models place on them. While GPT-NeoX-20B is the 13th largest dense language model at the time of writing, the only model larger than GPT-NeoX-20B that is publicly accessible is GPT-3. There are significant limitations on people's ability to do research on GPT-3, though, as it is not free to use and its training data is private.", "cite_spans": [ { "start": 106, "end": 129, "text": "(Sadowski et al., 2021;", "ref_id": null }, { "start": 130, "end": 146, "text": "Whittaker, 2021)", "ref_id": "BIBREF25" } ], "ref_spans": [], "eq_spans": [], "section": "C.2.1 The Necessity of Model Access for AI Ethics", "sec_num": null }, { "text": "LLMs represent a different paradigm than the AI systems generally studied by alignment researchers because they are not well-described as coherent agents or expected utility maximizers. Though trained to optimize a log-likelihood loss function, at a high level the goals an LLM pursues are varied and contradictory, depending on the way it is prompted. This introduces additional challenges, but may also enable new approaches to alignment. GPT-NeoX-20B itself is not the system we need to align, but we hope it can serve as a publicly available platform for experiments whose results might generalize to crucial future work.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "The following is a non-exhaustive list of potential approaches we consider promising for further investigation.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Mechanistic interpretability.
Mechanistic interpretability research (Cammarata et al., 2020) hopes to gain an understanding into how models accomplish the tasks they do, in part in the hopes of detecting problematic or deceptive algorithms implemented by models before these failures manifest in the real world. Being able to interpret and inspect the detailed inner workings of trained models would be a powerful tool to ensure models are optimizing for the goals we intended (Hubinger et al., 2021; Koch et al., 2021) . Reverse engineering transformer language models has already yielded insights about the inner functioning of LMs (Elhage et al., 2021; nostalgebraist, 2020; Meng et al., 2022; Dai et al., 2021) .", "cite_spans": [ { "start": 477, "end": 500, "text": "(Hubinger et al., 2021;", "ref_id": null }, { "start": 501, "end": 519, "text": "Koch et al., 2021)", "ref_id": null }, { "start": 634, "end": 655, "text": "(Elhage et al., 2021;", "ref_id": null }, { "start": 656, "end": 677, "text": "nostalgebraist, 2020;", "ref_id": null }, { "start": 678, "end": 696, "text": "Meng et al., 2022;", "ref_id": null }, { "start": 697, "end": 714, "text": "Dai et al., 2021)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Using a LLM as a reward model. Because they are trained to predict human writing, LLMs also appear to develop a useful representation of human values at the semantic level. Finding a way to utilise these representations could be a possible path toward solving the problem of reward robustness in RL and other algorithms which require a proxy of human judgment (Stiennon et al., 2022; Wentworth, 2020) . Despite fundamental theoretical limitations on learning human values (Armstrong and Mindermann, 2018; Kosoy, 2016) , value learning may still be robust enough to align weaker superhuman AIs. Future experiments could explore the extent to which LLM pretraining improves downstream reward model robustness and generalization.", "cite_spans": [ { "start": 360, "end": 383, "text": "(Stiennon et al., 2022;", "ref_id": null }, { "start": 384, "end": 400, "text": "Wentworth, 2020)", "ref_id": "BIBREF24" }, { "start": 472, "end": 504, "text": "(Armstrong and Mindermann, 2018;", "ref_id": "BIBREF0" }, { "start": 505, "end": 517, "text": "Kosoy, 2016)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Natural language transparency. Since LLM prompts are in a human-readable form, it can provide insight on the LLM's expected behavior. Prompt programming or finetuning can be used to leverage this fact and force a LLM to execute more transparent algorithms, such as splitting problems into steps or explicitly writing an \"internal monologue\" (Soares, 2021; Gao et al., 2021a; Nye et al., 2021) . Reliability and trustworthiness can present significant challenges for these approaches.", "cite_spans": [ { "start": 341, "end": 355, "text": "(Soares, 2021;", "ref_id": null }, { "start": 356, "end": 374, "text": "Gao et al., 2021a;", "ref_id": null }, { "start": 375, "end": 392, "text": "Nye et al., 2021)", "ref_id": "BIBREF6" } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "However, this form of transparency also has its limits. 
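Before turning to those limits, a concrete illustration of the "internal monologue" idea above. The sketch is our own toy example (the word problem and prompt format are invented, not from the paper); any autoregressive LM, including GPT-NeoX-20B, can be asked to continue such a prompt:

# Toy scratchpad-style prompt: the model is asked to write out intermediate
# reasoning steps before giving a final answer, making part of its decision
# process readable. The example problem below is hypothetical.
FEW_SHOT_EXAMPLE = (
    "Q: A library has 3 shelves with 12 books each. It buys 7 more books. "
    "How many books does it have?\n"
    "Reasoning: 3 shelves * 12 books = 36 books. 36 + 7 = 43.\n"
    "A: 43\n\n"
)

def scratchpad_prompt(question: str) -> str:
    return FEW_SHOT_EXAMPLE + f"Q: {question}\nReasoning:"

print(scratchpad_prompt("A train travels 60 km per hour for 2.5 hours. How far does it go?"))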
In particular, models can often respond unpredictably to prompts, and internal monologues may become completely detached from the model's decision making process if translating between the model's ontology and the human ontology is more complex than simply modeling human monologues (Christiano et al., 2021) .", "cite_spans": [ { "start": 339, "end": 364, "text": "(Christiano et al., 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Simulating agents at runtime. Although LLMs are not well-described as coherent agents, they can still be used to generate goal-directed processes. Given an appropriate prompt (such as a story of a character working to achieve a goal), LLMs can predict and thus simulate an agent (Huang et al., 2022) . Simulated agents take representative actions according to the patterns present in the training data, similar to behavior cloning. One potential future research direction is testing whether they are less susceptible to failure modes that follow from expected utility maximization, such as Goodhart failures and power-seeking behavior. However, other failure modes can be introduced by the LM training procedure, such as \"delusions\" or \"hallucinations\" (Ortega et al., 2021; Gao, 2021; Maynez et al., 2020) . Additionally, simulated agents may be uncompetitive with optimal agents like those produced by Reinforcement Learning. An important research direction is to explore how the beneficial properties of simulated agents can be maintained while making them competitive with RL based approaches.", "cite_spans": [ { "start": 279, "end": 299, "text": "(Huang et al., 2022)", "ref_id": null }, { "start": 753, "end": 774, "text": "(Ortega et al., 2021;", "ref_id": null }, { "start": 775, "end": 785, "text": "Gao, 2021;", "ref_id": null }, { "start": 786, "end": 806, "text": "Maynez et al., 2020)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Tool AI and automated alignment research. LMs can be used as relatively unagentic tools, such as OpenAI's Codex model (Chen et al., 2021) acting as a coding assistant. Because pretrained LLMs are not directly optimized for the factual accuracy of their predictions, it is possible they avoid some of the traditional problems with tool or oracle AI (Armstrong et al., 2012) , such as the incentive to produce manipulative answers (Demski, 2019). Tool AI is not a long-term solution to the problem of alignment, but it could be used to assist alignment research or even automate large parts of it. For example, language models could be used to help brainstorm alignment ideas more quickly, act as a writing assistant, or directly generate alignment research papers for humans to review. This line of research also risks accelerating capabilities research, a concern we discuss more below.", "cite_spans": [ { "start": 118, "end": 137, "text": "(Chen et al., 2021)", "ref_id": null }, { "start": 348, "end": 372, "text": "(Armstrong et al., 2012)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "C.2.2 The Usefulness of Large Language Models in Alignment", "sec_num": null }, { "text": "Because training large models requires a significant engineering and capital investment, such models are often out of reach for small labs and independent researchers. 
As it stands, only large organizations have access to the latest generation of powerful language models (Brown et al., 2020; Rae et al., 2022; Fedus et al., 2021; Lieber et al., 2021; Tang, 2021). The number of researchers focused primarily on ethics and alignment working at these labs is much lower than the number working on developing new capabilities.", "cite_spans": [ { "start": 272, "end": 292, "text": "(Brown et al., 2020;", "ref_id": null }, { "start": 293, "end": 310, "text": "Rae et al., 2022;", "ref_id": null }, { "start": 311, "end": 330, "text": "Fedus et al., 2021;", "ref_id": null }, { "start": 331, "end": 351, "text": "Lieber et al., 2021;", "ref_id": null }, { "start": 352, "end": 363, "text": "Tang, 2021)", "ref_id": "BIBREF17" } ], "ref_spans": [], "eq_spans": [], "section": "C.3 Differential Impact on Access", "sec_num": null }, { "text": "We feel the risk of releasing GPT-NeoX-20B is acceptable, as the contribution of the model to capabilities research is likely to be limited, for two reasons. Firstly, the organizations pursuing capabilities research most aggressively are unlikely to benefit from our open-source release of this model, as they have already developed more powerful models of their own. Secondly, we believe the single most important piece of knowledge driving advances in capabilities research is the knowledge that scaling LLMs was possible in the first place (Leahy, 2021; Leahy and Biderman, 2021), whereas the actual implementation is very fungible (as evidenced by the large number of parties who have succeeded in creating their own LLMs in the past two years). This differential impact, wherein our release is expected to benefit primarily people who have less funding and infrastructure, is a key factor in our decision to release this model publicly.", "cite_spans": [ { "start": 544, "end": 557, "text": "(Leahy, 2021;", "ref_id": null }, { "start": 558, "end": 583, "text": "Leahy and Biderman, 2021)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.3 Differential Impact on Access", "sec_num": null }, { "text": "We ultimately believe that the benefits of releasing this model outweigh the risks, but this argument hinges crucially on the particular circumstances of this release. All actors considering releasing powerful AI models or advancing the frontier of capabilities should think carefully about what they release, in what way, and when.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.3 Differential Impact on Access", "sec_num": null }, { "text": "A significant point of concern in some recent work is the energy usage and carbon emissions associated with training large language models (Strubell et al., 2019; Schwartz et al., 2020; Lacoste et al., 2019; Bender et al., 2021). In particular, Strubell et al. (2019) estimate that a then-recent paper by the authors released 626,155 lbs or 284.01 metric tons of CO2 (t CO2). As Strubell et al.
(2019) has been widely cited and quoted in the media as representative of large-scale language models, we decided to explicitly and carefully track our energy usage and carbon emissions to see if this is truly a representative account of NLP emissions.", "cite_spans": [ { "start": 139, "end": 162, "text": "(Strubell et al., 2019;", "ref_id": null }, { "start": 163, "end": 185, "text": "Schwartz et al., 2020;", "ref_id": null }, { "start": 186, "end": 207, "text": "Lacoste et al., 2019;", "ref_id": null }, { "start": 208, "end": 228, "text": "Bender et al., 2021)", "ref_id": null }, { "start": 231, "end": 268, "text": "In particular, Strubell et al. (2019)", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "C.4 Environmental Impact", "sec_num": null }, { "text": "Throughout the development and training of our model, we tracked our energy usage and carbon emissions. We found that the process of developing and training GPT-NeoX-20B emitted almost exactly 10% of Strubell et al. (2019)'s estimate, coming in at a total of 69,957 lbs or 31.73 metric tons of CO2. This is roughly the equivalent of the yearly emissions of the average American or 35 round-trip flights between New York City and San Francisco. Our systems were based in Illinois, USA, and consumed energy sourced from the local grid mix, which produces an average of 0.47905 t CO2/MWh. We consumed a total of 43.92 MWh of electricity over the course of 1830 hours of training. Scaling, testing, and evaluation were responsible for the equivalent of another 920 hours on our systems, for a total energy consumption of 66.24 MWh and thus the production of just under 35 metric tons of CO2.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.4 Environmental Impact", "sec_num": null }, { "text": "It is noteworthy that Strubell et al. (2019) are estimating emissions from a neural architecture search paper, and their figure is therefore not directly comparable to ours. The primary motivation for our comparison is that their number has attracted a lot of attention and is often taken to be representative of NLP research. In general, we advocate for more systematic and comprehensive reporting to improve transparency surrounding this important topic.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "C.4 Environmental Impact", "sec_num": null }, { "text": "Results for natural language understanding tasks are shown in Tables 2 and 3, while results for Hendrycks tasks are found in Tables 10 to 13. All evaluations used version 0 of the tasks in the Evaluation Harness. This information is reported in the output of the Evaluation Harness and should be used for ensuring reproducibility of these results, even as the task implementations themselves may change to fix bugs. Table 8: Five-Shot Results on Basic Arithmetic and MATH (GPT-J and GPT-NeoX). GPT-3 is omitted due to financial limitations. Due to API limits we were unable to evaluate on the OpenAI API. Instead, we report numbers from Hendrycks et al.
(2021a) with model sizes corrected.", "cite_spans": [], "ref_spans": [ { "start": 62, "end": 142, "text": "Tables 2 and 3, while results for Hendrycks tasks are found in Tables 10 to 13.", "ref_id": "TABREF2" }, { "start": 488, "end": 495, "text": "Table 8", "ref_id": null } ], "eq_spans": [], "section": "D Architecture Diagram E Full Evaluation Results", "sec_num": null }, { "text": "Table 15: Number of tokens from tokenizing the Pile validation set. The GPT-NeoX-20B tokenizer uses fewer tokens to represent the Pile overall, with the biggest gains in whitespace-heavy datasets such as arXiv, GitHub and StackExchange.", "cite_spans": [], "ref_spans": [ { "start": 23, "end": 31, "text": "Table 15", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "D Architecture Diagram E Full Evaluation Results", "sec_num": null }, { "text": "We define a word as a contiguous string delimited by whitespace or punctuation (as defined by string.punctuation in Python). We perform this analysis at the component level. We only consider words that occur at least 10 times within the given component. We show in Table 18 a representative example from the Pile-CC corpus.", "cite_spans": [], "ref_spans": [ { "start": 312, "end": 320, "text": "Table 18", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "D Architecture Diagram E Full Evaluation Results", "sec_num": null }, { "text": "In Figures 12 and 17, we show examples of tokenized documents from the Pile, comparing the GPT-2 tokenizer to ours.", "cite_spans": [], "ref_spans": [ { "start": 3, "end": 20, "text": "Figures 12 and 17", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "G Tokenization Examples", "sec_num": null }, { "text": "This model does not work using the provided codebase, and we have been told it under-performs GPT-J.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "The sole difference is due to an oversight discussed in Section 2.1.2", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "See the Weights & Biases reports here and here for further details. 4 See GitHub for implementation details.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://www.courtlistener.com/ 6 https://bulkdata.uspto.gov/ 7 https://philpapers.org/ 8 https://exporter.nih.gov/ 9 https://archive.org/details/stackexchange", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://irclogs.ubuntu.com/ 11 https://news.ycombinator.com/", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://discuss.huggingface.co/t/bpe-tokenizers-and-spaces-before-words/475/213 In private communication, the authors confirmed that Jurassic-1 was trained on the Pile (Gao et al., 2020).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Paul Christiano, Ajeya Cotra, and Mark Xu. 2021.
Eliciting latent knowledge: How to tell if your eyes deceive you. Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "We choose to present environmental impact figures in metric tons to align with standard reporting.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://github.com/allenai/allennlp/discussions/5056", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "We thank staff at CoreWeave, in particular Max Hjelm, Brannin McBee, Peter Salanki, and Brian Venturo, for providing the GPUs and computing infrastructure that made this project possible. We would also like to acknowledge Eren Dogan and Wesley Brown for feedback and technical support throughout the project, and John Schulman, Evan Hubinger, Victor Sanh, Jacob Hilton, and Siddharth Karamcheti for providing feedback on drafts of the paper. Finally, we thank Anthony DiPofi, Charles Foster, Jeffrey Hsu, Eric Tang ", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgments", "sec_num": null }, { "text": "Table 6: Zero-Shot Results on Basic Arithmetic and MATH (GPT-J, GPT-NeoX, and GPT-3). [Flattened per-task accuracy tables for the FairSeq, GPT-J, GPT-NeoX, and GPT-3 models; only this caption is recoverable here.]", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "FairSeq", "sec_num": null }, { "text": "Both tokenizers share 36938 out of 50257 tokens, a \u223c73.5% overlap in tokens.
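The overlap figure can be checked in a few lines. A sketch (ours), assuming both tokenizers can be downloaded from the Hugging Face Hub (the GPT-NeoX-20B tokenizer under the EleutherAI/gpt-neox-20b identifier):

from transformers import AutoTokenizer

# Compare the vocabularies of the GPT-2 and GPT-NeoX-20B tokenizers.
gpt2_vocab = set(AutoTokenizer.from_pretrained("gpt2").get_vocab())
neox_vocab = set(AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b").get_vocab())

shared = gpt2_vocab & neox_vocab
print(len(shared), "tokens shared out of", len(gpt2_vocab))
print(f"overlap: {len(shared) / len(gpt2_vocab):.1%}")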
In this section, we perform a comparison between the GPT-NeoX-20B tokenizer and the GPT-2 tokenizer using the validation set of the Pile. In Table 15, we show the resulting number of tokens from tokenizing each component of the Pile's validation set with both tokenizers, and the ratio of GPT-NeoX-20B tokens to GPT-2 tokens. We observe that the GPT-NeoX-20B tokenizer represents all Pile components using fewer or very closely comparable numbers of tokens. The largest percentage improvement in token counts is in the EuroParl, GitHub, and PubMed Central components, with a more than 20% savings in the number of tokens needed to represent those components. We highlight that arXiv, GitHub, and StackExchange (subsets with large code components) can be represented with meaningfully fewer tokens with the GPT-NeoX-20B tokenizer compared to the GPT-2 tokenizer. Overall, the GPT-NeoX-20B tokenizer represents the Pile validation set with approximately 10% fewer tokens compared to the GPT-2 tokenizer. Given that the GPT-NeoX-20B tokenizer is tweaked to better tokenize whitespace, we also perform a comparison between the two tokenizers excluding whitespace. We perform the same analysis as the above, but exclude all whitespace tokens from our computations, only counting the non-whitespace tokens. A token is considered a whitespace token if it consists only of whitespace characters. The results are shown in Table 16 in the Appendix. We observe that the GPT-NeoX-20B tokenizer still uses 5% fewer tokens to represent the Pile validation set compared to the GPT-2 tokenizer. As expected, the token ratios for certain components such as GitHub and StackExchange become closer to even once the whitespace characters are excluded. While we evaluated our tokenizer using the validation set for the Pile, the Pile components would still be considered in-domain for the tokenizer and may not provide the most informative comparison point. To perform an out-of-domain comparison, we perform the same analysis using the AllenAI replication of C4, another popular pretraining corpus for large language models. As above, we use the validation set for our analysis. Our results are shown in Table 14. We find that the GPT-NeoX-20B tokenizer tokenizes the C4 validation set to approximately the same number of tokens as the GPT-2 tokenizer. When excluding all whitespace tokens, the GPT-NeoX-20B tokenizer requires approximately 1% more tokens to represent the corpus compared to the GPT-2 tokenizer.", "cite_spans": [], "ref_spans": [ { "start": 214, "end": 222, "text": "Table 15", "ref_id": null }, { "start": 1482, "end": 1490, "text": "Table 16", "ref_id": null }, { "start": 2282, "end": 2290, "text": "Table 14", "ref_id": null } ], "eq_spans": [], "section": "F Tokenizer Analysis", "sec_num": null }, { "text": "We show in Table 17 the 10 longest tokens in each tokenizer vocabulary. We exclude consideration of tokens that comprise only symbols or whitespace characters. We observe that for the GPT-2 tokenizer, many of the longest tokens appear to reflect artifacts in the tokenizer training data, likely with certain websites or web-scrapes being overrepresented in the training data.
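The longest-token listing in Table 17 can be reproduced with a short script; the sketch below is ours and again assumes Hub access to both tokenizers:

import string
from transformers import AutoTokenizer

def longest_tokens(name: str, k: int = 10):
    """Longest vocabulary entries, skipping tokens that contain only
    whitespace, digits, punctuation, or the byte-level space marker."""
    vocab = AutoTokenizer.from_pretrained(name).get_vocab()
    ignore = set(string.whitespace + string.punctuation + string.digits + "\u0120")
    candidates = [tok for tok in vocab if not set(tok) <= ignore]
    return sorted(candidates, key=len, reverse=True)[:k]

print(longest_tokens("gpt2"))
print(longest_tokens("EleutherAI/gpt-neox-20b"))  # assumed Hub identifier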
For the GPT-NeoX-20B tokenizer, we observe that most of the longest tokens are scientific terms, likely arising from the PubMed components of the Pile.", "cite_spans": [], "ref_spans": [ { "start": 11, "end": 19, "text": "Table 17", "ref_id": null } ], "eq_spans": [], "section": "F.1.1 Longest Tokens", "sec_num": null }, { "text": "We consider the words for which there is the greatest discrepancy in the resulting token length between the two tokenizers, where one tokenizer needs many tokens to represent a word that the other tokenizer represents with relatively few tokens. Table 17: Ten longest tokens (excluding tokens comprising mainly symbols, numbers and spaces) in tokenizer vocabularies. \"\u0120\" indicates a word delimiter. Table 18: Worst case word tokenization with respective tokenizers. We show cases where one tokenizer requires many more tokens to represent a word compared to the other tokenizer.", "cite_spans": [], "ref_spans": [ { "start": 206, "end": 214, "text": "Table 17", "ref_id": null }, { "start": 360, "end": 368, "text": "Table 18", "ref_id": null } ], "eq_spans": [], "section": "F.1.2 Worst Case Word Tokenization Comparison", "sec_num": null }, { "text": "Side-by-side tokenization examples (Appendix G): an arXiv abstract (GPT-2: 253 tokens; GPT-NeoX-20B: 229 tokens), a fiction front-matter page, a web template snippet (GPT-2: 430 tokens; GPT-NeoX-20B: 257 tokens), a news article (GPT-2: 178 tokens; GPT-NeoX-20B: 170 tokens), and a PubMed abstract (GPT-2: 268 tokens; GPT-NeoX-20B: 250 tokens). The full example documents are omitted here.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "G Tokenization Examples", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Occam's razor is insufficient to infer the preferences of irrational agents", "authors": [ { "first": "Stuart", "middle": [], "last": "Armstrong", "suffix": "" }, { "first": "S\u00f6ren", "middle": [], "last": "Mindermann", "suffix": "" } ], "year": 2018, "venue": "Advances in Neural Information Processing Systems", "volume": "31", "issue": "", "pages": "5598--5609", "other_ids": {}, "num": null, "urls": [], "raw_text": "Stuart Armstrong and S\u00f6ren Mindermann. 2018. Occam's razor is insufficient to infer the preferences of irrational agents. In Advances in Neural Information Processing Systems, volume 31, pages 5598-5609. Curran Associates, Inc.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Thinking inside the box: Controlling and using an oracle AI. Minds and Machines", "authors": [ { "first": "Stuart", "middle": [], "last": "Armstrong", "suffix": "" }, { "first": "Anders", "middle": [], "last": "Sandberg", "suffix": "" }, { "first": "Nick", "middle": [], "last": "Bostrom", "suffix": "" } ], "year": 2012, "venue": "", "volume": "22", "issue": "", "pages": "299--324", "other_ids": { "DOI": [ "10.1007/s11023-012-9282-2" ] }, "num": null, "urls": [], "raw_text": "Stuart Armstrong, Anders Sandberg, and Nick Bostrom. 2012. Thinking inside the box: Controlling and using an oracle AI.
Minds and Machines, 22(4):299-324.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "PROST: Physical reasoning about objects through space and time", "authors": [ { "first": "St\u00e9phane", "middle": [], "last": "Aroca-Ouellette", "suffix": "" }, { "first": "Cory", "middle": [], "last": "Paik", "suffix": "" }, { "first": "Alessandro", "middle": [], "last": "Roncone", "suffix": "" }, { "first": "Katharina", "middle": [], "last": "Kann", "suffix": "" } ], "year": 2021, "venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", "volume": "", "issue": "", "pages": "4597--4608", "other_ids": { "DOI": [ "10.18653/v1/2021.findings-acl.404" ] }, "num": null, "urls": [], "raw_text": "St\u00e9phane Aroca-Ouellette, Cory Paik, Alessandro Ron- cone, and Katharina Kann. 2021. PROST: Physi- cal reasoning about objects through space and time. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 4597-4608, Online. Association for Computational Linguistics.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Can a suit of armor conduct electricity? a new dataset for open book question answering", "authors": [ { "first": "Todor", "middle": [], "last": "Mihaylov", "suffix": "" }, { "first": "Peter", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Tushar", "middle": [], "last": "Khot", "suffix": "" }, { "first": "Ashish", "middle": [], "last": "Sabharwal", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "2381--2391", "other_ids": { "DOI": [ "10.18653/v1/D18-1260" ] }, "num": null, "urls": [], "raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct elec- tricity? a new dataset for open book question an- swering. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2381-2391, Brussels, Belgium. Association for Computational Linguistics.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Transformers without tears: Improving the normalization of self-attention", "authors": [ { "first": "Q", "middle": [], "last": "Toan", "suffix": "" }, { "first": "Julian", "middle": [], "last": "Nguyen", "suffix": "" }, { "first": "", "middle": [], "last": "Salazar", "suffix": "" } ], "year": 2019, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1910.05895" ] }, "num": null, "urls": [], "raw_text": "Toan Q. Nguyen and Julian Salazar. 2019. Trans- formers without tears: Improving the normalization of self-attention. Computing Research Repository, arXiv:1910.05895. 
Version 2.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Adversarial NLI: A new benchmark for natural language understanding", "authors": [ { "first": "Yixin", "middle": [], "last": "Nie", "suffix": "" }, { "first": "Adina", "middle": [], "last": "Williams", "suffix": "" }, { "first": "Emily", "middle": [], "last": "Dinan", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Bansal", "suffix": "" }, { "first": "Jason", "middle": [], "last": "Weston", "suffix": "" }, { "first": "Douwe", "middle": [], "last": "Kiela", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "4885--4901", "other_ids": { "DOI": [ "10.18653/v1/2020.acl-main.441" ] }, "num": null, "urls": [], "raw_text": "Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. 2020. Adversarial NLI: A new benchmark for natural language under- standing. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 4885-4901, Online. Association for Computa- tional Linguistics.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Show your work: Scratchpads for intermediate computation with language models", "authors": [ { "first": "Maxwell", "middle": [], "last": "Nye", "suffix": "" }, { "first": "Anders", "middle": [ "Johan" ], "last": "Andreassen", "suffix": "" }, { "first": "Guy", "middle": [], "last": "Gur-Ari", "suffix": "" }, { "first": "Henryk", "middle": [], "last": "Michalewski", "suffix": "" }, { "first": "Jacob", "middle": [], "last": "Austin", "suffix": "" }, { "first": "David", "middle": [], "last": "Bieber", "suffix": "" }, { "first": "David", "middle": [], "last": "Dohan", "suffix": "" }, { "first": "Aitor", "middle": [], "last": "Lewkowycz", "suffix": "" }, { "first": "Maarten", "middle": [], "last": "Bosma", "suffix": "" }, { "first": "David", "middle": [], "last": "Luan", "suffix": "" }, { "first": "Charles", "middle": [], "last": "Sutton", "suffix": "" }, { "first": "Augustus", "middle": [], "last": "Odena", "suffix": "" } ], "year": 2021, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2112.00114" ] }, "num": null, "urls": [], "raw_text": "Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. 2021. Show your work: Scratchpads for intermedi- ate computation with language models. Computing Research Repository, arXiv:2112.00114. Version 1.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Nando de Freitas, and Shane Legg. 2021. 
Shaking the foundations: delusions in sequence models for interaction and control", "authors": [ { "first": "Pedro", "middle": [ "A" ], "last": "Ortega", "suffix": "" }, { "first": "Markus", "middle": [], "last": "Kunesch", "suffix": "" }, { "first": "Gr\u00e9goire", "middle": [], "last": "Del\u00e9tang", "suffix": "" }, { "first": "Tim", "middle": [], "last": "Genewein", "suffix": "" }, { "first": "Jordi", "middle": [], "last": "Grau-Moya", "suffix": "" }, { "first": "Joel", "middle": [], "last": "Veness", "suffix": "" }, { "first": "Jonas", "middle": [], "last": "Buchli", "suffix": "" }, { "first": "Jonas", "middle": [], "last": "Degrave", "suffix": "" }, { "first": "Bilal", "middle": [], "last": "Piot", "suffix": "" }, { "first": "Julien", "middle": [], "last": "Perolat", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Everitt", "suffix": "" }, { "first": "Corentin", "middle": [], "last": "Tallec", "suffix": "" }, { "first": "Emilio", "middle": [], "last": "Parisotto", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Erez", "suffix": "" }, { "first": "Yutian", "middle": [], "last": "Chen", "suffix": "" } ], "year": null, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2110.10819" ] }, "num": null, "urls": [], "raw_text": "Pedro A. Ortega, Markus Kunesch, Gr\u00e9goire Del\u00e9tang, Tim Genewein, Jordi Grau-Moya, Joel Veness, Jonas Buchli, Jonas Degrave, Bilal Piot, Julien Perolat, Tom Everitt, Corentin Tallec, Emilio Parisotto, Tom Erez, Yutian Chen, Scott Reed, Marcus Hutter, Nando de Freitas, and Shane Legg. 2021. Shaking the foun- dations: delusions in sequence models for interac- tion and control. Computing Research Repository, arXiv:2110.10819. Version 1.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "The LAMBADA dataset: Word prediction requiring a broad discourse context", "authors": [ { "first": "Denis", "middle": [], "last": "Paperno", "suffix": "" }, { "first": "Germ\u00e1n", "middle": [], "last": "Kruszewski", "suffix": "" }, { "first": "Angeliki", "middle": [], "last": "Lazaridou", "suffix": "" }, { "first": "Ngoc", "middle": [ "Quan" ], "last": "Pham", "suffix": "" }, { "first": "Raffaella", "middle": [], "last": "Bernardi", "suffix": "" }, { "first": "Sandro", "middle": [], "last": "Pezzelle", "suffix": "" }, { "first": "Marco", "middle": [], "last": "Baroni", "suffix": "" }, { "first": "Gemma", "middle": [], "last": "Boleda", "suffix": "" }, { "first": "Raquel", "middle": [], "last": "Fern\u00e1ndez", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", "volume": "1", "issue": "", "pages": "1525--1534", "other_ids": { "DOI": [ "10.18653/v1/P16-1144" ] }, "num": null, "urls": [], "raw_text": "Denis Paperno, Germ\u00e1n Kruszewski, Angeliki Lazari- dou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fern\u00e1ndez. 2016. The LAMBADA dataset: Word prediction requiring a broad discourse context. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1525-1534, Berlin, Germany. 
Association for Computational Linguistics.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Overview of question answering for machine reading evaluation", "authors": [ { "first": "Anselmo", "middle": [], "last": "Pe\u00f1as", "suffix": "" }, { "first": "Eduard", "middle": [], "last": "Hovy", "suffix": "" }, { "first": "Pamela", "middle": [], "last": "Forner", "suffix": "" }, { "first": "\u00c1lvaro", "middle": [], "last": "Rodrigo", "suffix": "" }, { "first": "Richard", "middle": [], "last": "Sutcliffe", "suffix": "" }, { "first": "Roser", "middle": [], "last": "Morante", "suffix": "" } ], "year": 2011, "venue": "Information Access Evaluation. Multilinguality, Multimodality, and Visualization", "volume": "", "issue": "", "pages": "303--320", "other_ids": { "DOI": [ "10.1007/978-3-642-40802-1_29" ] }, "num": null, "urls": [], "raw_text": "Anselmo Pe\u00f1as, Eduard Hovy, Pamela Forner, \u00c1lvaro Rodrigo, Richard Sutcliffe, and Roser Morante. 2013. QA4MRE 2011-2013: Overview of question answer- ing for machine reading evaluation. In Information Access Evaluation. Multilinguality, Multimodality, and Visualization, pages 303-320, Berlin, Heidel- berg. Springer Berlin Heidelberg.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Improving language understanding by generative pre-training", "authors": [ { "first": "Alec", "middle": [], "last": "Radford", "suffix": "" }, { "first": "Karthik", "middle": [], "last": "Narasimhan", "suffix": "" }, { "first": "Tim", "middle": [], "last": "Salimans", "suffix": "" }, { "first": "Ilya", "middle": [], "last": "Sutskever", "suffix": "" } ], "year": 2018, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training. Technical report, OpenAI.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Language models are unsupervised multitask learners", "authors": [ { "first": "Alec", "middle": [], "last": "Radford", "suffix": "" }, { "first": "Jeff", "middle": [], "last": "Wu", "suffix": "" }, { "first": "Rewon", "middle": [], "last": "Child", "suffix": "" }, { "first": "David", "middle": [], "last": "Luan", "suffix": "" }, { "first": "Dario", "middle": [], "last": "Amodei", "suffix": "" }, { "first": "Ilya", "middle": [], "last": "Sutskever", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. 
Techni- cal report, OpenAI.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Version 2", "authors": [ { "first": "Jack", "middle": [ "W" ], "last": "Rae", "suffix": "" }, { "first": "Sebastian", "middle": [], "last": "Borgeaud", "suffix": "" }, { "first": "Trevor", "middle": [], "last": "Cai", "suffix": "" }, { "first": "Katie", "middle": [], "last": "Millican", "suffix": "" }, { "first": "Jordan", "middle": [], "last": "Hoffmann", "suffix": "" }, { "first": "H", "middle": [ "Francis" ], "last": "Song", "suffix": "" }, { "first": "John", "middle": [], "last": "Aslanides", "suffix": "" }, { "first": "Sarah", "middle": [], "last": "Henderson", "suffix": "" }, { "first": "Roman", "middle": [], "last": "Ring", "suffix": "" }, { "first": "Susannah", "middle": [], "last": "Young", "suffix": "" }, { "first": "Eliza", "middle": [], "last": "Rutherford", "suffix": "" }, { "first": "Tom", "middle": [], "last": "Hennigan", "suffix": "" }, { "first": "Jacob", "middle": [], "last": "Menick", "suffix": "" }, { "first": "Albin", "middle": [], "last": "Cassirer", "suffix": "" }, { "first": "Richard", "middle": [], "last": "Powell", "suffix": "" }, { "first": "George", "middle": [], "last": "Van Den Driessche", "suffix": "" }, { "first": "Lisa", "middle": [ "Anne" ], "last": "Hendricks", "suffix": "" }, { "first": "Maribeth", "middle": [], "last": "Rauh", "suffix": "" }, { "first": "Po-Sen", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Amelia", "middle": [], "last": "Glaese", "suffix": "" }, { "first": "Johannes", "middle": [], "last": "Welbl", "suffix": "" }, { "first": "Sumanth", "middle": [], "last": "Dathathri", "suffix": "" }, { "first": "Saffron", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Jonathan", "middle": [], "last": "Uesato", "suffix": "" }, { "first": "John", "middle": [], "last": "Mellor", "suffix": "" }, { "first": "Irina", "middle": [], "last": "Higgins", "suffix": "" }, { "first": "Antonia", "middle": [], "last": "Creswell", "suffix": "" }, { "first": "Nat", "middle": [], "last": "Mcaleese", "suffix": "" }, { "first": "Amy", "middle": [], "last": "Wu", "suffix": "" }, { "first": "Erich", "middle": [], "last": "Elsen", "suffix": "" }, { "first": "M", "middle": [], "last": "Siddhant", "suffix": "" }, { "first": "Elena", "middle": [], "last": "Jayakumar", "suffix": "" }, { "first": "David", "middle": [], "last": "Buchatskaya", "suffix": "" }, { "first": "Esme", "middle": [], "last": "Budden", "suffix": "" }, { "first": "Karen", "middle": [], "last": "Sutherland", "suffix": "" }, { "first": "Michela", "middle": [], "last": "Simonyan", "suffix": "" }, { "first": "Laurent", "middle": [], "last": "Paganini", "suffix": "" }, { "first": "Lena", "middle": [], "last": "Sifre", "suffix": "" }, { "first": "", "middle": [], "last": "Martens", "suffix": "" }, { "first": "Lorraine", "middle": [], "last": "Xiang", "suffix": "" }, { "first": "Adhiguna", "middle": [], "last": "Li", "suffix": "" }, { "first": "Aida", "middle": [], "last": "Kuncoro", "suffix": "" }, { "first": "Elena", "middle": [], "last": "Nematzadeh", "suffix": "" }, { "first": "Domenic", "middle": [], "last": "Gribovskaya", "suffix": "" }, { "first": "", "middle": [], "last": "Donato", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2112.11446" ] }, "num": null, "urls": [], "raw_text": "Jack W. Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, H. 
Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susan- nah Young, Eliza Rutherford, Tom Hennigan, Ja- cob Menick, Albin Cassirer, Richard Powell, George van den Driessche, Lisa Anne Hendricks, Mari- beth Rauh, Po-Sen Huang, Amelia Glaese, Jo- hannes Welbl, Sumanth Dathathri, Saffron Huang, Jonathan Uesato, John Mellor, Irina Higgins, Antonia Creswell, Nat McAleese, Amy Wu, Erich Elsen, Sid- dhant M. Jayakumar, Elena Buchatskaya, David Bud- den, Esme Sutherland, Karen Simonyan, Michela Pa- ganini, Laurent Sifre, Lena Martens, Xiang Lorraine Li, Adhiguna Kuncoro, Aida Nematzadeh, Elena Gribovskaya, Domenic Donato, Angeliki Lazaridou, Arthur Mensch, Jean-Baptiste Lespiau, Maria Tsim- poukelli, Nikolai Grigorev, Doug Fritz, Thibault Sot- tiaux, Mantas Pajarskas, Toby Pohlen, Zhitao Gong, Daniel Toyama, Cyprien de Masson d'Autume, Yujia Li, Tayfun Terzi, Vladimir Mikulik, Igor Babuschkin, Aidan Clark, Diego de Las Casas, Aurelia Guy, Chris Jones, James Bradbury, Matthew Johnson, Blake A. Hechtman, Laura Weidinger, Iason Gabriel, William S. Isaac, Edward Lockhart, Simon Osindero, Laura Rimell, Chris Dyer, Oriol Vinyals, Kareem Ayoub, Jeff Stanway, Lorrayne Bennett, Demis Hass- abis, Koray Kavukcuoglu, and Geoffrey Irving. 2022. Scaling language models: Methods, analysis & in- sights from training Gopher. Computing Research Repository, arXiv:2112.11446. Version 2.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Compressive transformers for long-range sequence modelling", "authors": [ { "first": "Anna", "middle": [], "last": "Jack W Rae", "suffix": "" }, { "first": "", "middle": [], "last": "Potapenko", "suffix": "" }, { "first": "M", "middle": [], "last": "Siddhant", "suffix": "" }, { "first": "Chloe", "middle": [], "last": "Jayakumar", "suffix": "" }, { "first": "Timothy", "middle": [ "P" ], "last": "Hillier", "suffix": "" }, { "first": "", "middle": [], "last": "Lillicrap", "suffix": "" } ], "year": 2019, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1911.05507" ] }, "num": null, "urls": [], "raw_text": "Jack W Rae, Anna Potapenko, Siddhant M Jayaku- mar, Chloe Hillier, and Timothy P Lillicrap. 2019. Compressive transformers for long-range se- quence modelling. Computing Research Repository, arXiv:1911.05507. Version 1.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", "authors": [ { "first": "Colin", "middle": [], "last": "Raffel", "suffix": "" }, { "first": "Noam", "middle": [], "last": "Shazeer", "suffix": "" }, { "first": "Adam", "middle": [], "last": "Roberts", "suffix": "" }, { "first": "Katherine", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Sharan", "middle": [], "last": "Narang", "suffix": "" }, { "first": "Michael", "middle": [], "last": "Matena", "suffix": "" }, { "first": "Yanqi", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Wei", "middle": [], "last": "Li", "suffix": "" }, { "first": "Peter J", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2020, "venue": "Journal of Machine Learning Research", "volume": "21", "issue": "", "pages": "1--67", "other_ids": {}, "num": null, "urls": [], "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text trans- former. 
Journal of Machine Learning Research, 21:1- 67.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "ZeRO: Memory optimizations toward training trillion parameter models", "authors": [ { "first": "Samyam", "middle": [], "last": "Rajbhandari", "suffix": "" }, { "first": "Jeff", "middle": [], "last": "Rasley", "suffix": "" }, { "first": "Olatunji", "middle": [], "last": "Ruwase", "suffix": "" }, { "first": "Yuxiong", "middle": [], "last": "He", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis, SC '20", "volume": "", "issue": "", "pages": "", "other_ids": { "DOI": [ "10.5555/3433701.3433727" ] }, "num": null, "urls": [], "raw_text": "Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. 2020. ZeRO: Memory optimiza- tions toward training trillion parameter models. In Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis, SC '20. IEEE Press.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "DeepSpeed: System optimizations enable training deep learning models with over 100 billion parameters", "authors": [ { "first": "Jeff", "middle": [], "last": "Rasley", "suffix": "" }, { "first": "Samyam", "middle": [], "last": "Rajbhandari", "suffix": "" }, { "first": "Olatunji", "middle": [], "last": "Ruwase", "suffix": "" }, { "first": "Yuxiong", "middle": [], "last": "He", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", "volume": "", "issue": "", "pages": "3505--3506", "other_ids": { "DOI": [ "10.1145/3394486.3406703" ] }, "num": null, "urls": [], "raw_text": "Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 2020. DeepSpeed: System optimiza- tions enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowl- edge Discovery & Data Mining, pages 3505-3506, New York, NY, USA. Association for Computing Machinery.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "WuDao: Pretrain the world", "authors": [ { "first": "Jie", "middle": [], "last": "Tang", "suffix": "" } ], "year": 2021, "venue": "Keynote address at the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jie Tang. 2021. WuDao: Pretrain the world. Keynote ad- dress at the European Conference on Machine Learn- ing and Principles and Practice of Knowledge Dis- covery in Databases.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "HEAD-QA: A healthcare dataset for complex reasoning", "authors": [ { "first": "David", "middle": [], "last": "Vilares", "suffix": "" }, { "first": "Carlos", "middle": [], "last": "G\u00f3mez-Rodr\u00edguez", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "960--966", "other_ids": { "DOI": [ "10.18653/v1/P19-1092" ] }, "num": null, "urls": [], "raw_text": "David Vilares and Carlos G\u00f3mez-Rodr\u00edguez. 2019. HEAD-QA: A healthcare dataset for complex reason- ing. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 960-966, Florence, Italy. 
Association for Computa- tional Linguistics.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "SuperGLUE: A stickier benchmark for general-purpose language understanding systems", "authors": [ { "first": "Alex", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Yada", "middle": [], "last": "Pruksachatkun", "suffix": "" }, { "first": "Nikita", "middle": [], "last": "Nangia", "suffix": "" }, { "first": "Amanpreet", "middle": [], "last": "Singh", "suffix": "" }, { "first": "Julian", "middle": [], "last": "Michael", "suffix": "" }, { "first": "Felix", "middle": [], "last": "Hill", "suffix": "" }, { "first": "Omer", "middle": [], "last": "Levy", "suffix": "" }, { "first": "Samuel", "middle": [], "last": "Bowman", "suffix": "" } ], "year": 2019, "venue": "Advances in Neural Information Processing Systems", "volume": "32", "issue": "", "pages": "3266--3280", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Aman- preet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2019. SuperGLUE: A stick- ier benchmark for general-purpose language under- standing systems. Advances in Neural Information Processing Systems, 32:3266-3280.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Mesh-Transformer-JAX: Modelparallel implementation of transformer language model with JAX", "authors": [ { "first": "Ben", "middle": [], "last": "Wang", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ben Wang. 2021. Mesh-Transformer-JAX: Model- parallel implementation of transformer language model with JAX.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "GPT-J-6B: A 6 billion parameter autoregressive language model", "authors": [ { "first": "Ben", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Aran", "middle": [], "last": "Komatsuzaki", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ben Wang and Aran Komatsuzaki. 2021. GPT-J-6B: A 6 billion parameter autoregressive language model.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Finetuned language models are zero-shot learners", "authors": [ { "first": "Jason", "middle": [], "last": "Wei", "suffix": "" }, { "first": "Maarten", "middle": [], "last": "Bosma", "suffix": "" }, { "first": "Y", "middle": [], "last": "Vincent", "suffix": "" }, { "first": "Kelvin", "middle": [], "last": "Zhao", "suffix": "" }, { "first": "Adams", "middle": [ "Wei" ], "last": "Guu", "suffix": "" }, { "first": "Brian", "middle": [], "last": "Yu", "suffix": "" }, { "first": "Nan", "middle": [], "last": "Lester", "suffix": "" }, { "first": "", "middle": [], "last": "Du", "suffix": "" }, { "first": "M", "middle": [], "last": "Andrew", "suffix": "" }, { "first": "Quoc V", "middle": [], "last": "Dai", "suffix": "" }, { "first": "", "middle": [], "last": "Le", "suffix": "" } ], "year": 2021, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2109.01652" ] }, "num": null, "urls": [], "raw_text": "Jason Wei, Maarten Bosma, Vincent Y Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, An- drew M Dai, and Quoc V Le. 2021. Finetuned lan- guage models are zero-shot learners. Computing Research Repository, arXiv:2109.01652. 
Version 5.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Crowdsourcing multiple choice science questions", "authors": [ { "first": "Johannes", "middle": [], "last": "Welbl", "suffix": "" }, { "first": "Nelson", "middle": [ "F" ], "last": "Liu", "suffix": "" }, { "first": "Matt", "middle": [], "last": "Gardner", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the 3rd Workshop on Noisy Usergenerated Text", "volume": "", "issue": "", "pages": "94--106", "other_ids": { "DOI": [ "10.18653/v1/W17-4413" ] }, "num": null, "urls": [], "raw_text": "Johannes Welbl, Nelson F. Liu, and Matt Gardner. 2017. Crowdsourcing multiple choice science questions. In Proceedings of the 3rd Workshop on Noisy User- generated Text, pages 94-106, Copenhagen, Den- mark. Association for Computational Linguistics.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Alignment by default. AI Alignment Forum", "authors": [ { "first": "John", "middle": [], "last": "Wentworth", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "John Wentworth. 2020. Alignment by default. AI Align- ment Forum.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "The steep cost of capture", "authors": [ { "first": "Meredith", "middle": [], "last": "Whittaker", "suffix": "" } ], "year": 2021, "venue": "Interactions", "volume": "28", "issue": "6", "pages": "50--55", "other_ids": { "DOI": [ "10.1145/3488666" ] }, "num": null, "urls": [], "raw_text": "Meredith Whittaker. 2021. The steep cost of capture. Interactions, 28(6):50-55.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Byt5: Towards a token-free future with pre-trained byte-to-byte models", "authors": [ { "first": "Linting", "middle": [], "last": "Xue", "suffix": "" }, { "first": "Aditya", "middle": [], "last": "Barua", "suffix": "" }, { "first": "Noah", "middle": [], "last": "Constant", "suffix": "" }, { "first": "Rami", "middle": [], "last": "Al-Rfou", "suffix": "" }, { "first": "Sharan", "middle": [], "last": "Narang", "suffix": "" }, { "first": "Mihir", "middle": [], "last": "Kale", "suffix": "" }, { "first": "Adam", "middle": [], "last": "Roberts", "suffix": "" }, { "first": "Colin", "middle": [], "last": "Raffel", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Linting Xue, Aditya Barua, Noah Constant, Rami Al- Rfou, Sharan Narang, Mihir Kale, Adam Roberts, and Colin Raffel. 2021. Byt5: Towards a token-free future with pre-trained byte-to-byte models. Comput- ing Research Repository.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Aditya Barua, and Colin Raffel. 2020. 
mT5: A massively multilingual pre-trained text-to-text transformer", "authors": [ { "first": "Linting", "middle": [], "last": "Xue", "suffix": "" }, { "first": "Noah", "middle": [], "last": "Constant", "suffix": "" }, { "first": "Adam", "middle": [], "last": "Roberts", "suffix": "" }, { "first": "Mihir", "middle": [], "last": "Kale", "suffix": "" }, { "first": "Rami", "middle": [], "last": "Al-Rfou", "suffix": "" }, { "first": "Aditya", "middle": [], "last": "Siddhant", "suffix": "" } ], "year": null, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2010.11934" ] }, "num": null, "urls": [], "raw_text": "Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2020. mT5: A massively multilingual pre-trained text-to-text transformer. Computing Re- search Repository, arXiv:2010.11934. Version 1.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "HellaSwag: Can a machine really finish your sentence?", "authors": [ { "first": "Rowan", "middle": [], "last": "Zellers", "suffix": "" }, { "first": "Ari", "middle": [], "last": "Holtzman", "suffix": "" }, { "first": "Yonatan", "middle": [], "last": "Bisk", "suffix": "" }, { "first": "Ali", "middle": [], "last": "Farhadi", "suffix": "" }, { "first": "Yejin", "middle": [], "last": "Choi", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "4791--4800", "other_ids": { "DOI": [ "10.18653/v1/P19-1472" ] }, "num": null, "urls": [], "raw_text": "Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. 2019. HellaSwag: Can a ma- chine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 4791-4800, Florence, Italy. Association for Computational Linguistics.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Xuefeng Jin, Qun Liu, and Yonghong Tian. 2021. Pangu\u03b1: Large-scale autoregressive pretrained chinese language models with auto-parallel computation. 
Computing Research Repository", "authors": [ { "first": "Wei", "middle": [], "last": "Zeng", "suffix": "" }, { "first": "Xiaozhe", "middle": [], "last": "Ren", "suffix": "" }, { "first": "Teng", "middle": [], "last": "Su", "suffix": "" }, { "first": "Hui", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Yi", "middle": [], "last": "Liao", "suffix": "" }, { "first": "Zhiwei", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Xin", "middle": [], "last": "Jiang", "suffix": "" }, { "first": "Zhenzhang", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Kaisheng", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Xiaoda", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Chen", "middle": [], "last": "Li", "suffix": "" }, { "first": "Ziyan", "middle": [], "last": "Gong", "suffix": "" }, { "first": "Yifan", "middle": [], "last": "Yao", "suffix": "" }, { "first": "Xinjing", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Jun", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Jianfeng", "middle": [], "last": "Yu", "suffix": "" }, { "first": "Qi", "middle": [], "last": "Guo", "suffix": "" }, { "first": "Yue", "middle": [], "last": "Yu", "suffix": "" }, { "first": "Yan", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Jin", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Hengtao", "middle": [], "last": "Tao", "suffix": "" }, { "first": "Dasen", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Zexuan", "middle": [], "last": "Yi", "suffix": "" }, { "first": "Fang", "middle": [], "last": "Peng", "suffix": "" }, { "first": "Fangqing", "middle": [], "last": "Jiang", "suffix": "" }, { "first": "Han", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Lingfeng", "middle": [], "last": "Deng", "suffix": "" }, { "first": "Yehong", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Zhe", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Chao", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Shaojie", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Mingyue", "middle": [], "last": "Guo", "suffix": "" }, { "first": "Shanzhi", "middle": [], "last": "Gu", "suffix": "" }, { "first": "Gaojun", "middle": [], "last": "Fan", "suffix": "" }, { "first": "Yaowei", "middle": [], "last": "Wang", "suffix": "" } ], "year": null, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2104.12369.Ver-sion1" ] }, "num": null, "urls": [], "raw_text": "Wei Zeng, Xiaozhe Ren, Teng Su, Hui Wang, Yi Liao, Zhiwei Wang, Xin Jiang, ZhenZhang Yang, Kaisheng Wang, Xiaoda Zhang, Chen Li, Ziyan Gong, Yi- fan Yao, Xinjing Huang, Jun Wang, Jianfeng Yu, Qi Guo, Yue Yu, Yan Zhang, Jin Wang, Hengtao Tao, Dasen Yan, Zexuan Yi, Fang Peng, Fangqing Jiang, Han Zhang, Lingfeng Deng, Yehong Zhang, Zhe Lin, Chao Zhang, Shaojie Zhang, Mingyue Guo, Shanzhi Gu, Gaojun Fan, Yaowei Wang, Xuefeng Jin, Qun Liu, and Yonghong Tian. 2021. Pangu- \u03b1: Large-scale autoregressive pretrained chinese lan- guage models with auto-parallel computation. Com- puting Research Repository, arXiv:2104.12369. Ver- sion 1.", "links": null }, "BIBREF30": { "ref_id": "b30", "title": "Florian Tram\u00e8r, and Nicholas Carlini. 2021. 
Counterfactual memorization in neural language models", "authors": [ { "first": "Chiyuan", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Daphne", "middle": [], "last": "Ippolito", "suffix": "" }, { "first": "Katherine", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Matthew", "middle": [], "last": "Jagielski", "suffix": "" } ], "year": null, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2112.12938" ] }, "num": null, "urls": [], "raw_text": "Chiyuan Zhang, Daphne Ippolito, Katherine Lee, Matthew Jagielski, Florian Tram\u00e8r, and Nicholas Car- lini. 2021. Counterfactual memorization in neural language models. Computing Research Repository, arXiv:2112.12938. Version 1.", "links": null }, "BIBREF31": { "ref_id": "b31", "title": "Adapting language models for zero-shot learning by meta-tuning on dataset and prompt collections", "authors": [ { "first": "Ruiqi", "middle": [], "last": "Zhong", "suffix": "" }, { "first": "Kristy", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Zheng", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Dan", "middle": [], "last": "Klein", "suffix": "" } ], "year": 2021, "venue": "Computing Research Repository", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2104.04670" ] }, "num": null, "urls": [], "raw_text": "Ruiqi Zhong, Kristy Lee, Zheng Zhang, and Dan Klein. 2021. Adapting language models for zero-shot learn- ing by meta-tuning on dataset and prompt collections. Computing Research Repository, arXiv:2104.04670. Version 5.", "links": null } }, "ref_entries": { "FIGREF0": { "text": "Training and validation loss for GPT-NeoX-20B. As the validation loss continued to fall into the beginning of the second epoch, we decided to let it train further.", "num": null, "type_str": "figure", "uris": null }, "FIGREF1": { "text": ",ARC (Clark et al., 2018), HeadQA (English)(Vilares and G\u00f3mez-Rodr\u00edguez, 2019), HellaSwag(Zellers et al., 2019), LAMBDADA(Paperno et al., 2016), LogiQA (Liu et al., 2020, OpenBookQA (Mihaylov et al., 2018), PiQA (Bisk et al., 2020), PROST (Aroca-Ouellette et al., 2021), QA4MRE(Pe\u00f1as et al., 2013) (2013), SciQ(Welbl et al., 2017), TriviaQA (Joshi et al., 2017,Winogrande (Sakaguchi et al., 2021), and the SuperGlue version of the Winograd Schemas Challenge (WSC)(Wang et al., 2019).", "num": null, "type_str": "figure", "uris": null }, "FIGREF2": { "text": "Zero-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models on a variety of language modeling benchmarks.", "num": null, "type_str": "figure", "uris": null }, "FIGREF3": { "text": "Figure 4: Zero-shot performance of GPT-NeoX-20B compared to and FairSeq and OpenAI models on arithmetic tasks. Random performance on these tasks is 0%, and we were unable to find information on median human performance.", "num": null, "type_str": "figure", "uris": null }, "FIGREF4": { "text": "Five-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models on Hendrycks et al. 
(2021a).", "num": null, "type_str": "figure", "uris": null }, "FIGREF5": { "text": "Architecture diagram of a single training node.", "num": null, "type_str": "figure", "uris": null }, "FIGREF6": { "text": "Zero-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models on a variety of language modeling benchmarks.", "num": null, "type_str": "figure", "uris": null }, "FIGREF7": { "text": "Length-normalized zero-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models on a variety of language modeling benchmarks.", "num": null, "type_str": "figure", "uris": null }, "FIGREF8": { "text": "Five-Shot Results on Natural Language Understanding Tasks (GPT-J and GPT-NeoX). GPT-3 is omitted due to financial limitations.", "num": null, "type_str": "figure", "uris": null }, "FIGREF9": { "text": "Zero-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models on Hendrycks et al. (2021a).", "num": null, "type_str": "figure", "uris": null }, "FIGREF10": { "text": "Five-shot performance of GPT-NeoX-20B compared to GPT-J-6B and FairSeq and OpenAI models onHendrycks et al. (2021a)", "num": null, "type_str": "figure", "uris": null }, "TABREF1": { "type_str": "table", "text": "Kim, HyoungSeok Kim, Sang-Woo Lee, Gichang Lee, Donghyun Kwak, Jeon Dong Hyeon, Sunghyun Park, Sungju Kim, Seonhoon Kim, Dongpil Seo, Heungsub Lee, Minyoung Jeong, Sungjae Lee, Minsub Kim, Suk Hyun Ko, Seokhun Kim, Taeyong Park, Jinuk Kim, Soyoung Kang, Na-Hyeon Ryu, Kang Min Yoo, Minsuk Chang, Soobin Suh, Black was the lead developer and overall point person for the project. Stella Biderman was the lead scientist and project manager.InTable 1we attach the full configuration details used to train GPT-NeoX-20B. The file is available in .yaml format usable in gpt-neox at https:// github.com/EleutherAI/gpt-neox, where we also provide documentation describing the role of each parameter.", "html": null, "num": null, "content": "
Yasaman Razeghi, Robert L Logan IV, Matt Gardner, A Individual Contributions Boseop Sookyo In, Jinseong Park, Kyungduk Kim, Hiun Kim, Jisu Jeong, Yong Goo Yeo, Donghoon Ham, Dongju Park, Min Young Lee, Jaewook Kang, Inho Kang, Jung-Woo Ha, Woomyoung Park, and Nako Sung. 2021. What changes can large-scale language models bring? intensive study on HyperCLOVA: Billions-scale Korean generative pretrained trans-formers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Process-ing, pages 3405-3424, Online and Punta Cana, Do-minican Republic. Association for Computational Linguistics. Philipp Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. In Proceedings of Machine Translation Summit X: Papers, pages 79-86, Phuket, Thailand. Aran Komatsuzaki. 2019. One epoch is all you need. Computing Research Repository, arXiv:1906.06669. Version 1. Vanessa Kosoy. 2016. IRL is hard. AI Alignment Fo-rum. Julia Kreutzer, Isaac Caswell, Lisa Wang, Ahsan Wahab, Daan van Esch, Nasanbayar Ulzii-Orshikh, Allah-sera Tapo, Nishant Subramani, Artem Sokolov, Clay-tone Sikasote, Monang Setyawan, Supheakmungkol Sarin, Sokhar Samb, Beno\u00eet Sagot, Clara Rivera, An-nette Rios, Isabel Papadimitriou, Salomey Osei, Pe-dro Ortiz Suarez, Iroro Orife, Kelechi Ogueji, An-dre Niyongabo Rubungo, Toan Q. Nguyen, Math-ias M\u00fcller, Andr\u00e9 M\u00fcller, Shamsuddeen Hassan Muhammad, Nanda Muhammad, Ayanda Mnyak-eni, Jamshidbek Mirzakhalov, Tapiwanashe Matan-gira, Colin Leong, Nze Lawson, Sneha Kudugunta, Yacine Jernite, Mathias Jenny, Orhan Firat, Bonaven-ture F. P. Dossou, Sakhile Dlamini, Nisansa de Silva, Sakine \u00c7abuk Ball\u0131, Stella Biderman, Alessia Bat-tisti, Ahmed Baruwa, Ankur Bapna, Pallavi Baljekar, Implementation and Engineering Implementation of training infrastructure: Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Samuel Weinbach Scaling experiments and optimization: Sid Black, Stella Biderman, Quentin Anthony, Samuel Weinbach Positional Embeddings: Sid Black, Eric Hallahan, Michael Pieler Tokenizer: Sid Black Miscellaneous: USVSN Sai Prashanth, Ben Wang Scientific Experimentation Evaluations: Stella Biderman, Leo Gao, Jonathan Tow, Sid Black, Shivanshu Purohit, Horace He, Laurence Golding Positional Embeddings: Stella Biderman, Laurence Golding, Michael Pieler Tokenizer: Stella Biderman, Jason Phang, Leo Gao Broader Impacts Alignment Implications: Leo Gao, Connor Leahy, Laria Reynolds, Kyle McDonell Environmental Impact: Stella Biderman, Eric Hallahan B Full Configuration DetailsMandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehen-sion. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol-ume 1: Long Papers), pages 1601-1611, Vancouver, Canada. Association for Computational Linguistics. Nikhil Kandpal, Eric Wallace, and Colin Raffel. 2022. Deduplicating training data mitigates privacy risks in language models. Computing Research Repository, arXiv:2202.06539. Version 2. Alexandre Lacoste, Alexandra Luccioni, Victor Schmidt, and Thomas Dandres. 2019. Quantifying the carbon emissions of machine learning. Comput-ing Research Repository, arXiv:1910.09700. Version 2. Connor Leahy. 2021. Why Release a Large Language Model? EleutherAI Blog. Connor Leahy and Stella Biderman. 2021. The hard problem of aligning AI to human values. In The State of AI Ethics Report, volume 4, pages 180-183. 
The Montreal AI Ethics Institute. Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. 2021. Deduplicating training data makes language models better. Computing Re-search Repository, arXiv:2107.06499. Version 1. Opher Lieber, Or Sharir, Barak Lenz, and Yoav Shoham. 2021. Jurassic-1: Technical details and evaluation. Technical report, AI21 Labs. Stephanie Lin, Jacob Hilton, and Owain Evans. 2021. TruthfulQA: Measuring how models mimic hu-man falsehoods. Computing Research Repository, arXiv:2109.07958. Version 1. Pierre Lison and J\u00f6rg Tiedemann. 2016. OpenSub-titles2016: Extracting large parallel corpora from movie and TV subtitles. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 923-929, Portoro\u017e, Slovenia. European Language Resources Association (ELRA). Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. 2020. LogiQA: A chal-lenge dataset for machine reading comprehension with logical reasoning. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 3622-3628. Interna-tional Joint Conferences on Artificial Intelligence Organization. Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. Computing Research Repository, arXiv:1711.05101. Version 3. J. Nathan Matias. 2020. Why we need industry-independent research on tech & society. Citizens and Technology Lab. search Repository, arXiv:2005.00661. Version 1. ality in abstractive summarization. Computing Re-Ryan McDonald. 2020. On faithfulness and factu-Joshua Maynez, Shashi Narayan, Bernd Bohnet, and Configuration Key Value attention-dropout 0 bias-gelu-fusion True checkpoint-activations True checkpoint-num-layers 1 data-impl mmap distributed-backend nccl eval-interval 1000 eval-iters 10 fp16.enabled True fp16.fp16 True fp16.hysteresis 2 fp16.initial-scale-power 12 fp16.loss-scale 0 fp16.loss-scale-window 1000 fp16.min-loss-scale 1 gpt-j-residual True gradient-accumulation-steps 32 gradient-clipping 1.0 hidden-dropout 0 hidden-size 6144 init-method small-init log-interval 2 lr-decay-iters 150000 lr-decay-style cosine max-position-embeddings 2048 min-lr 9.7e-06 model-parallel-size 2 no-weight-tying True norm layernorm num-attention-heads 64 num-layers 44 optimizer.params.betas [0.9, 0.95] optimizer.params.eps 1e-08 optimizer.params.lr 9.7e-05 optimizer.type Adam output-layer-init-method wang-init output-layer-parallelism column partition-activations False pipe-parallel-size 4 pos-emb rotary rotary-pct 0.25 save-interval 500 scaled-upper-triang-masked-softmax-fusion True seq-length 2048 split 995,4,1 steps-per-print 2 synchronize-each-layer True tokenizer-type HFTokenizer train-iters 150000 train-micro-batch-size-per-gpu 4 vocab-file 20B-tokenizer.json wall-clock-breakdown False warmup 0.01 weight-decay 0.01 zero-optimization.allgather-bucket-size 1260000000 zero-optimization.allgather-partitions True zero-optimization.contiguous-gradients True zero-optimization.cpu-offload False zero-optimization.overlap-comm True zero-optimization.reduce-bucket-size 1260000000 zero-optimization.reduce-scatter True zero-optimization.stage 1
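The key-value listing above corresponds to the training configuration shipped as a .yaml file with gpt-neox. As a minimal sketch only (not the project's own tooling), the snippet below shows how such a file could be loaded with PyYAML and a few of the Table 1 values read back; the file name 20B.yml and the exact key spellings are assumptions taken from the table.

```python
# Minimal sketch: inspect a GPT-NeoX-style YAML config (assumed file name).
# Requires PyYAML; keys mirror the Configuration Key column of Table 1.
import yaml

with open("20B.yml") as f:  # hypothetical local copy of the config
    cfg = yaml.safe_load(f)

hidden_size = cfg["hidden-size"]        # 6144
num_layers = cfg["num-layers"]          # 44
num_heads = cfg["num-attention-heads"]  # 64
seq_length = cfg["seq-length"]          # 2048

# Micro-batch size per GPU times gradient accumulation steps gives the
# number of sequences processed per optimizer step on each replica.
batch_per_replica = (cfg["train-micro-batch-size-per-gpu"]
                     * cfg["gradient-accumulation-steps"])  # 4 * 32 = 128
print(hidden_size, num_layers, num_heads, seq_length, batch_per_replica)
```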
" }, "TABREF2": { "type_str": "table", "text": "The full configuration details for GPT-NeoX-20B training", "html": null, "num": null, "content": "" }, "TABREF6": { "type_str": "table", "text": "\u00b1 0.012 0.233 \u00b1 0.012 0.263 \u00b1 0.013 0.296 \u00b1 0.013 0.329 \u00b1 0.014 0.345 \u00b1 0.014 OpenBookQA 0.168 \u00b1 0.017 0.190 \u00b1 0.018 0.238 \u00b1 0.019 0.254 \u00b1 0.019 0.292 \u00b1 0.020 0.296 \u00b1 0.020 HeadQA (English) 0.233 \u00b1 0.008 0.233 \u00b1 0.008 0.256 \u00b1 0.008 0.264 \u00b1 0.008 0.280 \u00b1 0.009 0.280 \u00b1 0.009 LogiQA 0.220 \u00b1 0.016 0.230 \u00b1 0.017 0.214 \u00b1 0.016 0.212 \u00b1 0.016 0.232 \u00b1 0.017 0.240 \u00b1 0.017 PROST 0.215 \u00b1 0.003 0.257 \u00b1 0.003 0.257 \u00b1 0.003 0.230 \u00b1 0.003 0.272 \u00b1 0.003 0.252 \u00b1 0.003 QA4MRE (2013) 0.285 \u00b1 0.027 0.335 \u00b1 0.028 0.327 \u00b1 0.028 0.380 \u00b1 0.029 0.370 \u00b1 0.029 0.380 \u00b1 0.029", "html": null, "num": null, "content": "
-3)
" }, "TABREF7": { "type_str": "table", "text": "", "html": null, "num": null, "content": "
Task            GPT-J 6B        GPT-NeoX 20B    GPT-3 (Ada / Babbage / Curie / DaVinci)
ANLI Round 1    0.322 ± 0.015   0.312 ± 0.015   -
ANLI Round 2    0.331 ± 0.015   0.329 ± 0.015   -
ANLI Round 3    0.346 ± 0.014   0.342 ± 0.014   -
LAMBADA         0.662 ± 0.007   0.698 ± 0.006   -
WSC             0.365 ± 0.047   0.385 ± 0.…     -
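The ± figures in these result tables read like standard errors on task accuracy. Purely as an illustration (this is not the paper's evaluation code), accuracy and a binomial standard error can be computed from per-example 0/1 correctness scores as in the sketch below; the helper name and toy data are made up.

```python
# Illustrative only: accuracy and its standard error from 0/1 scores,
# the kind of summary the ± columns in the tables suggest.
import math

def acc_with_stderr(correct):
    """correct: list of 0/1 ints, one per evaluated example."""
    n = len(correct)
    acc = sum(correct) / n
    # Standard error of the mean for a Bernoulli sample.
    stderr = math.sqrt(acc * (1.0 - acc) / n)
    return acc, stderr

# Toy usage with made-up data.
scores = [1, 0, 1, 1, 0, 1, 0, 1]
acc, se = acc_with_stderr(scores)
print(f"{acc:.3f} ± {se:.3f}")
```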
" }, "TABREF10": { "type_str": "table", "text": "Number of tokens from tokenizing the Pile validation set, excluding whitespace tokens.", "html": null, "num": null, "content": "" } } } }