import os
import re

import requests
import parsel

# `selector` below is assumed to be the parsel.Selector built from the series
# catalogue page requested earlier in the tutorial; it exposes the chapter list.
title_list = selector.css('#detail-list-select li a:nth-child(2)::text').getall()
url_list = selector.css('#detail-list-select li a:nth-child(2)::attr(href)').getall()
if not os.path.exists('./妖神记/'):
    os.makedirs('./妖神记/')
for title, url in zip(title_list, url_list):
    print(f'-------------------------- crawling {title} -------------------------')
    index = 1
    target_url = f"https://www.kuimh.com{url}"
    resp = requests.get(target_url)
    chapter_selector = parsel.Selector(resp.text)
    # Collect the page-image URLs: the first three `src` attributes plus all
    # lazy-loaded addresses stored in the `data-echo` attribute.
    sub_url_list = chapter_selector.css('.comicpage div img::attr(src)').getall()[:3]
    sub_url_list += chapter_selector.css('.comicpage div img::attr(data-echo)').getall()
    # Strip characters that are not allowed in directory names.
    title = re.sub(r'[\/:*?"<>|.]', "", title)
    if not os.path.exists('./妖神记/' + title):
        os.makedirs('./妖神记/' + title)
    for url_ in sub_url_list:
        image = requests.get(url_).content
        with open(f'./妖神记/{title}/{index}页.jpg', mode='wb') as f:
            f.write(image)
        print(f'page {index} downloaded successfully')
        index += 1
    print(title, 'downloaded successfully!!!')
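
For completeness, here is a minimal sketch of how the top-level `selector` used above could be obtained. The catalogue URL shown is a hypothetical placeholder; the real address appears in the earlier step of the tutorial where the chapter list is first requested.

import requests
import parsel

# Hypothetical catalogue URL; substitute the real series page used earlier.
catalog_url = 'https://www.kuimh.com/book/xxxx'
resp = requests.get(catalog_url)
# This Selector is what the chapter-list extraction at the top relies on.
selector = parsel.Selector(resp.text)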