import os
import sys
import copy
import time
import json
import zipfile
import requests
import dicttoxml
from io import BytesIO
from datetime import date
from PIL import Image, ImageFont, ImageDraw

BASE = 'https://www.akuankka.fi'
TYPE = 't'  # 't' = Taskukirja (pocket book), 'l' = Lehti (magazine)
WIDTH = 79
try:
	WIDTH = os.get_terminal_size()[0]-1
except OSError: pass  # stdout is not a terminal; keep the default width

# Session cookie used to log in and receive the files: sign in at akuankka.fi and copy the 'smf' cookie value from your browser's developer tools.
COOKIES = {'smf': 'eiodqugn646kmj9odncjh8brom'}
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:103.0) Gecko/20100101 Firefox/103.0'}
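# Template for the ACBF (Advanced Comic Book Format) 1.1 document written into
# each archive. It relies on a dicttoxml release with '@attrs'/fold_list
# support: '@attrs' dicts become XML attributes and the empty-string key
# carries the element's text content.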
STRUCTURE = {
	'xmlns': 'http://www.acbf.info/xml/acbf/1.1',
	'meta-data': {
		'book-info': {
			'author': [{
				'@attrs': {
					'activity': ''
				},
				'nickname': ''
			}],
			'book-title': '',
			'characters': {
				'name': []
			},
			'coverpage': {
				'image': {
					'@attrs': {
						'href': ''
					}
				},
				'frame': [{
					'@attrs': {
						'points': ''
					}
				}]
			},
			'sequence': {
				'@attrs': {
					'title': '',
					'volume': ''
				},
				'': ''
			},
			'databaseref': [{
				'@attrs': {
					'dbname': '',
					'type': ''
				},
				'': ''
			}]
		},
		'publish-info': {
			'publisher': '',
			'publish-date': {
				'@attrs': {
					'value': ''
				},
				'': ''
			}
		},
		'document-info': {
			'creation-date': {
				'@attrs': {
					'value': ''
				},
				'': ''
			},
			'source': {
				'p': ''
			},
			'id': '',
			'version': ''
		}
	},
	'body': {
		'@attrs': {
			'bgcolor': '#FFFFFF'
		},
		'page': [{
			'@attrs': {
				'transition': 'scroll_right'
			},
			'title': '',
			'image': {
				'@attrs': {
					'href': ''
				}
			},
			'frame': [{
				'@attrs': {
					'points': ''
				}
			}]
		}]
	}
}


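# Recursively remove every occurrence of remove_key from a nested dict/list structure.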
def remove_a_key(d, remove_key):
	if isinstance(d, dict):
		for key in list(d.keys()):
			if key == remove_key: del d[key]
			else: remove_a_key(d[key], remove_key)
	elif isinstance(d, list):
		for el in d:
			remove_a_key(el, remove_key)

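# Return main with every character of 'old' replaced by 'new'.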
def repm(main, old, new):
	for x in old:
		main = main.replace(x, new)
	return main

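# Build a frame 'points' string from a panel dict: the last four values are
# treated as divisors of the page width (w/value, or w when the value is 0),
# rounded and joined with spaces.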
def points(d, size):
	w, h = size
	vals = []
	for v in list(d.values())[-4:]:
		vals.append(
			str(round(w/float(v) if float(v) else w)))
	return ' '.join(vals)

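# Fetch an image through the authenticated session and return it as a PIL Image.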
def dlimg(url):
	r = requests.get(url, headers=HEADERS, cookies=COOKIES, stream=True)
	if r.status_code == 200:
		return Image.open(r.raw)
	else:
		raise Exception(f'HTTP {r.status_code} while fetching {url}')

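# Compose a page image. The strip at (430, 0)-(671, 22) appears to hold a
# per-copy header: it is either replaced with the same strip from the story's
# default image or painted over with a nearby background pixel, and an optional
# transparent text layer is pasted on top.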
def im(bg, default=None, txt=None):
	bg = dlimg(BASE+bg)
	if default:
		default = dlimg(default).resize(bg.size).crop((430, 0, 671, 22))
		bg.paste(default, (430, 0))
		default.close()
	else:
		draw = ImageDraw.Draw(bg)
		draw.rectangle([430, 0, 671, 22], bg.getpixel((672, 23)))
	if txt:
		txt = dlimg(BASE+txt).resize(bg.size).convert('RGBA')
		bg.paste(txt, (0, 0), txt)
		txt.close()
	return bg

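# Download all pages of one issue into <name>.cbz with a progress display,
# then delegate to buildmetadata().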
def dlimages(data, issue):
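	# De-duplicate stories by id; the API can list the same story more than once.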
	storiesid = []
	stories = []
	for story in data['stories']:
		if story['id'] not in storiesid:
			storiesid.append(story['id'])
			stories.append(story)
	data['stories'] = stories
	year = data['publication_date'][:4]
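	# Build a filesystem-safe output name by stripping characters Windows forbids.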
	if TYPE == 't':
		try:
			name = repm(f'{data["display_name"]} - {data["title"]}', '<>:"/\\|?*', '')
		except KeyError:
			name = repm(data["title"], '<>:"/\\|?*', '')
	else:
		if not os.path.isdir(year):
			os.makedirs(year)
		name = os.path.join(year, repm(
			' '.join(data['text'].split(' ')[:-1]) + f' - Year {year} {data["title"]}',
			'<>:"/\\|?*',
			''
		))
	count = 1
	sizes = []
	pc = data['page_count']
	# If the target file already exists, append -b, -c, ... until the name is free.
	if os.path.isfile(name+'.cbz'):
		namec = 98  # ord('b')
		while os.path.isfile(name+'-'+chr(namec)+'.cbz'):
			namec += 1
		name = name + '-' + chr(namec)
	z = zipfile.ZipFile(name+'.cbz', 'w')
	try:
		print('Downloading issue {}: {} - {}'.format(issue, data['display_name'], data['title']))
	except KeyError:
		print('Downloading issue {}: {}'.format(issue, data['title']))
	print('\r[  0.0%] {}0/{}'.format(' '*(len(str(pc))-1), pc), end='')
	for story in data['stories']:
		for i, page in enumerate(story['pages']):
			try:
				if i == 0:
					img = im(page['images']['default']['url'], story['images']['default']['url'])
				else:
					img = im(page['images']['default']['url'])
			except KeyError:
				print('KeyError occurred during download of page', count)
				img = Image.open('not-found.jpg')
			except Exception as e:
				img = Image.open('not-found.jpg')
				with open(name+'.log', 'a') as f:
					f.write(str(e)+'\n')
				time.sleep(1)
			sizes.append(img.size)
			out = BytesIO()
			img.save(out, format='JPEG', quality=75)
			z.writestr(str(count)+'.jpg', out.getvalue())
			print('\r'+' '*WIDTH, end='')
			print('\r[{}{}%]{} {}/{}'.format(
				' '*(5-len(str(round(100*count/pc, 1)))),
				round(100*count/pc, 1),
				' '*(len(str(pc))-len(str(count))),
				count,
				pc
			), end='')
			out.close()
			img.close()
			count += 1

	print('\n')
	return buildmetadata(data, z, sizes)

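# Build the ACBF metadata for the issue, store it (plus the raw API JSON) in
# the archive, and return the next issue's response and id, or False at the end.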
def buildmetadata(data, z, sizes):
	s = copy.deepcopy(STRUCTURE)
	bi = s['meta-data']['book-info']
	pi = s['meta-data']['publish-info']
	di = s['meta-data']['document-info']

	# Collect unique artist/writer credits and character names across all stories.
	author = bi['author'] = []
	seen = set()
	for x in data['stories']:
		for field, activity in (('artists', 'artist'), ('writers', 'writer')):
			for y in x.get(field, []):
				if y['name'] not in seen:
					seen.add(y['name'])
					d = copy.deepcopy(STRUCTURE['meta-data']['book-info']['author'][0])
					d['@attrs']['activity'] = activity
					d['nickname'] = y['name']
					author.append(d)
		if x['characters']:
			for y in x['characters']:
				if y['name'] not in bi['characters']['name']:
					bi['characters']['name'].append(y['name'])

	bi['book-title'] = data['title'] if TYPE == 't' else data['text']
	bi['coverpage']['image']['@attrs']['href'] = '1.jpg'

	coverpoints = bi['coverpage']['frame'] = []
	for x in data['stories'][0]['pages'][0]['panels']:
		d = copy.deepcopy(STRUCTURE['meta-data']['book-info']['coverpage']['frame'][0])
		d['@attrs']['points'] = points(x, sizes[0])
		coverpoints.append(d)

	bi['sequence']['@attrs']['title'] = ' '.join(data['text'].split(' ')[:-1])
	if TYPE == 't':
		#bi['sequence']['@attrs']['title'] = 'Aku Ankan Taskukirja'
		try:
			bi['sequence']['@attrs']['volume'] = bi['sequence'][''] = int(data['display_name'])
		except (KeyError, ValueError): pass
	else:
		#bi['sequence']['@attrs']['title'] = 'Aku Ankka'
		try:
			bi['sequence']['@attrs']['volume'] = bi['sequence'][''] = data['display_name']
		except KeyError: pass
	src = 'https://www.akuankka.fi/lehti/'+str(data['id'])

	dbref = bi['databaseref'] = []
	d = copy.deepcopy(STRUCTURE['meta-data']['book-info']['databaseref'][0])
	d['@attrs']['dbname'] = 'akuankka.fi'
	d['@attrs']['type'] = 'URL'
	d[''] = src
	dbref.append(d)
	# Add one INDUCKS story-code reference per unique story.
	seen_codes = set()
	for x in data['stories']:
		if x['story_code'] not in seen_codes:
			seen_codes.add(x['story_code'])
			d = copy.deepcopy(STRUCTURE['meta-data']['book-info']['databaseref'][0])
			d['@attrs']['dbname'] = 'INDUCKS'
			d['@attrs']['type'] = 'code'
			d[''] = x['story_code']
			dbref.append(d)

	pi['publisher'] = 'Sanoma'
	pi['publish-date']['@attrs']['value'] = data['publication_date']
	pi['publish-date'][''] = data['publication_date'][:4]

	today = date.today()
	di['creation-date']['@attrs']['value'] = today.strftime('%Y-%m-%d')
	di['creation-date'][''] = today.strftime('%B %d, %Y')
	di['source']['p'] = src
	di['id'] = data['id']
	di['version'] = 1

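	# Emit one <page> element per downloaded image, with a <frame> per panel.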
	count = 1
	page = s['body']['page'] = []
	for x in data['stories']:
		for y in x['pages']:
			d = copy.deepcopy(STRUCTURE['body']['page'][0])
			d['frame'] = []
			d['title'] = x['title']
			d['image']['@attrs']['href'] = str(count) + '.jpg'
			for p in y['panels']:
				d2 = copy.deepcopy(STRUCTURE['body']['page'][0]['frame'][0])
				d2['@attrs']['points'] = points(p, sizes[count-1])
				d['frame'].append(d2)
			page.append(d)
			count += 1

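	# Serialize to XML and swap dicttoxml's <root> wrapper for the <acbf> root element.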
	metadata = dicttoxml.dicttoxml(s, attr_type=False, fold_list=False)
	metadata = metadata.decode('utf-8').replace(
		'<root><xmlns>http://www.acbf.info/xml/acbf/1.1</xmlns>',
		'<acbf xmlns="http://www.acbf.info/xml/acbf/1.1">'
	).replace(
		'</root>',
		'</acbf>'
	)
	z.writestr('metadata.acbf', metadata)
	remove_a_key(data, 'url')
	z.writestr('metadata.json', json.dumps(data, ensure_ascii=False, indent=4))
	z.close()

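	# If the issue links to a next one, prefetch it so the main loop can continue;
	# returning False instead of a tuple ends the loop (unpacking raises TypeError).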
	if 'next' in data:
		i = str(data['next']['id'])
		r = requests.get(BASE+'/api/v2/issues/'+i, headers=HEADERS, params={'stories-full': 1}, cookies=COOKIES)
		return (r, i)
	else:
		return False


if __name__ == '__main__':
	issue = ''
	for i, arg in enumerate(sys.argv):
		if arg in ['-h', '--help']:
			print('-h, --help             Show this help message')
			print('-t, --type    t|l      Issue type: t=Taskukirja, l=Lehti (default: t)')
			print('-i, --issue   issue #  Issue number (from the URL, not the same as the book number);')
			print('                       defaults to 3667 for t and 25 for l\n')
			print('To download Taskukirja issues starting at issue 3667 (=#1):')
			print('  akuankka.py -t t -i 3667')
			print('Or, since those are the defaults, simply:')
			print('  akuankka.py')
			sys.exit()
		if arg in ['-t', '--type']:
			TYPE = sys.argv[i+1]
		if arg in ['-i', '--issue']:
			issue = sys.argv[i+1]
	if not issue:
		issue = '3667' if TYPE == 't' else '25'

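	# Download issues in sequence: dlimages() returns the next issue's response
	# and id until the chain ends, at which point unpacking False raises
	# TypeError and the loop stops.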
	r = requests.get(BASE+'/api/v2/issues/'+issue, headers=HEADERS, params={'stories-full': 1}, cookies=COOKIES)
	while True:
		try:
			r, issue = dlimages(r.json(), issue)
		except TypeError:
			break
