Commit c074c0295d513ef2af5db5cd9858fe99c9d5048f
Exists in rhitier-dev and in 1 other branch
Merge branch 'ToPython3' into dev
Showing 2 changed files with 66 additions and 56 deletions
1 | 1 | # coding=utf-8 |
2 | 2 | |
3 | -import StringIO | |
3 | +from io import StringIO | |
4 | 4 | import datetime |
5 | 5 | import gzip |
6 | 6 | import json |
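Note: Python 3 dropped the standalone StringIO module; the class now lives in io with the same buffer API. A minimal sketch of the new spelling (standalone, not code from this commit):

    from io import StringIO

    buf = StringIO()          # in-memory text buffer
    buf.write("a,b,c\n")
    print(buf.getvalue())     # -> "a,b,c\n"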
... | ... | @@ -8,7 +8,7 @@ import logging |
8 | 8 | import random |
9 | 9 | import tarfile |
10 | 10 | import time |
11 | -import urllib | |
11 | +import urllib.request as urllib_request | |
12 | 12 | import requests |
13 | 13 | import re # regex |
14 | 14 | import numpy |
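Note: Python 3 split the old urllib module into urllib.request, urllib.parse and urllib.error; the `urllib_request` alias keeps the existing call sites short. A minimal sketch (URL is illustrative, not from this commit):

    import urllib.request as urllib_request

    # urlopen and urlretrieve both moved into urllib.request
    response = urllib_request.urlopen("https://example.com")
    body = response.read()    # bytes in Python 3, not str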
... | ... | @@ -25,8 +25,13 @@ from dateutil import parser as dateparser |
25 | 25 | from flask import Flask |
26 | 26 | from flask import request |
27 | 27 | from flask import url_for, send_from_directory, abort as abort_flask |
28 | -from jinja2 import Environment, FileSystemLoader, Markup | |
29 | -from yaml import load as yaml_load | |
28 | +from jinja2 import Environment, FileSystemLoader | |
29 | +from jinja2.utils import markupsafe | |
30 | +from yaml import load as yaml_load, dump | |
31 | +try: | |
32 | + from yaml import CLoader as Loader | |
33 | +except ImportError: | |
34 | + from yaml import Loader | |
30 | 35 | from netCDF4 import Dataset, date2num |
31 | 36 | |
32 | 37 | |
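Note: Markup moved out of Jinja2 into the markupsafe package. The commit reaches it through jinja2.utils, which works on Jinja2 releases that still re-export markupsafe; importing from markupsafe directly is the forward-compatible spelling. A minimal sketch (not from this commit):

    # Markup wraps a string that is already safe HTML and must not be escaped
    from markupsafe import Markup

    safe = Markup("<b>bold</b>")
    print(safe)               # -> <b>bold</b>, rendered as-is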
... | ... | @@ -48,7 +53,7 @@ with open(get_path('../VERSION'), 'r') as version_file: |
48 | 53 | |
49 | 54 | # CONFIG |
50 | 55 | with open(get_path('../config.yml'), 'r') as config_file: |
51 | - config = yaml_load(config_file.read()) | |
56 | + config = yaml_load(config_file.read(), Loader=Loader) | |
52 | 57 | |
53 | 58 | FILE_DATE_FMT = "%Y-%m-%dT%H:%M:%S" |
54 | 59 | MOMENT_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" |
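Note: PyYAML 5.1+ warns when yaml.load() is called without an explicit Loader; CLoader is the libyaml-backed fast path, and the except branch falls back to the pure-Python Loader when the C extension is absent. A minimal sketch (the inline YAML is illustrative, not from this commit):

    from yaml import load as yaml_load
    try:
        from yaml import CLoader as Loader   # fast, needs libyaml
    except ImportError:
        from yaml import Loader              # pure-Python fallback

    config = yaml_load("targets: [earth, mars]", Loader=Loader)
    print(config["targets"])  # -> ['earth', 'mars']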
... | ... | @@ -148,13 +153,11 @@ PARAMETERS = { |
148 | 153 | }, |
149 | 154 | } |
150 | 155 | |
151 | - | |
152 | 156 | # SETUP ENVIRONMENT ########################################################### |
153 | 157 | |
154 | 158 | environ['SPACEPY'] = CACHE_DIR |
155 | 159 | environ['CDF_LIB'] = CDF_LIB |
156 | 160 | |
157 | - | |
158 | 161 | # SETUP FLASK ENGINE ########################################################## |
159 | 162 | |
160 | 163 | app = Flask(__name__, root_path=THIS_DIRECTORY) |
... | ... | @@ -211,20 +214,20 @@ def markdown_filter(value, nl2br=False, p=True): |
211 | 214 | |
212 | 215 | |
213 | 216 | _js_escapes = { |
214 | - '\\': '\\u005C', | |
215 | - '\'': '\\u0027', | |
216 | - '"': '\\u0022', | |
217 | - '>': '\\u003E', | |
218 | - '<': '\\u003C', | |
219 | - '&': '\\u0026', | |
220 | - '=': '\\u003D', | |
221 | - '-': '\\u002D', | |
222 | - ';': '\\u003B', | |
223 | - u'\u2028': '\\u2028', | |
224 | - u'\u2029': '\\u2029' | |
217 | + '\\': '\\u005C', | |
218 | + '\'': '\\u0027', | |
219 | + '"': '\\u0022', | |
220 | + '>': '\\u003E', | |
221 | + '<': '\\u003C', | |
222 | + '&': '\\u0026', | |
223 | + '=': '\\u003D', | |
224 | + '-': '\\u002D', | |
225 | + ';': '\\u003B', | |
226 | + u'\u2028': '\\u2028', | |
227 | + u'\u2029': '\\u2029' | |
225 | 228 | } |
226 | 229 | # Escape every ASCII character with a value less than 32. |
227 | -_js_escapes.update(('%c' % z, '\\u%04X' % z) for z in xrange(32)) | |
230 | +_js_escapes.update(('%c' % z, '\\u%04X' % z) for z in range(32)) | |
228 | 231 | |
229 | 232 | |
230 | 233 | def escapejs_filter(value): |
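Note: xrange() no longer exists in Python 3, and range() there is lazy like Python 2's xrange, so the substitution is behavior-preserving. A minimal sketch of the same escape-table construction (not from this commit):

    # '%c' % z yields the control character chr(z) for codes below 32
    escapes = {'%c' % z: '\\u%04X' % z for z in range(32)}
    print(escapes['\x00'])    # -> \u0000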
... | ... | @@ -235,7 +238,7 @@ def escapejs_filter(value): |
235 | 238 | else: |
236 | 239 | escaped.append(letter) |
237 | 240 | |
238 | - return Markup("".join(escaped)) | |
241 | + return markupsafe.Markup("".join(escaped)) | |
239 | 242 | |
240 | 243 | tpl_engine = Environment(loader=FileSystemLoader([get_path('view')]), |
241 | 244 | trim_blocks=True, |
... | ... | @@ -275,7 +278,7 @@ def render_view(view, context=None): |
275 | 278 | """ |
276 | 279 | context = {} if context is None else context |
277 | 280 | return tpl_engine.get_template(view).render( |
278 | - dict(tpl_global_vars.items() + context.items()) | |
281 | + dict(list(tpl_global_vars.items()) + list(context.items())) | |
279 | 282 | ) |
280 | 283 | |
281 | 284 | |
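Note: dict.items() returns a view in Python 3, and views do not support +, so the commit materializes both sides as lists. Merging the dicts directly is an equivalent spelling; in both, later keys win. A minimal sketch (sample dicts are illustrative, not from this commit):

    a, b = {'x': 1}, {'x': 2, 'y': 3}
    merged = dict(list(a.items()) + list(b.items()))
    assert merged == {**a, **b} == {'x': 2, 'y': 3}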
... | ... | @@ -314,7 +317,7 @@ def round_time(dt=None, round_to=60): |
314 | 317 | dt = datetime.datetime.now() |
315 | 318 | seconds = (dt.replace(tzinfo=None) - dt.min).seconds |
316 | 319 | rounding = (seconds + round_to / 2) // round_to * round_to |
317 | - return dt + datetime.timedelta(0, rounding-seconds, -dt.microsecond) | |
320 | + return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond) | |
318 | 321 | |
319 | 322 | |
320 | 323 | def datetime_from_list(time_list): |
... | ... | @@ -459,10 +462,10 @@ ORDER BY time_min, granule_gid |
459 | 462 | try: |
460 | 463 | response = requests.post(api_url, { |
461 | 464 | 'REQUEST': 'doQuery', |
462 | - 'LANG': 'ADQL', | |
463 | - 'QUERY': query, | |
465 | + 'LANG': 'ADQL', | |
466 | + 'QUERY': query, | |
464 | 467 | 'TIMEOUT': '30', |
465 | - 'FORMAT': 'VOTable/td' | |
468 | + 'FORMAT': 'VOTable/td' | |
466 | 469 | }) |
467 | 470 | |
468 | 471 | response_xml = response.text |
... | ... | @@ -516,7 +519,7 @@ def retrieve_amda_netcdf(orbiter, what, started_at, stopped_at): |
516 | 519 | remote_gzip_files = [] |
517 | 520 | while not success and retries < 3: |
518 | 521 | try: |
519 | - response = urllib.urlopen(url) | |
522 | + response = urllib_request.urlopen(url) | |
520 | 523 | remote_gzip_files = json.loads(response.read()) |
521 | 524 | if not remote_gzip_files: |
522 | 525 | raise Exception("Failed to fetch data at '%s'." % url) |
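Note: urlopen().read() returns bytes under Python 3; json.loads() has accepted bytes since Python 3.6, so the unchanged context line keeps working there, while older 3.x interpreters need an explicit decode. A minimal sketch (payload is illustrative, not from this commit):

    import json

    payload = b'["file1.nc.gz", "file2.nc.gz"]'
    files = json.loads(payload)            # fine on Python >= 3.6
    files = json.loads(payload.decode())   # explicit, works everywhere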
... | ... | @@ -529,7 +532,7 @@ def retrieve_amda_netcdf(orbiter, what, started_at, stopped_at): |
529 | 532 | # raise Exception("API says it's out of time at '%s'." % url) |
530 | 533 | success = True |
531 | 534 | except Exception as e: |
532 | - log.warn("Failed (%d/3) '%s' : %s" % (retries+1, url, e.message)) | |
535 | + log.warning("Failed (%d/3) '%s' : %s" % (retries + 1, url, e)) | |
533 | 536 | remote_gzip_files = [] |
534 | 537 | errors.append(e) |
535 | 538 | finally: |
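Note: BaseException lost its `message` attribute in Python 3, so referencing e.message raises AttributeError inside the handler; formatting the caught exception itself is the portable spelling. A minimal sketch (not from this commit):

    try:
        raise ValueError("boom")
    except Exception as e:
        print("Failed: %s" % e)        # -> "Failed: boom"
        print("Failed: %s" % str(e))   # equivalent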
... | ... | @@ -537,9 +540,9 @@ def retrieve_amda_netcdf(orbiter, what, started_at, stopped_at): |
537 | 540 | if not remote_gzip_files: |
538 | 541 | log.error("Failed to retrieve data from AMDA.") |
539 | 542 | log.error("Failed to fetch gzip files list for %s at '%s' : %s" % |
540 | - (orbiter, url, errors)) | |
543 | + (orbiter, url, errors)) | |
541 | 544 | abort(400, "Failed to fetch gzip files list for %s at '%s' : %s" % |
542 | - (orbiter, url, errors)) | |
545 | + (orbiter, url, errors)) | |
543 | 546 | else: |
544 | 547 | remote_gzip_files = list(set(remote_gzip_files)) |
545 | 548 | |
... | ... | @@ -558,7 +561,7 @@ def retrieve_amda_netcdf(orbiter, what, started_at, stopped_at): |
558 | 561 | local_gzip_files.append(local_gzip_file) |
559 | 562 | if not isfile(local_gzip_file): |
560 | 563 | log.debug("Retrieving '%s'..." % local_gzip_file) |
561 | - urllib.urlretrieve(remote_gzip_file, local_gzip_file) | |
564 | + urllib_request.urlretrieve(remote_gzip_file, local_gzip_file) | |
562 | 565 | log.debug("Retrieved '%s'." % local_gzip_file) |
563 | 566 | else: |
564 | 567 | log.debug("Found '%s' in the cache." % local_gzip_file) |
... | ... | @@ -708,13 +711,17 @@ def get_data_for_target(target_config, input_slug, |
708 | 711 | (target_config['name'], orbit_file)) |
709 | 712 | for ltime, datum_hee in zip(times, data_hee): |
710 | 713 | try: |
714 | + try: | |
715 | + ltime = [str(i, 'UTF8') for i in ltime] | |
716 | + except Exception as e: | |
717 | + print(e) | |
711 | 718 | dtime = datetime_from_list(ltime) |
712 | 719 | except Exception: |
713 | - log.error("Failed to parse time from %s." % ltime) | |
720 | + log.error("Failed to parse time from get_data_for_target %s." % ltime) | |
714 | 721 | raise |
715 | 722 | # Keep only what's in the interval |
716 | 723 | if s0 <= dtime <= s1: |
717 | - dkey = round_time(dtime, 60*60).strftime(precision) | |
724 | + dkey = round_time(dtime, 60 * 60).strftime(precision) | |
718 | 725 | orbit_data[dkey] = datum_hee |
719 | 726 | cdf_handle.close() |
720 | 727 | |
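Note: character data read back from NetCDF files arrives as bytes under Python 3, which is why the added inner try decodes each element before date parsing; str(i, 'UTF8') is equivalent to i.decode('UTF8'). A minimal sketch (the sample byte list is illustrative, not from this commit):

    ltime = [b'2018', b'03', b'28', b'00', b'00', b'00']
    ltime = [str(i, 'UTF8') for i in ltime]   # same as i.decode('UTF8')
    print(ltime)  # -> ['2018', '03', '28', '00', '00', '00']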
... | ... | @@ -730,8 +737,8 @@ def get_data_for_target(target_config, input_slug, |
730 | 737 | nc_keys.update(model['parameters']) |
731 | 738 | |
732 | 739 | if len(model_files) == 0: |
733 | - log.warn("No model data for '%s' '%s'." | |
734 | - % (target_config['slug'], model['slug'])) | |
740 | + log.warning("No model data for '%s' '%s'." | |
741 | + % (target_config['slug'], model['slug'])) | |
735 | 742 | |
736 | 743 | for model_file in model_files: |
737 | 744 | log.debug("%s: opening model NETCDF4 '%s'..." % |
... | ... | @@ -769,6 +776,10 @@ def get_data_for_target(target_config, input_slug, |
769 | 776 | in zip(times, data_v, data_b, data_t, data_n, data_p, data_a): |
770 | 777 | |
771 | 778 | try: |
779 | + try: | |
780 | + ltime = [str(i, 'UTF8') for i in ltime] | |
781 | + except Exception as e: | |
782 | + print(e) | |
772 | 783 | dtime = datetime_from_list(ltime) |
773 | 784 | except Exception: |
774 | 785 | log.error("Failed to parse time from %s." % ltime) |
... | ... | @@ -777,7 +788,7 @@ def get_data_for_target(target_config, input_slug, |
777 | 788 | if not (s0 <= dtime <= s1): |
778 | 789 | continue # Cull what's out of the interval |
779 | 790 | |
780 | - droundtime = round_time(dtime, 60*60) | |
791 | + droundtime = round_time(dtime, 60 * 60) | |
781 | 792 | dkey = droundtime.strftime(precision) |
782 | 793 | |
783 | 794 | x_hee = None |
... | ... | @@ -849,7 +860,7 @@ def get_data_for_target(target_config, input_slug, |
849 | 860 | def generate_csv_contents(target_slug, input_slug, started_at, stopped_at): |
850 | 861 | target_config = get_target_config(target_slug) |
851 | 862 | log.debug("Crunching CSV contents for '%s'..." % target_config['name']) |
852 | - si = StringIO.StringIO() | |
863 | + si = StringIO() | |
853 | 864 | cw = csv_writer(si) |
854 | 865 | cw.writerow(PROPERTIES) |
855 | 866 | |
... | ... | @@ -901,8 +912,9 @@ def generate_csv_file_if_needed(target_slug, input_slug, |
901 | 912 | for trace in extract_tb(exc_traceback): |
902 | 913 | log.error(trace) |
903 | 914 | if isfile(local_csv_file): |
904 | - log.warn("Removing failed CSV '%s'..." % local_csv_file) | |
915 | + log.warning("Removing failed CSV '%s'..." % local_csv_file) | |
905 | 916 | removefile(local_csv_file) |
917 | + # pprint(e) | |
906 | 918 | abort(500, "Failed creating CSV '%s' : %s" % (filename, e)) |
907 | 919 | |
908 | 920 | |
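Note: Logger.warn() is a deprecated alias of Logger.warning() in Python 3, which is why the commit renames every call site. A minimal sketch (logger name and path are illustrative, not from this commit):

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger("demo")
    log.warning("Removing failed CSV '%s'...", "/tmp/example.csv")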
... | ... | @@ -1057,7 +1069,7 @@ def get_catalog_layers(input_slug, target_slug, started_at, stopped_at): |
1057 | 1069 | validates_any_constraint = True |
1058 | 1070 | for constraint in constraints: |
1059 | 1071 | validates_constraint = True |
1060 | - for key, possible_values in constraint.iteritems(): | |
1072 | + for key, possible_values in iter(constraint.items()): | |
1061 | 1073 | actual_value = json_datum[_get_index_of_key( |
1062 | 1074 | json_data, key |
1063 | 1075 | )] |
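Note: dict.iteritems() is gone in Python 3; items() returns a view that is already iterable, so the iter() wrapper on the new line is harmless but redundant. A minimal sketch (the constraint dict is illustrative, not from this commit):

    constraint = {'xlabel': ['CME'], 'quality': ['good']}
    for key, possible_values in constraint.items():
        print(key, possible_values)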
... | ... | @@ -1084,7 +1096,7 @@ def get_catalog_layers(input_slug, target_slug, started_at, stopped_at): |
1084 | 1096 | |
1085 | 1097 | catalog_layers[config_layer['slug']].append({ |
1086 | 1098 | 'start': start_time.strftime(MOMENT_DATE_FMT), |
1087 | - 'stop': stop_time.strftime(MOMENT_DATE_FMT), | |
1099 | + 'stop': stop_time.strftime(MOMENT_DATE_FMT), | |
1088 | 1100 | }) |
1089 | 1101 | |
1090 | 1102 | return catalog_layers |
... | ... | @@ -1149,7 +1161,7 @@ def favicon(): # we want it served from the root, not from static/ |
1149 | 1161 | def home(): |
1150 | 1162 | increment_hit_counter() |
1151 | 1163 | parameters = PARAMETERS.values() |
1152 | - parameters.sort(key=lambda x: x['position']) | |
1164 | + parameters = sorted(parameters, key=lambda x: x['position']) | |
1153 | 1165 | input_slug = get_input_slug_from_query() |
1154 | 1166 | targets = [t for t in config['targets'] if not t['locked']] |
1155 | 1167 | started_at, stopped_at = get_interval_from_query() |
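Note: PARAMETERS.values() is a view in Python 3 with no .sort() method; sorted() accepts the same key and returns the ordered list the template needs. A minimal sketch (the two-entry PARAMETERS stand-in is illustrative, not from this commit):

    PARAMETERS = {'pdyn': {'position': 2}, 'magn': {'position': 1}}
    parameters = sorted(PARAMETERS.values(), key=lambda x: x['position'])
    print([p['position'] for p in parameters])  # -> [1, 2]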
... | ... | @@ -1165,9 +1177,9 @@ def home(): |
1165 | 1177 | 'started_at': started_at, |
1166 | 1178 | 'stopped_at': stopped_at, |
1167 | 1179 | 'planets': [s for s in config['targets'] if s['type'] == 'planet'], |
1168 | - 'probes': [s for s in config['targets'] if s['type'] == 'probe'], | |
1169 | - 'comets': [s for s in config['targets'] if s['type'] == 'comet'], | |
1170 | - 'visits': get_hit_counter(), | |
1180 | + 'probes': [s for s in config['targets'] if s['type'] == 'probe'], | |
1181 | + 'comets': [s for s in config['targets'] if s['type'] == 'comet'], | |
1182 | + 'visits': get_hit_counter(), | |
1171 | 1183 | }) |
1172 | 1184 | |
1173 | 1185 | |
... | ... | @@ -1235,7 +1247,7 @@ def download_targets_tarball(targets, inp, started_at, stopped_at): |
1235 | 1247 | """ |
1236 | 1248 | separator = '-' |
1237 | 1249 | targets = targets.split(separator) |
1238 | - targets.sort() | |
1250 | + targets.sort() | |
1239 | 1251 | targets_configs = [] |
1240 | 1252 | for target in targets: |
1241 | 1253 | if not target: |
... | ... | @@ -1307,7 +1319,7 @@ def download_targets_netcdf(targets, inp, params, started_at, stopped_at): |
1307 | 1319 | """ |
1308 | 1320 | separator = '-' # /!\ this char should never be in target's slugs |
1309 | 1321 | targets = targets.split(separator) |
1310 | - targets.sort() | |
1322 | + targets.sort() | |
1311 | 1323 | targets_configs = [] |
1312 | 1324 | for target in targets: |
1313 | 1325 | if not target: |
... | ... | @@ -1317,7 +1329,7 @@ def download_targets_netcdf(targets, inp, params, started_at, stopped_at): |
1317 | 1329 | abort(400, "No valid targets specified. What are you doing?") |
1318 | 1330 | |
1319 | 1331 | params = params.split(separator) |
1320 | - params.sort() | |
1332 | + params.sort() | |
1321 | 1333 | if 0 == len(params): |
1322 | 1334 | abort(400, "No valid parameters specified. What are you doing?") |
1323 | 1335 | if not is_list_in_list(params, PARAMETERS.keys()): |
... | ... | @@ -1359,7 +1371,7 @@ def download_targets_netcdf(targets, inp, params, started_at, stopped_at): |
1359 | 1371 | started_at=started_at, stopped_at=stopped_at |
1360 | 1372 | ) |
1361 | 1373 | dkeys = sorted(data) |
1362 | - dimension = 'dim_'+target_slug | |
1374 | + dimension = 'dim_' + target_slug | |
1363 | 1375 | nc_handle.createDimension(dimension, len(dkeys)) |
1364 | 1376 | |
1365 | 1377 | # TIME # |
... | ... | @@ -1432,7 +1444,7 @@ def download_targets_cdf(targets, inp, started_at, stopped_at): |
1432 | 1444 | """ |
1433 | 1445 | separator = '-' # /!\ this char should never be in target's slugs |
1434 | 1446 | targets = targets.split(separator) |
1435 | - targets.sort() | |
1447 | + targets.sort() | |
1436 | 1448 | targets_configs = [] |
1437 | 1449 | for target in targets: |
1438 | 1450 | if not target: |
... | ... | @@ -1552,7 +1564,7 @@ def download_targets_cdf(targets, inp, started_at, stopped_at): |
1552 | 1564 | else: |
1553 | 1565 | values_xhee.append(0) |
1554 | 1566 | values_yhee.append(0) |
1555 | - log.warn("Orbit data for %s has NaNs." % target_slug) | |
1567 | + log.warning("Orbit data for %s has NaNs." % target_slug) | |
1556 | 1568 | cdf_handle[kx] = values_xhee |
1557 | 1569 | cdf_handle[ky] = values_yhee |
1558 | 1570 | cdf_handle[kx].attrs['UNITS'] = 'Au' |
... | ... | @@ -1599,11 +1611,11 @@ def download_auroral_catalog_csv(target): |
1599 | 1611 | header = ('time_min', 'time_max', 'thumbnail_url', 'external_link') |
1600 | 1612 | if len(emissions): |
1601 | 1613 | header = emissions[0].keys() |
1602 | - si = StringIO.StringIO() | |
1614 | + si = StringIO() | |
1603 | 1615 | cw = csv_dict_writer(si, fieldnames=header) |
1604 | 1616 | cw.writeheader() |
1605 | 1617 | # 'time_min', 'time_max', 'thumbnail_url', 'external_link' |
1606 | - #cw.writerow(head) | |
1618 | + # cw.writerow(head) | |
1607 | 1619 | |
1608 | 1620 | log.debug("Writing auroral emissions CSV for %s..." % tc['name']) |
1609 | 1621 | cw.writerows(emissions) |
... | ... | @@ -1617,8 +1629,6 @@ def download_auroral_catalog_csv(target): |
1617 | 1629 | # return send_from_directory(CACHE_DIR, filename) |
1618 | 1630 | |
1619 | 1631 | |
1620 | - | |
1621 | - | |
1622 | 1632 | @app.route("/test/auroral/<target>") |
1623 | 1633 | def test_auroral_emissions(target): |
1624 | 1634 | tc = validate_tap_target_config(target) |
... | ... | @@ -1666,7 +1676,7 @@ def cache_warmup(): |
1666 | 1676 | |
1667 | 1677 | targets = get_active_targets() |
1668 | 1678 | targets_slugs = [target['slug'] for target in targets] |
1669 | - targets_slugs.sort() | |
1679 | + targets_slugs.sort() | |
1670 | 1680 | |
1671 | 1681 | update_spacepy() |
1672 | 1682 | for target in targets: |
... | ... | @@ -1704,7 +1714,7 @@ def log_clear(): |
1704 | 1714 | # cdf_to_inspect = get_path("../res/dummy.nc") |
1705 | 1715 | # cdf_to_inspect = get_path("../res/dummy_jupiter_coordinates.nc") |
1706 | 1716 | # |
1707 | -# si = StringIO.StringIO() | |
1717 | +# si = StringIO() | |
1708 | 1718 | # cw = csv.DictWriter(si, fieldnames=['Name', 'Shape', 'Length']) |
1709 | 1719 | # cw.writeheader() |
1710 | 1720 | # | ... | ... |
web/view/home.html.jinja2
... | ... | @@ -272,7 +272,7 @@ var sw_configuration = { |
272 | 272 | orbit: { a: {{ target.orbit.semimajor or 0 }}, b: {{ target.orbit.semiminor or 0 }} }, |
273 | 273 | img: '{{ static('img/target/'~target.slug~'_128.png') }}', |
274 | 274 | layers: { |
275 | -{% for catalog_slug, catalog_intervals in target.catalog_layers.iteritems() %} | |
275 | +{% for catalog_slug, catalog_intervals in target.catalog_layers.items() %} | |
276 | 276 | "{{ catalog_slug }}": [ |
277 | 277 | {% for interval in catalog_intervals %} |
278 | 278 | {# { start: "2018-03-28T00:00:00Z", stop: "2018-03-29T00:00:00Z" }, #} | ... | ... |
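Note: Jinja2 resolves method calls against the underlying Python object, so a template calling iteritems() fails on Python 3 exactly like plain code; items() is the portable call. A minimal Python sketch rendering such a loop (template string and data are illustrative, not from this commit):

    from jinja2 import Environment

    tpl = Environment().from_string(
        "{% for slug, intervals in layers.items() %}"
        "{{ slug }}:{{ intervals|length }} {% endfor %}"
    )
    print(tpl.render(layers={'cme': [1, 2], 'sir': [3]}))  # -> "cme:2 sir:1 "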