Skip to content

Commit

Permalink
v3.2.0
Browse files Browse the repository at this point in the history
  • Loading branch information
answerquest committed Oct 25, 2018
1 parent 893690c commit f3ff414
Show file tree
Hide file tree
Showing 73 changed files with 102,621 additions and 18,926 deletions.
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ A: <!--- enter answer here --->
Q: What is the size of the dataset : how many lines in stops.txt, routes.txt and stop_times.txt?
A: <!--- enter answer here --->

Are you starting a new feed from scratch and haven't filled in any of the other files like calendar.txt?
Q: Are you starting a new feed from scratch and haven't filled in any of the other files like calendar.txt?
A: <!--- enter answer here --->

#### Anything else
Expand Down
Binary file modified GTFSManager.exe
Binary file not shown.
92 changes: 92 additions & 0 deletions GTFSManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -1228,6 +1228,95 @@ def post(self):
end = time.time()
logmessage("hydGTFS POST call took {} seconds.".format(round(end-start,2)))

class frequencies(tornado.web.RequestHandler):
    """Tornado handler for reading and replacing the GTFS frequencies table.

    GET  ${APIpath}frequencies        : returns the full frequencies table as a
                                        JSON array of records.
    POST ${APIpath}frequencies?pw=pw  : replaces the frequencies table with the
                                        JSON array in the request body
                                        (password-protected via decrypt()).
    """

    def get(self):
        # ${APIpath}frequencies
        start = time.time()
        logmessage('\nfrequencies GET call')

        # readTableDB returns a DataFrame; serialize as a JSON array of row-records.
        freqJson = readTableDB('frequencies').to_json(orient='records', force_ascii=False)
        self.write(freqJson)
        end = time.time()
        # bugfix: this log line previously said "frequences"
        logmessage("frequencies GET call took {} seconds.".format(round(end-start,2)))

    def post(self):
        # ${APIpath}frequencies
        start = time.time()
        logmessage('\nfrequencies POST call')
        pw = self.get_argument('pw', default='')
        if not decrypt(pw):
            self.set_status(400)
            self.write("Error: invalid password.")
            return
        # received text comes as bytestring. Convert to unicode using .decode('UTF-8') from https://stackoverflow.com/a/6273618/4355695
        data = json.loads(self.request.body.decode('UTF-8'))

        if replaceTableDB('frequencies', data):  # replaceTableDB(tablename, data)
            self.write('Saved frequencies data to DB.')
        else:
            self.set_status(400)
            self.write("Error: Could not save to DB.")
        end = time.time()
        logmessage("frequencies POST call took {} seconds.".format(round(end-start,2)))

class tableReadSave(tornado.web.RequestHandler):
    """Generic read/replace endpoint for any GTFS table.

    GET  ${APIpath}tableReadSave?table=table[&key=key&value=value]
         Returns the table (optionally filtered by key==value) as JSON records.
    POST ${APIpath}tableReadSave?pw=pw&table=table[&key=key&value=value]
         Replaces the table (or just the rows matching key==value) with the
         JSON array in the request body. Password-protected via decrypt().
    """

    def get(self):
        # ${APIpath}tableReadSave?table=table&key=key&value=value
        t0 = time.time()

        table = self.get_argument('table', default='')
        logmessage('\ntableReadSave GET call for table={}'.format(table))

        if not table:
            self.set_status(400)
            self.write("Error: invalid table.")
            return

        key = self.get_argument('key', default=None)
        value = self.get_argument('value', default=None)

        # Filtered read when both key and value are supplied; full table otherwise.
        df = readTableDB(table, key=key, value=value) if (key and value) else readTableDB(table)
        self.write(df.to_json(orient='records', force_ascii=False))

        t1 = time.time()
        logmessage("tableReadSave GET call for table={} took {} seconds.".format(table, round(t1 - t0, 2)))

    def post(self):
        # ${APIpath}tableReadSave?pw=pw&table=table&key=key&value=value
        t0 = time.time()

        if not decrypt(self.get_argument('pw', default='')):
            self.set_status(400)
            self.write("Error: invalid password.")
            return

        table = self.get_argument('table', default='')
        if not table:
            self.set_status(400)
            self.write("Error: invalid table.")
            return

        logmessage('\ntableReadSave POST call for table={}'.format(table))

        # received text comes as bytestring. Convert to unicode using .decode('UTF-8') from https://stackoverflow.com/a/6273618/4355695
        payload = json.loads(self.request.body.decode('UTF-8'))

        key = self.get_argument('key', default=None)
        value = self.get_argument('value', default=None)

        # Targeted replace when both key and value are supplied; whole-table replace otherwise.
        ok = replaceTableDB(table, payload, key, value) if (key and value) else replaceTableDB(table, payload)

        if ok:
            self.write('Saved {} data to DB.'.format(table))
        else:
            self.set_status(400)
            self.write("Error: Could not save to DB.")

        t1 = time.time()
        logmessage("tableReadSave POST call for table={} took {} seconds.".format(table, round(t1 - t0, 2)))

def make_app():
return tornado.web.Application([
Expand Down Expand Up @@ -1266,6 +1355,9 @@ def make_app():
(r"/API/deleteByKey", deleteByKey),
(r"/API/replaceID", replaceID),
(r"/API/hydGTFS", hydGTFS),
(r"/API/frequencies", frequencies),
(r"/API/tableReadSave", tableReadSave),
#(r"/API/idList", idList),
(r"/(.*)", tornado.web.StaticFileHandler, {"path": root, "default_filename": "index.html"})
])

Expand Down
88 changes: 70 additions & 18 deletions GTFSserverfunctions.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,49 @@
#GTFSserverfunctions.py
# this file is to be inline included in the main script. Seriously, I do not want to keep declaring import statements everywhere.
'''
GTFSserverfunctions.py
this file is to be inline included in the main script. Seriously, I do not want to keep declaring import statements everywhere.
import tornado.web
import tornado.ioloop
import json
import os
import time, datetime
import xmltodict
import pandas as pd
from collections import OrderedDict
import zipfile, zlib
from tinydb import TinyDB, Query
from tinydb.operations import delete
import webbrowser
from Cryptodome.PublicKey import RSA #uses pycryptodomex package.. disambiguates from pycrypto, pycryptodome
import shutil # used in fareChartUpload to fix header if changed
import pathlib
from math import sin, cos, sqrt, atan2, radians # for lat-long distance calculations
# import requests # nope, not needed for now
from json.decoder import JSONDecodeError # used to catch corrupted DB file when tinyDB loads it.
import signal, sys # for catching Ctrl+C and exiting gracefully.
import gc # garbage collector, from https://stackoverflow.com/a/1316793/4355695
import csv
import numpy as np
import io # used in hyd csv import
global uploadFolder
global xmlFolder
global logFolder
global configFolder
global dbFolder
global exportFolder
global sequenceDBfile
global passwordFile
global chunkRulesFile
global configFile
if __name__ == "__main__":
print("Don't run this, run GTFSManager.py.")
'''

def csvwriter( array2write, filename, keys=None ):
# 15.4.18: Changing to use pandas instead of csv.DictWriter. Solves https://github.com/WRI-Cities/static-GTFS-manager/issues/3
Expand Down Expand Up @@ -29,7 +73,7 @@ def exportGTFS (folder):

try:
df = pd.read_hdf(dbFolder + h5File).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))

Expand Down Expand Up @@ -59,7 +103,7 @@ def exportGTFS (folder):
for count,h5File in enumerate(filenames):
try:
df = pd.read_hdf(dbFolder + h5File,stop=0)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))
columnsList.update(df.columns.tolist())
Expand All @@ -77,7 +121,7 @@ def exportGTFS (folder):
logmessage('Writing {} to csv'.format(h5File))
try:
df1 = pd.read_hdf(dbFolder + h5File).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df1 = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))
# in case the final columns list has more columns than df1 does, concatenating an empty df with the full columns list.
Expand Down Expand Up @@ -287,7 +331,7 @@ def GTFSstats():
try:
count = hdf.get_storer('df').nrows
# gets number of rows, without reading the entire file into memory. From https://stackoverflow.com/a/26466301/4355695
except KeyError as e:
except (KeyError, ValueError) as e:
logmessage('Note: {} does not have any data.'.format(tablename + '.h5'))
hdf.close()
# have to close this opened file, else will conflict with pd.read_csv later on
Expand All @@ -306,7 +350,7 @@ def GTFSstats():
hdf = pd.HDFStore(dbFolder + h5File)
try:
count += hdf.get_storer('df').nrows
except KeyError as e:
except (KeyError, ValueError) as e:
logmessage('Note: {} does not have any data.'.format(h5File))
hdf.close()
coveredFiles.append(h5File)
Expand All @@ -329,7 +373,7 @@ def GTFSstats():
hdf = pd.HDFStore(dbFolder + tablename + '.h5')
try:
count = hdf.get_storer('df').nrows
except KeyError as e:
except (KeyError, ValueError) as e:
logmessage('Note: {} does not have any data.'.format(tablename + '.h5'))
hdf.close()
coveredFiles.append(tablename+'.h5')
Expand All @@ -345,7 +389,7 @@ def GTFSstats():
hdf = pd.HDFStore(dbFolder + h5File)
try:
count += hdf.get_storer('df').nrows
except KeyError as e:
except (KeyError, ValueError) as e:
logmessage('Note: {} does not have any data.'.format(h5File))
hdf.close()
coveredFiles.append(h5File)
Expand All @@ -362,7 +406,7 @@ def GTFSstats():
hdf = pd.HDFStore(dbFolder + h5File)
try:
count = hdf.get_storer('df').nrows
except KeyError as e:
except (KeyError, ValueError) as e:
logmessage('Note: {} does not have any data.'.format(h5File))
count = 0
hdf.close()
Expand Down Expand Up @@ -418,7 +462,7 @@ def readTableDB(tablename, key=None, value=None):
try:
df = pd.read_hdf(dbFolder + h5File).fillna('').astype(str)
# typecasting as str, keeping NA values blank ''
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))

Expand Down Expand Up @@ -479,7 +523,7 @@ def replaceTableDB(tablename, data, key=None, value=None):
# remove entries matching the key and value
try:
df = pd.read_hdf(dbFolder+h5File).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))
oldLen = len( df[ df[key] == str(value)])
Expand Down Expand Up @@ -1085,7 +1129,7 @@ def replaceTableCell(h5File,column,valueFrom,valueTo):

try:
df = pd.read_hdf(dbFolder + h5File).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))
if column not in df.columns:
Expand Down Expand Up @@ -1217,7 +1261,7 @@ def replaceChunkyTableDB(xdf, value, tablename='stop_times'):
logmessage('Editing ' + chunkFile)
try:
df = pd.read_hdf(dbFolder + chunkFile).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(chunkFile))
initLen = len(df)
Expand All @@ -1238,15 +1282,16 @@ def replaceChunkyTableDB(xdf, value, tablename='stop_times'):
chunkFile = smallestChunk(tablename)
try:
df = pd.read_hdf(dbFolder + chunkFile).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(chunkFile))
except FileNotFoundError as e:
df = pd.DataFrame()
logmessage('Note: {} does not exist yet, so we will likely create it.'.format(chunkFile))

# next 3 lines to be done in either case
newdf = pd.concat([df,xdf],ignore_index=True)
# newdf = pd.concat([df,xdf],ignore_index=True)
newdf = df.append(xdf, ignore_index=True, sort=False)
logmessage('{} new entries for id {} added. Now writing to {}.'.format( str( len(xdf) ),value, chunkFile ))
newdf.to_hdf(dbFolder+chunkFile, 'df', format='table', mode='w', complevel=1)

Expand Down Expand Up @@ -1399,7 +1444,7 @@ def readChunkTableDB(tablename, key, value):
try:
df = pd.read_hdf(dbFolder+h5File).fillna('').astype(str)\
.query( '{}=="{}"'.format(key,value) )
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))
if len(df):
Expand All @@ -1420,6 +1465,10 @@ def readChunkTableDB(tablename, key, value):


def deleteID(column,value):
'''
Note: this is a container function.
The actual deleting is taking place in deleteInTable() func below.
'''
content = ''

# special case: if its a route_id or a calendar service_id, have to delete all the trips under it first, so their respective entries in stop_times are deleted too.
Expand Down Expand Up @@ -1456,12 +1505,14 @@ def deleteID(column,value):

def deleteInTable(tablename, key, value, action="delete"):
if tablename not in chunkRules.keys():
# its not a chunked table
h5Files = [tablename + '.h5']
# since we've composed this filename, check if file exists.
if not os.path.exists(dbFolder + h5Files[0]):
logmessage('deleteInTable: {} not found.'.format(h5Files[0]))
return ''
else:
# its a chunked table
if key == chunkRules[tablename].get('key'):
h5Files = [findChunk(value, tablename)]

Expand All @@ -1476,14 +1527,15 @@ def deleteInTable(tablename, key, value, action="delete"):
json.dump(table_lookup, outfile, indent=2)

else:
# list all the chunks
h5Files = findFiles(dbFolder, ext='.h5', prefix=tablename, chunk='y')

# now in h5Files we have which all files to process.
returnMessage = ''
for h5File in h5Files:
try:
df = pd.read_hdf(dbFolder + h5File).fillna('').astype(str)
except KeyError as e:
except (KeyError, ValueError) as e:
df = pd.DataFrame()
logmessage('Note: {} does not have any data.'.format(h5File))

Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

A browser-based user interface for creating, editing, exporting of static GTFS (General Transit Feed Specification Reference) feeds for a public transit authority.

**Development Status** : V 3.1.0
**Development Status** : V 3.2.0
Windows binary is available too now. Download from [Releases page](https://github.com/WRI-Cities/static-GTFS-manager/releases/).

## Intro
Expand Down
19 changes: 16 additions & 3 deletions agency.html
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<title>Agency</title>
<title>Agency and Feed_info</title>
<link href="lib/jquery-ui.min.css" rel="stylesheet">
<link href="lib/tabulator.min.css" rel="stylesheet">
<link href="lib/bootstrap.v4.0.0.min.css" crossorigin="anonymous" alt="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" rel="stylesheet">
Expand Down Expand Up @@ -47,8 +47,21 @@ <h2>Agency</h2>

</div></div>



<hr>
<h2>Feed_info</h2>
<p>This is an optional table (only one row), for giving information about the feed. See <a href="https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#feed_infotxt" target="_blank">specs document</a> for details.</p>
<p>
feed_publisher_name* : <input id="feed_publisher_name"></p><p>
feed_publisher_url* : <input id="feed_publisher_url"></p><p>
feed_lang* : <input id="feed_lang"></p><p>
feed_start_date : <input id="feed_start_date"></p><p>
feed_end_date : <input id="feed_end_date"></p><p>
feed_version : <input id="feed_version"></p><p>
feed_contact_email : <input id="feed_contact_email"></p><p>
feed_contact_url : <input id="feed_contact_url"></p><p>
</p>
<p><button id="saveFeedInfoButton" class="btn btn-outline-success btn-md">Save Feed_info Changes</button> &nbsp; <small id="feedInfoSaveStatus"></small></p>
<p><small>Note: If you are having feed_info table at all, then the top 3 fields are compulsory. Otherwise, leave everything blank.</small></p>
</div>
<script src="config/settings.js" type="text/javascript"></script>
<script src="js/commonfuncs.js" type="text/javascript"></script>
Expand Down
Loading

0 comments on commit f3ff414

Please sign in to comment.